build(deps): bump the k8s group with 5 updates
Bumps the k8s group with 5 updates: | Package | From | To | | --- | --- | --- | | [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) | `0.30.3` | `0.31.0` | | [k8s.io/client-go](https://github.com/kubernetes/client-go) | `0.30.3` | `0.31.0` | | [k8s.io/component-base](https://github.com/kubernetes/component-base) | `0.30.3` | `0.31.0` | | [k8s.io/kubelet](https://github.com/kubernetes/kubelet) | `0.30.3` | `0.31.0` | | [k8s.io/utils](https://github.com/kubernetes/utils) | `0.0.0-20230726121419-3b25d923346b` | `0.0.0-20240711033017-18e509b52bc8` | Updates `k8s.io/apimachinery` from 0.30.3 to 0.31.0 - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.30.3...v0.31.0) Updates `k8s.io/client-go` from 0.30.3 to 0.31.0 - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.30.3...v0.31.0) Updates `k8s.io/component-base` from 0.30.3 to 0.31.0 - [Commits](https://github.com/kubernetes/component-base/compare/v0.30.3...v0.31.0) Updates `k8s.io/kubelet` from 0.30.3 to 0.31.0 - [Commits](https://github.com/kubernetes/kubelet/compare/v0.30.3...v0.31.0) Updates `k8s.io/utils` from 0.0.0-20230726121419-3b25d923346b to 0.0.0-20240711033017-18e509b52bc8 - [Commits](https://github.com/kubernetes/utils/commits) --- updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s - dependency-name: k8s.io/component-base dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s - dependency-name: k8s.io/kubelet dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s - dependency-name: k8s.io/utils dependency-type: direct:production update-type: 
version-update:semver-patch dependency-group: k8s ... Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent
e8104a4858
commit
021063c4ab
25
go.mod
25
go.mod
@ -76,13 +76,13 @@ require (
|
|||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
|
||||||
google.golang.org/grpc v1.65.0
|
google.golang.org/grpc v1.65.0
|
||||||
google.golang.org/protobuf v1.34.2
|
google.golang.org/protobuf v1.34.2
|
||||||
k8s.io/apimachinery v0.30.3
|
k8s.io/apimachinery v0.31.0
|
||||||
k8s.io/client-go v0.30.3
|
k8s.io/client-go v0.31.0
|
||||||
k8s.io/component-base v0.30.3
|
k8s.io/component-base v0.31.0
|
||||||
k8s.io/cri-api v0.32.0-alpha.0
|
k8s.io/cri-api v0.32.0-alpha.0
|
||||||
k8s.io/klog/v2 v2.130.1
|
k8s.io/klog/v2 v2.130.1
|
||||||
k8s.io/kubelet v0.30.3
|
k8s.io/kubelet v0.31.0
|
||||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
|
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||||
tags.cncf.io/container-device-interface v0.8.0
|
tags.cncf.io/container-device-interface v0.8.0
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -95,6 +95,7 @@ require (
|
|||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
|
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||||
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
|
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
|
||||||
github.com/go-logr/logr v1.4.2 // indirect
|
github.com/go-logr/logr v1.4.2 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
@ -111,19 +112,21 @@ require (
|
|||||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||||
github.com/mdlayher/socket v0.4.1 // indirect
|
github.com/mdlayher/socket v0.4.1 // indirect
|
||||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||||
github.com/moby/spdystream v0.2.0 // indirect
|
github.com/moby/spdystream v0.4.0 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/prometheus/client_model v0.6.1 // indirect
|
github.com/prometheus/client_model v0.6.1 // indirect
|
||||||
github.com/prometheus/common v0.48.0 // indirect
|
github.com/prometheus/common v0.55.0 // indirect
|
||||||
github.com/prometheus/procfs v0.12.0 // indirect
|
github.com/prometheus/procfs v0.15.1 // indirect
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
|
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
|
||||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
|
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
|
||||||
github.com/vishvananda/netns v0.0.4 // indirect
|
github.com/vishvananda/netns v0.0.4 // indirect
|
||||||
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
|
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
|
||||||
go.opencensus.io v0.24.0 // indirect
|
go.opencensus.io v0.24.0 // indirect
|
||||||
@ -132,7 +135,7 @@ require (
|
|||||||
golang.org/x/crypto v0.25.0 // indirect
|
golang.org/x/crypto v0.25.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 // indirect
|
golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 // indirect
|
||||||
golang.org/x/net v0.27.0 // indirect
|
golang.org/x/net v0.27.0 // indirect
|
||||||
golang.org/x/oauth2 v0.20.0 // indirect
|
golang.org/x/oauth2 v0.21.0 // indirect
|
||||||
golang.org/x/term v0.22.0 // indirect
|
golang.org/x/term v0.22.0 // indirect
|
||||||
golang.org/x/text v0.16.0 // indirect
|
golang.org/x/text v0.16.0 // indirect
|
||||||
golang.org/x/time v0.3.0 // indirect
|
golang.org/x/time v0.3.0 // indirect
|
||||||
@ -140,8 +143,8 @@ require (
|
|||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
k8s.io/api v0.30.3 // indirect
|
k8s.io/api v0.31.0 // indirect
|
||||||
k8s.io/apiserver v0.30.3 // indirect
|
k8s.io/apiserver v0.31.0 // indirect
|
||||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||||
|
58
go.sum
58
go.sum
@ -97,6 +97,8 @@ github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx
|
|||||||
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||||
|
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||||
|
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||||
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
|
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
|
||||||
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
|
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
|
||||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
@ -111,10 +113,9 @@ github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn
|
|||||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
@ -154,13 +155,12 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
|
|||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
|
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
|
||||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
|
||||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
|
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
|
||||||
@ -209,8 +209,8 @@ github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5
|
|||||||
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
|
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
|
||||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
|
||||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||||
@ -272,13 +272,13 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
|
|||||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||||
@ -318,6 +318,8 @@ github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhg
|
|||||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||||
|
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||||
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||||
@ -387,8 +389,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
|
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||||
golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -477,26 +479,26 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
|
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||||
k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
|
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||||
k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
|
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||||
k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||||
k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g=
|
k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
|
||||||
k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg=
|
k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
|
||||||
k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
|
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
|
||||||
k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
|
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
|
||||||
k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s=
|
k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
|
||||||
k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA=
|
k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
|
||||||
k8s.io/cri-api v0.32.0-alpha.0 h1:Rs9prajcHWZAdy9ueQdD2R+OOnDD3rKYbM9hQ90iEQU=
|
k8s.io/cri-api v0.32.0-alpha.0 h1:Rs9prajcHWZAdy9ueQdD2R+OOnDD3rKYbM9hQ90iEQU=
|
||||||
k8s.io/cri-api v0.32.0-alpha.0/go.mod h1:Po3TMAYH/+KrZabi7QiwQI4a692oZcUOUThd/rqwxrI=
|
k8s.io/cri-api v0.32.0-alpha.0/go.mod h1:Po3TMAYH/+KrZabi7QiwQI4a692oZcUOUThd/rqwxrI=
|
||||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||||
k8s.io/kubelet v0.30.3 h1:KvGWDdhzD0vEyDyGTCjsDc8D+0+lwRMw3fJbfQgF7ys=
|
k8s.io/kubelet v0.31.0 h1:IlfkBy7QTojGEm97GuVGhtli0HL/Pgu4AdayiF76yWo=
|
||||||
k8s.io/kubelet v0.30.3/go.mod h1:D9or45Vkzcqg55CEiqZ8dVbwP3Ksj7DruEVRS9oq3Ys=
|
k8s.io/kubelet v0.31.0/go.mod h1:s+OnqnfdIh14PFpUb7NgzM53WSYXcczA3w/1qSzsRc8=
|
||||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
|
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||||
|
12
vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
12
vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
# Binaries for programs and plugins
|
||||||
|
*.exe
|
||||||
|
*.exe~
|
||||||
|
*.dll
|
||||||
|
*.so
|
||||||
|
*.dylib
|
||||||
|
|
||||||
|
# Test binary, build with `go test -c`
|
||||||
|
*.test
|
||||||
|
|
||||||
|
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||||
|
*.out
|
104
vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
104
vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
# Do not delete linter settings. Linters like gocritic can be enabled on the command line.
|
||||||
|
|
||||||
|
linters-settings:
|
||||||
|
depguard:
|
||||||
|
rules:
|
||||||
|
prevent_unmaintained_packages:
|
||||||
|
list-mode: strict
|
||||||
|
files:
|
||||||
|
- $all
|
||||||
|
- "!$test"
|
||||||
|
allow:
|
||||||
|
- $gostd
|
||||||
|
- github.com/x448/float16
|
||||||
|
deny:
|
||||||
|
- pkg: io/ioutil
|
||||||
|
desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
|
||||||
|
dupl:
|
||||||
|
threshold: 100
|
||||||
|
funlen:
|
||||||
|
lines: 100
|
||||||
|
statements: 50
|
||||||
|
goconst:
|
||||||
|
ignore-tests: true
|
||||||
|
min-len: 2
|
||||||
|
min-occurrences: 3
|
||||||
|
gocritic:
|
||||||
|
enabled-tags:
|
||||||
|
- diagnostic
|
||||||
|
- experimental
|
||||||
|
- opinionated
|
||||||
|
- performance
|
||||||
|
- style
|
||||||
|
disabled-checks:
|
||||||
|
- commentedOutCode
|
||||||
|
- dupImport # https://github.com/go-critic/go-critic/issues/845
|
||||||
|
- ifElseChain
|
||||||
|
- octalLiteral
|
||||||
|
- paramTypeCombine
|
||||||
|
- whyNoLint
|
||||||
|
gofmt:
|
||||||
|
simplify: false
|
||||||
|
goimports:
|
||||||
|
local-prefixes: github.com/fxamacker/cbor
|
||||||
|
golint:
|
||||||
|
min-confidence: 0
|
||||||
|
govet:
|
||||||
|
check-shadowing: true
|
||||||
|
lll:
|
||||||
|
line-length: 140
|
||||||
|
maligned:
|
||||||
|
suggest-new: true
|
||||||
|
misspell:
|
||||||
|
locale: US
|
||||||
|
staticcheck:
|
||||||
|
checks: ["all"]
|
||||||
|
|
||||||
|
linters:
|
||||||
|
disable-all: true
|
||||||
|
enable:
|
||||||
|
- asciicheck
|
||||||
|
- bidichk
|
||||||
|
- depguard
|
||||||
|
- errcheck
|
||||||
|
- exportloopref
|
||||||
|
- goconst
|
||||||
|
- gocritic
|
||||||
|
- gocyclo
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
|
- goprintffuncname
|
||||||
|
- gosec
|
||||||
|
- gosimple
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
- misspell
|
||||||
|
- nilerr
|
||||||
|
- revive
|
||||||
|
- staticcheck
|
||||||
|
- stylecheck
|
||||||
|
- typecheck
|
||||||
|
- unconvert
|
||||||
|
- unused
|
||||||
|
|
||||||
|
issues:
|
||||||
|
# max-issues-per-linter default is 50. Set to 0 to disable limit.
|
||||||
|
max-issues-per-linter: 0
|
||||||
|
# max-same-issues default is 3. Set to 0 to disable limit.
|
||||||
|
max-same-issues: 0
|
||||||
|
|
||||||
|
exclude-rules:
|
||||||
|
- path: decode.go
|
||||||
|
text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string `, ` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: decode.go
|
||||||
|
text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: valid.go
|
||||||
|
text: "string ` for type ` has (\\d+) occurrences, make it a constant"
|
||||||
|
- path: valid.go
|
||||||
|
text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
|
133
vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
133
vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
|
||||||
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
|
## Our Pledge
|
||||||
|
|
||||||
|
We as members, contributors, and leaders pledge to make participation in our
|
||||||
|
community a harassment-free experience for everyone, regardless of age, body
|
||||||
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
|
identity and expression, level of experience, education, socio-economic status,
|
||||||
|
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||||
|
identity and orientation.
|
||||||
|
|
||||||
|
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||||
|
diverse, inclusive, and healthy community.
|
||||||
|
|
||||||
|
## Our Standards
|
||||||
|
|
||||||
|
Examples of behavior that contributes to a positive environment for our
|
||||||
|
community include:
|
||||||
|
|
||||||
|
* Demonstrating empathy and kindness toward other people
|
||||||
|
* Being respectful of differing opinions, viewpoints, and experiences
|
||||||
|
* Giving and gracefully accepting constructive feedback
|
||||||
|
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||||
|
and learning from the experience
|
||||||
|
* Focusing on what is best not just for us as individuals, but for the overall
|
||||||
|
community
|
||||||
|
|
||||||
|
Examples of unacceptable behavior include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery, and sexual attention or advances of
|
||||||
|
any kind
|
||||||
|
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing others' private information, such as a physical or email address,
|
||||||
|
without their explicit permission
|
||||||
|
* Other conduct which could reasonably be considered inappropriate in a
|
||||||
|
professional setting
|
||||||
|
|
||||||
|
## Enforcement Responsibilities
|
||||||
|
|
||||||
|
Community leaders are responsible for clarifying and enforcing our standards of
|
||||||
|
acceptable behavior and will take appropriate and fair corrective action in
|
||||||
|
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||||
|
or harmful.
|
||||||
|
|
||||||
|
Community leaders have the right and responsibility to remove, edit, or reject
|
||||||
|
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||||
|
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||||
|
decisions when appropriate.
|
||||||
|
|
||||||
|
## Scope
|
||||||
|
|
||||||
|
This Code of Conduct applies within all community spaces, and also applies when
|
||||||
|
an individual is officially representing the community in public spaces.
|
||||||
|
Examples of representing our community include using an official e-mail address,
|
||||||
|
posting via an official social media account, or acting as an appointed
|
||||||
|
representative at an online or offline event.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported to the community leaders responsible for enforcement at
|
||||||
|
faye.github@gmail.com.
|
||||||
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
|
All community leaders are obligated to respect the privacy and security of the
|
||||||
|
reporter of any incident.
|
||||||
|
|
||||||
|
## Enforcement Guidelines
|
||||||
|
|
||||||
|
Community leaders will follow these Community Impact Guidelines in determining
|
||||||
|
the consequences for any action they deem in violation of this Code of Conduct:
|
||||||
|
|
||||||
|
### 1. Correction
|
||||||
|
|
||||||
|
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||||
|
unprofessional or unwelcome in the community.
|
||||||
|
|
||||||
|
**Consequence**: A private, written warning from community leaders, providing
|
||||||
|
clarity around the nature of the violation and an explanation of why the
|
||||||
|
behavior was inappropriate. A public apology may be requested.
|
||||||
|
|
||||||
|
### 2. Warning
|
||||||
|
|
||||||
|
**Community Impact**: A violation through a single incident or series of
|
||||||
|
actions.
|
||||||
|
|
||||||
|
**Consequence**: A warning with consequences for continued behavior. No
|
||||||
|
interaction with the people involved, including unsolicited interaction with
|
||||||
|
those enforcing the Code of Conduct, for a specified period of time. This
|
||||||
|
includes avoiding interactions in community spaces as well as external channels
|
||||||
|
like social media. Violating these terms may lead to a temporary or permanent
|
||||||
|
ban.
|
||||||
|
|
||||||
|
### 3. Temporary Ban
|
||||||
|
|
||||||
|
**Community Impact**: A serious violation of community standards, including
|
||||||
|
sustained inappropriate behavior.
|
||||||
|
|
||||||
|
**Consequence**: A temporary ban from any sort of interaction or public
|
||||||
|
communication with the community for a specified period of time. No public or
|
||||||
|
private interaction with the people involved, including unsolicited interaction
|
||||||
|
with those enforcing the Code of Conduct, is allowed during this period.
|
||||||
|
Violating these terms may lead to a permanent ban.
|
||||||
|
|
||||||
|
### 4. Permanent Ban
|
||||||
|
|
||||||
|
**Community Impact**: Demonstrating a pattern of violation of community
|
||||||
|
standards, including sustained inappropriate behavior, harassment of an
|
||||||
|
individual, or aggression toward or disparagement of classes of individuals.
|
||||||
|
|
||||||
|
**Consequence**: A permanent ban from any sort of public interaction within the
|
||||||
|
community.
|
||||||
|
|
||||||
|
## Attribution
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||||
|
version 2.1, available at
|
||||||
|
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
|
||||||
|
|
||||||
|
Community Impact Guidelines were inspired by
|
||||||
|
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
||||||
|
|
||||||
|
For answers to common questions about this code of conduct, see the FAQ at
|
||||||
|
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
|
||||||
|
[https://www.contributor-covenant.org/translations][translations].
|
||||||
|
|
||||||
|
[homepage]: https://www.contributor-covenant.org
|
||||||
|
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
|
||||||
|
[Mozilla CoC]: https://github.com/mozilla/diversity
|
||||||
|
[FAQ]: https://www.contributor-covenant.org/faq
|
||||||
|
[translations]: https://www.contributor-covenant.org/translations
|
41
vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
41
vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
# How to contribute
|
||||||
|
|
||||||
|
You can contribute by using the library, opening issues, or opening pull requests.
|
||||||
|
|
||||||
|
## Bug reports and security vulnerabilities
|
||||||
|
|
||||||
|
Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).
|
||||||
|
|
||||||
|
To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).
|
||||||
|
|
||||||
|
Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.
|
||||||
|
|
||||||
|
## Pull requests
|
||||||
|
|
||||||
|
Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.
|
||||||
|
|
||||||
|
Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.
|
||||||
|
|
||||||
|
See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.
|
||||||
|
|
||||||
|
Pull requests have a greater chance of being approved if:
|
||||||
|
- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
|
||||||
|
- it has > 97% code coverage.
|
||||||
|
|
||||||
|
## Describe your issue
|
||||||
|
|
||||||
|
Clearly describe the issue:
|
||||||
|
* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
|
||||||
|
* If you propose a change or addition, try to give an example how the improved code could look like or how to use it.
|
||||||
|
* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.
|
||||||
|
|
||||||
|
## Please don't
|
||||||
|
|
||||||
|
Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.
|
||||||
|
|
||||||
|
Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
|
||||||
|
- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
|
21
vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
21
vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019-present Faye Amacker
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
691
vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
691
vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
@ -0,0 +1,691 @@
|
|||||||
|
# CBOR Codec in Go
|
||||||
|
|
||||||
|
<!-- [](#cbor-library-in-go) -->
|
||||||
|
|
||||||
|
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
|
||||||
|
|
||||||
|
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
|
||||||
|
|
||||||
|
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||||
|
|
||||||
|
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
|
||||||
|
|
||||||
|
## fxamacker/cbor
|
||||||
|
|
||||||
|
[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
|
||||||
|
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
|
||||||
|
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
|
||||||
|
[](#fuzzing-and-code-coverage)
|
||||||
|
[](https://goreportcard.com/report/github.com/fxamacker/cbor)
|
||||||
|
|
||||||
|
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||||
|
|
||||||
|
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
|
||||||
|
|
||||||
|
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
|
||||||
|
|
||||||
|
<details><summary>Highlights</summary><p/>
|
||||||
|
|
||||||
|
__🚀 Speed__
|
||||||
|
|
||||||
|
Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.
|
||||||
|
|
||||||
|
__🔒 Security__
|
||||||
|
|
||||||
|
Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||||
|
|
||||||
|
Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.
|
||||||
|
|
||||||
|
__🗜️ Data Size__
|
||||||
|
|
||||||
|
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||||
|
|
||||||
|
__:jigsaw: Usability__
|
||||||
|
|
||||||
|
API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.
|
||||||
|
|
||||||
|
Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.
|
||||||
|
|
||||||
|
__📆 Extensibility__
|
||||||
|
|
||||||
|
Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Secure Decoding with Configurable Settings
|
||||||
|
|
||||||
|
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
|
||||||
|
|
||||||
|
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||||
|
|
||||||
|
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||||
|
// while decoding 181 bytes.
|
||||||
|
package main
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/gob"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Example data is from https://github.com/golang/go/issues/24446
|
||||||
|
// (shortened to 181 bytes).
|
||||||
|
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||||
|
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||||
|
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||||
|
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||||
|
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||||
|
"303030303030303030303030303030303030303030303030303030303030" +
|
||||||
|
"30"
|
||||||
|
|
||||||
|
type X struct {
|
||||||
|
J *X
|
||||||
|
K map[string]int
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
raw, _ := hex.DecodeString(data)
|
||||||
|
decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||||
|
|
||||||
|
var x X
|
||||||
|
decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||||
|
fmt.Println("Decoding finished.")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
|
||||||
|
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
|
||||||
|
|
||||||
|
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||||
|
| :---- | ------------: | -----: | -----: |
|
||||||
|
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
|
||||||
|
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
|
||||||
|
|
||||||
|
<details><summary>Benchmark details</summary><p/>
|
||||||
|
|
||||||
|
Latest comparison used:
|
||||||
|
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||||
|
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
|
||||||
|
- go test -bench=. -benchmem -count=20
|
||||||
|
|
||||||
|
#### Prior comparisons
|
||||||
|
|
||||||
|
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||||
|
| :---- | ------------: | -----: | -----: |
|
||||||
|
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||||
|
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||||
|
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||||
|
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||||
|
|
||||||
|
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||||
|
- go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||||
|
- go test -bench=. -benchmem -count=20
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Smaller Encodings with Struct Tags
|
||||||
|
|
||||||
|
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||||
|
|
||||||
|
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||||
|
|
||||||
|
https://go.dev/play/p/YxwvfPdFQG2
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Example encoding nested struct (with omitempty tag)
|
||||||
|
// - encoding/json: 18 byte JSON
|
||||||
|
// - fxamacker/cbor: 1 byte CBOR
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GrandChild struct {
|
||||||
|
Quux int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Child struct {
|
||||||
|
Baz int `json:",omitempty"`
|
||||||
|
Qux GrandChild `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Parent struct {
|
||||||
|
Foo Child `json:",omitempty"`
|
||||||
|
Bar int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func cb() {
|
||||||
|
results, _ := cbor.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||||
|
fmt.Println("DN: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func js() {
|
||||||
|
results, _ := json.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text := string(results) // JSON
|
||||||
|
fmt.Println("JSON: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cb()
|
||||||
|
fmt.Println("-------------")
|
||||||
|
js()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Output (DN is Diagnostic Notation):
|
||||||
|
```
|
||||||
|
hex(CBOR): a0
|
||||||
|
DN: {}
|
||||||
|
-------------
|
||||||
|
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||||
|
JSON: {"Foo":{"Qux":{}}}
|
||||||
|
```
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
Example using different struct tags together:
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
|
||||||
|
|
||||||
|
### Key Points
|
||||||
|
|
||||||
|
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
|
||||||
|
|
||||||
|
- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
|
||||||
|
- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.
|
||||||
|
|
||||||
|
Configurable limits and options can be used to balance trade-offs.
|
||||||
|
|
||||||
|
- Encoding and decoding modes are created from options (settings).
|
||||||
|
- Modes can be created at startup and reused.
|
||||||
|
- Modes are safe for concurrent use.
|
||||||
|
|
||||||
|
### Default Mode
|
||||||
|
|
||||||
|
Package level functions only use this library's default settings.
|
||||||
|
They provide the "default mode" of encoding and decoding.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
|
||||||
|
b, err = cbor.Marshal(v) // encode v to []byte b
|
||||||
|
err = cbor.Unmarshal(b, &v) // decode []byte b to v
|
||||||
|
decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
|
||||||
|
err = decoder.Decode(&v) // decode a CBOR data item to v
|
||||||
|
|
||||||
|
// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
|
||||||
|
err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.
|
||||||
|
|
||||||
|
// v2.5.0 added new functions that return remaining bytes.
|
||||||
|
|
||||||
|
// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
|
||||||
|
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||||
|
|
||||||
|
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
|
||||||
|
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
|
||||||
|
|
||||||
|
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
|
||||||
|
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
|
||||||
|
```
|
||||||
|
|
||||||
|
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||||
|
|
||||||
|
- Different CBOR libraries may use different default settings.
|
||||||
|
- CBOR-based formats or protocols usually require specific settings.
|
||||||
|
|
||||||
|
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||||
|
|
||||||
|
### Presets
|
||||||
|
|
||||||
|
Presets can be used as-is or as a starting point for custom settings.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// EncOptions is a struct of encoder settings.
|
||||||
|
func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
|
||||||
|
func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
|
||||||
|
func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
|
||||||
|
func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
|
||||||
|
```
|
||||||
|
|
||||||
|
Presets are used to create custom modes.
|
||||||
|
|
||||||
|
### Custom Modes
|
||||||
|
|
||||||
|
Modes are created from settings. Once created, modes have immutable settings.
|
||||||
|
|
||||||
|
💡 Create the mode at startup and reuse it. It is safe for concurrent use.
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Create encoding mode.
|
||||||
|
opts := cbor.CoreDetEncOptions() // use preset options as a starting point
|
||||||
|
opts.Time = cbor.TimeUnix // change any settings if needed
|
||||||
|
em, err := opts.EncMode() // create an immutable encoding mode
|
||||||
|
|
||||||
|
// Reuse the encoding mode. It is safe for concurrent use.
|
||||||
|
|
||||||
|
// API matches encoding/json.
|
||||||
|
b, err := em.Marshal(v) // encode v to []byte b
|
||||||
|
encoder := em.NewEncoder(w) // create encoder with io.Writer w
|
||||||
|
err := encoder.Encode(v) // encode v to io.Writer w
|
||||||
|
```
|
||||||
|
|
||||||
|
Default mode and custom modes automatically apply struct tags.
|
||||||
|
|
||||||
|
### User Specified Buffer for Encoding (v2.7.0)
|
||||||
|
|
||||||
|
`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool.
|
||||||
|
|
||||||
|
```Go
|
||||||
|
em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
|
||||||
|
```
|
||||||
|
|
||||||
|
### Struct Tags
|
||||||
|
|
||||||
|
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||||
|
|
||||||
|
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||||
|
|
||||||
|
https://go.dev/play/p/YxwvfPdFQG2
|
||||||
|
|
||||||
|
```Go
|
||||||
|
// Example encoding nested struct (with omitempty tag)
|
||||||
|
// - encoding/json: 18 byte JSON
|
||||||
|
// - fxamacker/cbor: 1 byte CBOR
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GrandChild struct {
|
||||||
|
Quux int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Child struct {
|
||||||
|
Baz int `json:",omitempty"`
|
||||||
|
Qux GrandChild `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Parent struct {
|
||||||
|
Foo Child `json:",omitempty"`
|
||||||
|
Bar int `json:",omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func cb() {
|
||||||
|
results, _ := cbor.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||||
|
fmt.Println("DN: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func js() {
|
||||||
|
results, _ := json.Marshal(Parent{})
|
||||||
|
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||||
|
|
||||||
|
text := string(results) // JSON
|
||||||
|
fmt.Println("JSON: " + text)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cb()
|
||||||
|
fmt.Println("-------------")
|
||||||
|
js()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Output (DN is Diagnostic Notation):
|
||||||
|
```
|
||||||
|
hex(CBOR): a0
|
||||||
|
DN: {}
|
||||||
|
-------------
|
||||||
|
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||||
|
JSON: {"Foo":{"Qux":{}}}
|
||||||
|
```
|
||||||
|
|
||||||
|
<hr/>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details><summary>Example using several struct tags</summary><p/>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||||
|
|
||||||
|
### CBOR Tags
|
||||||
|
|
||||||
|
CBOR tags are specified in a `TagSet`.
|
||||||
|
|
||||||
|
Custom modes can be created with a `TagSet` to handle CBOR tags.
|
||||||
|
|
||||||
|
```go
|
||||||
|
em, err := opts.EncMode() // no CBOR tags
|
||||||
|
em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
|
||||||
|
em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
|
||||||
|
```
|
||||||
|
|
||||||
|
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
|
||||||
|
|
||||||
|
<details><summary>Example using TagSet and TagOptions</summary><p/>
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Use signedCWT struct defined in "Decoding CWT" example.
|
||||||
|
|
||||||
|
// Create TagSet (safe for concurrency).
|
||||||
|
tags := cbor.NewTagSet()
|
||||||
|
// Register tag COSE_Sign1 18 with signedCWT type.
|
||||||
|
tags.Add(
|
||||||
|
cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
|
||||||
|
reflect.TypeOf(signedCWT{}),
|
||||||
|
18)
|
||||||
|
|
||||||
|
// Create DecMode with immutable tags.
|
||||||
|
dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)
|
||||||
|
|
||||||
|
// Unmarshal to signedCWT with tag support.
|
||||||
|
var v signedCWT
|
||||||
|
if err := dm.Unmarshal(data, &v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create EncMode with immutable tags.
|
||||||
|
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
|
||||||
|
|
||||||
|
// Marshal signedCWT with tag number.
|
||||||
|
if data, err := cbor.Marshal(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Functions and Interfaces
|
||||||
|
|
||||||
|
<details><summary>Functions and interfaces at a glance</summary><p/>
|
||||||
|
|
||||||
|
Common functions with same API as `encoding/json`:
|
||||||
|
- `Marshal`, `Unmarshal`
|
||||||
|
- `NewEncoder`, `(*Encoder).Encode`
|
||||||
|
- `NewDecoder`, `(*Decoder).Decode`
|
||||||
|
|
||||||
|
NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
|
||||||
|
because RFC 8949 treats CBOR data item with remaining bytes as malformed.
|
||||||
|
- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes.
|
||||||
|
|
||||||
|
Other useful functions:
|
||||||
|
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
|
||||||
|
- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
|
||||||
|
- `Wellformed` returns true if the CBOR data item is well-formed.
|
||||||
|
|
||||||
|
Interfaces identical or comparable to Go `encoding` packages include:
|
||||||
|
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
|
||||||
|
|
||||||
|
The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
### Security Tips
|
||||||
|
|
||||||
|
🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.
|
||||||
|
|
||||||
|
Default limits may need to be increased for systems handling very large data (e.g. blockchains).
|
||||||
|
|
||||||
|
`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||||
|
|
||||||
|
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||||
|
|
||||||
|
### Prior Release
|
||||||
|
|
||||||
|
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||||
|
|
||||||
|
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||||
|
|
||||||
|
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||||
|
|
||||||
|
See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
|
||||||
|
|
||||||
|
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||||
|
|
||||||
|
<!--
|
||||||
|
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||||
|
|
||||||
|
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||||
|
|
||||||
|
Comparison of v2.4.0 vs v2.5.0-beta2 provided by @448 (edited to fit width).
|
||||||
|
|
||||||
|
PR [#382](https://github.com/fxamacker/cbor/pull/382) returns buffer to pool in `Encode()`. It adds a bit of overhead to `Encode()` but `NewEncoder().Encode()` is a lot faster and uses less memory as shown here:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ benchstat bench-v2.4.0.log bench-f9e6291.log
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
pkg: github.com/fxamacker/cbor/v2
|
||||||
|
cpu: 12th Gen Intel(R) Core(TM) i7-12700H
|
||||||
|
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||||
|
│ sec/op │ sec/op vs base │
|
||||||
|
NewEncoderEncode/Go_bool_to_CBOR_bool-20 236.70n ± 2% 58.04n ± 1% -75.48% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 238.00n ± 2% 63.93n ± 1% -73.14% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 238.65n ± 2% 64.88n ± 1% -72.81% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_float64_to_CBOR_float-20 242.00n ± 2% 63.00n ± 1% -73.97% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 245.60n ± 1% 68.55n ± 1% -72.09% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_string_to_CBOR_text-20 243.20n ± 3% 68.39n ± 1% -71.88% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]int_to_CBOR_array-20 563.0n ± 2% 378.3n ± 0% -32.81% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 2.043µ ± 2% 1.906µ ± 2% -6.75% (p=0.000 n=10)
|
||||||
|
geomean 349.7n 122.7n -64.92%
|
||||||
|
|
||||||
|
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||||
|
│ B/op │ B/op vs base │
|
||||||
|
NewEncoderEncode/Go_bool_to_CBOR_bool-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_float64_to_CBOR_float-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_string_to_CBOR_text-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]int_to_CBOR_array-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 544.0 ± 0% 416.0 ± 0% -23.53% (p=0.000 n=10)
|
||||||
|
geomean 153.4 ? ¹ ²
|
||||||
|
¹ summaries must be >0 to compute geomean
|
||||||
|
² ratios must be >0 to compute geomean
|
||||||
|
|
||||||
|
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||||
|
│ allocs/op │ allocs/op vs base │
|
||||||
|
NewEncoderEncode/Go_bool_to_CBOR_bool-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_float64_to_CBOR_float-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_string_to_CBOR_text-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_[]int_to_CBOR_array-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||||
|
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 28.00 ± 0% 26.00 ± 0% -7.14% (p=0.000 n=10)
|
||||||
|
geomean 2.782 ? ¹ ²
|
||||||
|
¹ summaries must be >0 to compute geomean
|
||||||
|
² ratios must be >0 to compute geomean
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
-->
|
||||||
|
|
||||||
|
## Who uses fxamacker/cbor
|
||||||
|
|
||||||
|
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||||
|
|
||||||
|
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||||
|
|
||||||
|
## Standards
|
||||||
|
|
||||||
|
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||||
|
|
||||||
|
Notable CBOR features include:
|
||||||
|
|
||||||
|
| CBOR Feature | Description |
|
||||||
|
| :--- | :--- |
|
||||||
|
| CBOR tags | API supports built-in and user-defined tags. |
|
||||||
|
| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
|
||||||
|
| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
|
||||||
|
| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. |
|
||||||
|
| Indefinite length data | Option to allow/forbid for encoding and decoding. |
|
||||||
|
| Well-formedness | Always checked and enforced. |
|
||||||
|
| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
|
||||||
|
| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
|
||||||
|
|
||||||
|
Known limitations are noted in the [Limitations section](#limitations).
|
||||||
|
|
||||||
|
Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
|
||||||
|
|
||||||
|
Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.
|
||||||
|
|
||||||
|
After well-formedness is verified, basic validity errors are handled as follows:
|
||||||
|
|
||||||
|
* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default.
|
||||||
|
* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.
|
||||||
|
|
||||||
|
When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.
|
||||||
|
|
||||||
|
By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.
|
||||||
|
|
||||||
|
__Click to expand topic:__
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Duplicate Map Keys</summary><p>
|
||||||
|
|
||||||
|
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
|
||||||
|
|
||||||
|
`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
|
||||||
|
|
||||||
|
`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
|
||||||
|
|
||||||
|
APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Tag Validity</summary><p>
|
||||||
|
|
||||||
|
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
|
||||||
|
|
||||||
|
* Inadmissible type for tag content
|
||||||
|
* Inadmissible value for tag content
|
||||||
|
|
||||||
|
Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:
|
||||||
|
|
||||||
|
* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type.
|
||||||
|
* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified.
|
||||||
|
|
||||||
|
Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR.
|
||||||
|
|
||||||
|
For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
|
||||||
|
|
||||||
|
* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
|
||||||
|
* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
|
||||||
|
* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
|
||||||
|
|
||||||
|
## Fuzzing and Code Coverage
|
||||||
|
|
||||||
|
__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
|
||||||
|
|
||||||
|
__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.
|
||||||
|
|
||||||
|
<hr>
|
||||||
|
|
||||||
|
## Versions and API Changes
|
||||||
|
This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.
|
||||||
|
|
||||||
|
These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
|
||||||
|
`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
|
||||||
|
|
||||||
|
Exclusions from SemVer:
|
||||||
|
- Newly added API documented as "subject to change".
|
||||||
|
- Newly added API in the master branch that has never been tagged in non-beta release.
|
||||||
|
- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||||
|
|
||||||
|
This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.
|
||||||
|
|
||||||
|
## Code of Conduct
|
||||||
|
|
||||||
|
This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.
|
||||||
|
|
||||||
|
For more info, see [How to Contribute](CONTRIBUTING.md).
|
||||||
|
|
||||||
|
## Security Policy
|
||||||
|
|
||||||
|
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||||
|
|
||||||
|
For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).
|
||||||
|
|
||||||
|
## Acknowledgements
|
||||||
|
|
||||||
|
Many thanks to all the contributors on this project!
|
||||||
|
|
||||||
|
I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.
|
||||||
|
|
||||||
|
I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.
|
||||||
|
|
||||||
|
Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.
|
||||||
|
|
||||||
|
This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs.
|
||||||
|
|
||||||
|
Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).
|
||||||
|
|
||||||
|
Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!
|
||||||
|
|
||||||
|
This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well.
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).
|
||||||
|
|
||||||
|
fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.
|
||||||
|
|
||||||
|
<hr>
|
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
# Security Policy
|
||||||
|
|
||||||
|
Security fixes are provided for the latest released version of fxamacker/cbor.
|
||||||
|
|
||||||
|
If the security vulnerability is already known to the public, then you can open an issue as a bug report.
|
||||||
|
|
||||||
|
To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
|
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ByteString represents a CBOR byte string (major type 2). It exists for
// situations where a Go []byte is not possible or convenient. For example,
// Go doesn't allow []byte as a map key, so ByteString can be used to support
// data formats having CBOR maps with byte string keys. ByteString can also
// be used to encode invalid UTF-8 strings as CBOR byte strings.
// See DecOption.MapKeyByteStringMode for more details.
type ByteString string

// Bytes returns a freshly allocated byte slice holding the contents of bs.
func (bs ByteString) Bytes() []byte {
	return append([]byte(nil), bs...)
}
|
||||||
|
|
||||||
|
// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
|
||||||
|
func (bs ByteString) MarshalCBOR() ([]byte, error) {
|
||||||
|
e := getEncodeBuffer()
|
||||||
|
defer putEncodeBuffer(e)
|
||||||
|
|
||||||
|
// Encode length
|
||||||
|
encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))
|
||||||
|
|
||||||
|
// Encode data
|
||||||
|
buf := make([]byte, e.Len()+len(bs))
|
||||||
|
n := copy(buf, e.Bytes())
|
||||||
|
copy(buf[n:], bs)
|
||||||
|
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
|
||||||
|
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
|
||||||
|
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
|
||||||
|
if bs == nil {
|
||||||
|
return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decoding CBOR null and CBOR undefined to ByteString resets data.
|
||||||
|
// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
|
||||||
|
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||||
|
*bs = ""
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d := decoder{data: data, dm: defaultDecMode}
|
||||||
|
|
||||||
|
// Check if CBOR data type is byte string
|
||||||
|
if typ := d.nextCBORType(); typ != cborTypeByteString {
|
||||||
|
return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
b, _ := d.parseByteString()
|
||||||
|
*bs = ByteString(b)
|
||||||
|
return nil
|
||||||
|
}
|
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// encodeFuncs bundles the encode function and the is-empty predicate for a
// single Go type so both can be cached and retrieved together
// (see getEncodeFunc and encodeFuncCache).
type encodeFuncs struct {
	ef  encodeFunc  // encodes a value of the type to CBOR
	ief isEmptyFunc // reports whether a value is empty; used for omitempty handling (see getEncodingStructType)
}
|
||||||
|
|
||||||
|
// Package-level caches of per-type reflection results, keyed by reflect.Type.
// sync.Map makes them safe for concurrent use; entries are computed lazily
// on first use and never evicted.
var (
	decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
	encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
	encodeFuncCache         sync.Map // map[reflect.Type]encodeFuncs
	typeInfoCache           sync.Map // map[reflect.Type]*typeInfo
)
|
||||||
|
|
||||||
|
// specialType classifies Go types that need handling beyond what their
// reflect.Kind alone implies. See newTypeInfo for how each case is detected.
type specialType int

const (
	specialTypeNone             specialType = iota // no special handling required
	specialTypeUnmarshalerIface                    // pointer to the type implements typeUnmarshaler
	specialTypeEmptyIface                          // interface type with no methods
	specialTypeIface                               // interface type with at least one method
	specialTypeTag                                 // non-pointer type equals typeTag
	specialTypeTime                                // non-pointer type equals typeTime
)
|
||||||
|
|
||||||
|
// typeInfo caches reflection-derived facts about a single Go type so they
// are computed once per type (see getTypeInfo / newTypeInfo).
type typeInfo struct {
	elemTypeInfo *typeInfo    // element type info for arrays, slices, and maps
	keyTypeInfo  *typeInfo    // key type info for maps
	typ          reflect.Type // the original type, possibly a pointer type
	kind         reflect.Kind // kind of typ
	nonPtrType   reflect.Type // typ with all pointer indirections removed
	nonPtrKind   reflect.Kind // kind of nonPtrType
	spclType     specialType  // special-handling classification of nonPtrType
}
|
||||||
|
|
||||||
|
func newTypeInfo(t reflect.Type) *typeInfo {
|
||||||
|
tInfo := typeInfo{typ: t, kind: t.Kind()}
|
||||||
|
|
||||||
|
for t.Kind() == reflect.Ptr {
|
||||||
|
t = t.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
k := t.Kind()
|
||||||
|
|
||||||
|
tInfo.nonPtrType = t
|
||||||
|
tInfo.nonPtrKind = k
|
||||||
|
|
||||||
|
if k == reflect.Interface {
|
||||||
|
if t.NumMethod() == 0 {
|
||||||
|
tInfo.spclType = specialTypeEmptyIface
|
||||||
|
} else {
|
||||||
|
tInfo.spclType = specialTypeIface
|
||||||
|
}
|
||||||
|
} else if t == typeTag {
|
||||||
|
tInfo.spclType = specialTypeTag
|
||||||
|
} else if t == typeTime {
|
||||||
|
tInfo.spclType = specialTypeTime
|
||||||
|
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
|
||||||
|
tInfo.spclType = specialTypeUnmarshalerIface
|
||||||
|
}
|
||||||
|
|
||||||
|
switch k {
|
||||||
|
case reflect.Array, reflect.Slice:
|
||||||
|
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||||
|
case reflect.Map:
|
||||||
|
tInfo.keyTypeInfo = getTypeInfo(t.Key())
|
||||||
|
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
return &tInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodingStructType caches per-struct-type information needed when decoding
// CBOR into a Go struct (see getDecodingStructType).
type decodingStructType struct {
	fields             fields         // visible fields, as returned by getFields
	fieldIndicesByName map[string]int // index into fields, keyed by field name
	err                error          // deferred error from analyzing the struct's fields/tags
	toArray            bool           // struct uses the toarray struct option (see hasToArrayOption)
}
|
||||||
|
|
||||||
|
// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
|
||||||
|
// here's a very basic implementation of an aggregated error.
|
||||||
|
type multierror []error
|
||||||
|
|
||||||
|
func (m multierror) Error() string {
|
||||||
|
var sb strings.Builder
|
||||||
|
for i, err := range m {
|
||||||
|
sb.WriteString(err.Error())
|
||||||
|
if i < len(m)-1 {
|
||||||
|
sb.WriteString(", ")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sb.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDecodingStructType returns cached decoding information for struct type t,
// computing and caching it on first use. Errors found while analyzing the
// struct (unparseable keyasint names, duplicate field names) are recorded in
// the returned value's err field rather than returned directly, so the result
// can be cached and reused.
func getDecodingStructType(t reflect.Type) *decodingStructType {
	// Fast path: return a previously computed result.
	if v, _ := decodingStructTypeCache.Load(t); v != nil {
		return v.(*decodingStructType)
	}

	flds, structOptions := getFields(t)

	toArray := hasToArrayOption(structOptions)

	var errs []error
	for i := 0; i < len(flds); i++ {
		if flds[i].keyAsInt {
			// Fields using keyAsInt must have names parseable as integers.
			// Note: analysis stops at the first bad name (break, not continue).
			nameAsInt, numErr := strconv.Atoi(flds[i].name)
			if numErr != nil {
				errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
				break
			}
			flds[i].nameAsInt = int64(nameAsInt)
		}

		flds[i].typInfo = getTypeInfo(flds[i].typ)
	}

	// Build a name -> field-index lookup, collecting an error for each
	// duplicate name while keeping the first occurrence.
	fieldIndicesByName := make(map[string]int, len(flds))
	for i, fld := range flds {
		if _, ok := fieldIndicesByName[fld.name]; ok {
			errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
			continue
		}
		fieldIndicesByName[fld.name] = i
	}

	// Collapse the collected errors: none -> nil, one -> that error,
	// several -> a multierror aggregate.
	var err error
	{
		var multi multierror
		for _, each := range errs {
			if each != nil {
				multi = append(multi, each)
			}
		}
		if len(multi) == 1 {
			err = multi[0]
		} else if len(multi) > 1 {
			err = multi
		}
	}

	structType := &decodingStructType{
		fields:             flds,
		fieldIndicesByName: fieldIndicesByName,
		err:                err,
		toArray:            toArray,
	}
	// Benign race: concurrent first callers may each compute and store an
	// equivalent value; the last store wins.
	decodingStructTypeCache.Store(t, structType)
	return structType
}
|
||||||
|
|
||||||
|
// encodingStructType caches per-struct-type information needed when encoding
// a Go struct to CBOR (see getEncodingStructType).
type encodingStructType struct {
	fields             fields // fields in the order returned by getFields (used for SortNone/SortFastShuffle)
	bytewiseFields     fields // fields sorted bytewise by encoded CBOR name
	lengthFirstFields  fields // fields sorted length-first, then bytewise, by encoded CBOR name
	omitEmptyFieldsIdx []int  // indices into fields of fields marked omitEmpty
	err                error  // deferred error from analyzing the struct's fields/tags
	toArray            bool   // struct uses the toarray struct option (see hasToArrayOption)
}
|
||||||
|
|
||||||
|
func (st *encodingStructType) getFields(em *encMode) fields {
|
||||||
|
switch em.sort {
|
||||||
|
case SortNone, SortFastShuffle:
|
||||||
|
return st.fields
|
||||||
|
case SortLengthFirst:
|
||||||
|
return st.lengthFirstFields
|
||||||
|
default:
|
||||||
|
return st.bytewiseFields
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type bytewiseFieldSorter struct {
|
||||||
|
fields fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *bytewiseFieldSorter) Len() int {
|
||||||
|
return len(x.fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *bytewiseFieldSorter) Swap(i, j int) {
|
||||||
|
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *bytewiseFieldSorter) Less(i, j int) bool {
|
||||||
|
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type lengthFirstFieldSorter struct {
|
||||||
|
fields fields
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *lengthFirstFieldSorter) Len() int {
|
||||||
|
return len(x.fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *lengthFirstFieldSorter) Swap(i, j int) {
|
||||||
|
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *lengthFirstFieldSorter) Less(i, j int) bool {
|
||||||
|
if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
|
||||||
|
return len(x.fields[i].cborName) < len(x.fields[j].cborName)
|
||||||
|
}
|
||||||
|
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// getEncodingStructType returns cached encoding information for struct type t,
// computing and caching it on first use. It precomputes each field's encode
// function, encoded CBOR name (integer or text string key), and the sorted
// field orders used by the different map-key sort modes. Analysis errors are
// cached in the result's err field and also returned.
func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
	// Fast path: return a previously computed result.
	if v, _ := encodingStructTypeCache.Load(t); v != nil {
		structType := v.(*encodingStructType)
		return structType, structType.err
	}

	flds, structOptions := getFields(t)

	// Structs with the toarray option are encoded as CBOR arrays and take a
	// simpler path (no per-field name encoding or sorting).
	if hasToArrayOption(structOptions) {
		return getEncodingStructToArrayType(t, flds)
	}

	var err error
	var hasKeyAsInt bool
	var hasKeyAsStr bool
	var omitEmptyIdx []int
	e := getEncodeBuffer()
	for i := 0; i < len(flds); i++ {
		// Get field's encodeFunc
		flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
		if flds[i].ef == nil {
			err = &UnsupportedTypeError{t}
			break
		}

		// Encode field name
		if flds[i].keyAsInt {
			// keyAsInt fields must have names parseable as integers; the
			// parsed value is encoded as a CBOR int (positive or negative).
			nameAsInt, numErr := strconv.Atoi(flds[i].name)
			if numErr != nil {
				err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
				break
			}
			flds[i].nameAsInt = int64(nameAsInt)
			if nameAsInt >= 0 {
				encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
			} else {
				// CBOR negative int encodes -1 - n.
				n := nameAsInt*(-1) - 1
				encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
			}
			flds[i].cborName = make([]byte, e.Len())
			copy(flds[i].cborName, e.Bytes())
			e.Reset()

			hasKeyAsInt = true
		} else {
			// Text-string key: head (major type 3 + length) followed by the name.
			encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
			flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
			n := copy(flds[i].cborName, e.Bytes())
			copy(flds[i].cborName[n:], flds[i].name)
			e.Reset()

			// If cborName contains a text string, then cborNameByteString contains a
			// string that has the byte string major type but is otherwise identical to
			// cborName.
			flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
			copy(flds[i].cborNameByteString, flds[i].cborName)
			// Reset encoded CBOR type to byte string, preserving the "additional
			// information" bits:
			flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
				getAdditionalInformation(flds[i].cborNameByteString[0])

			hasKeyAsStr = true
		}

		// Check if field can be omitted when empty
		if flds[i].omitEmpty {
			omitEmptyIdx = append(omitEmptyIdx, i)
		}
	}
	putEncodeBuffer(e)

	if err != nil {
		// Cache the failure so repeated lookups don't re-analyze the type.
		structType := &encodingStructType{err: err}
		encodingStructTypeCache.Store(t, structType)
		return structType, structType.err
	}

	// Sort fields by canonical order
	bytewiseFields := make(fields, len(flds))
	copy(bytewiseFields, flds)
	sort.Sort(&bytewiseFieldSorter{bytewiseFields})

	// When all keys are of one kind (all int or all string), the bytewise
	// order coincides with the length-first order, so the slice is shared.
	lengthFirstFields := bytewiseFields
	if hasKeyAsInt && hasKeyAsStr {
		lengthFirstFields = make(fields, len(flds))
		copy(lengthFirstFields, flds)
		sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
	}

	structType := &encodingStructType{
		fields:             flds,
		bytewiseFields:     bytewiseFields,
		lengthFirstFields:  lengthFirstFields,
		omitEmptyFieldsIdx: omitEmptyIdx,
	}

	// Benign race: concurrent first callers may each compute and store an
	// equivalent value; the last store wins.
	encodingStructTypeCache.Store(t, structType)
	return structType, structType.err
}
|
||||||
|
|
||||||
|
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
|
||||||
|
for i := 0; i < len(flds); i++ {
|
||||||
|
// Get field's encodeFunc
|
||||||
|
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||||
|
if flds[i].ef == nil {
|
||||||
|
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
|
||||||
|
encodingStructTypeCache.Store(t, structType)
|
||||||
|
return structType, structType.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
structType := &encodingStructType{
|
||||||
|
fields: flds,
|
||||||
|
toArray: true,
|
||||||
|
}
|
||||||
|
encodingStructTypeCache.Store(t, structType)
|
||||||
|
return structType, structType.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||||
|
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||||
|
fs := v.(encodeFuncs)
|
||||||
|
return fs.ef, fs.ief
|
||||||
|
}
|
||||||
|
ef, ief := getEncodeFuncInternal(t)
|
||||||
|
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||||
|
return ef, ief
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||||
|
if v, _ := typeInfoCache.Load(t); v != nil {
|
||||||
|
return v.(*typeInfo)
|
||||||
|
}
|
||||||
|
tInfo := newTypeInfo(t)
|
||||||
|
typeInfoCache.Store(t, tInfo)
|
||||||
|
return tInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasToArrayOption(tag string) bool {
|
||||||
|
s := ",toarray"
|
||||||
|
idx := strings.Index(tag, s)
|
||||||
|
return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',')
|
||||||
|
}
|
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
type cborType uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
cborTypePositiveInt cborType = 0x00
|
||||||
|
cborTypeNegativeInt cborType = 0x20
|
||||||
|
cborTypeByteString cborType = 0x40
|
||||||
|
cborTypeTextString cborType = 0x60
|
||||||
|
cborTypeArray cborType = 0x80
|
||||||
|
cborTypeMap cborType = 0xa0
|
||||||
|
cborTypeTag cborType = 0xc0
|
||||||
|
cborTypePrimitives cborType = 0xe0
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t cborType) String() string {
|
||||||
|
switch t {
|
||||||
|
case cborTypePositiveInt:
|
||||||
|
return "positive integer"
|
||||||
|
case cborTypeNegativeInt:
|
||||||
|
return "negative integer"
|
||||||
|
case cborTypeByteString:
|
||||||
|
return "byte string"
|
||||||
|
case cborTypeTextString:
|
||||||
|
return "UTF-8 text string"
|
||||||
|
case cborTypeArray:
|
||||||
|
return "array"
|
||||||
|
case cborTypeMap:
|
||||||
|
return "map"
|
||||||
|
case cborTypeTag:
|
||||||
|
return "tag"
|
||||||
|
case cborTypePrimitives:
|
||||||
|
return "primitives"
|
||||||
|
default:
|
||||||
|
return "Invalid type " + strconv.Itoa(int(t))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type additionalInformation uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxAdditionalInformationWithoutArgument = 23
|
||||||
|
additionalInformationWith1ByteArgument = 24
|
||||||
|
additionalInformationWith2ByteArgument = 25
|
||||||
|
additionalInformationWith4ByteArgument = 26
|
||||||
|
additionalInformationWith8ByteArgument = 27
|
||||||
|
|
||||||
|
// For major type 7.
|
||||||
|
additionalInformationAsFalse = 20
|
||||||
|
additionalInformationAsTrue = 21
|
||||||
|
additionalInformationAsNull = 22
|
||||||
|
additionalInformationAsUndefined = 23
|
||||||
|
additionalInformationAsFloat16 = 25
|
||||||
|
additionalInformationAsFloat32 = 26
|
||||||
|
additionalInformationAsFloat64 = 27
|
||||||
|
|
||||||
|
// For major type 2, 3, 4, 5.
|
||||||
|
additionalInformationAsIndefiniteLengthFlag = 31
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxSimpleValueInAdditionalInformation = 23
|
||||||
|
minSimpleValueIn1ByteArgument = 32
|
||||||
|
)
|
||||||
|
|
||||||
|
func (ai additionalInformation) isIndefiniteLength() bool {
|
||||||
|
return ai == additionalInformationAsIndefiniteLengthFlag
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// From RFC 8949 Section 3:
|
||||||
|
// "The initial byte of each encoded data item contains both information about the major type
|
||||||
|
// (the high-order 3 bits, described in Section 3.1) and additional information
|
||||||
|
// (the low-order 5 bits)."
|
||||||
|
|
||||||
|
// typeMask is used to extract major type in initial byte of encoded data item.
|
||||||
|
typeMask = 0xe0
|
||||||
|
|
||||||
|
// additionalInformationMask is used to extract additional information in initial byte of encoded data item.
|
||||||
|
additionalInformationMask = 0x1f
|
||||||
|
)
|
||||||
|
|
||||||
|
func getType(raw byte) cborType {
|
||||||
|
return cborType(raw & typeMask)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAdditionalInformation(raw byte) byte {
|
||||||
|
return raw & additionalInformationMask
|
||||||
|
}
|
||||||
|
|
||||||
|
func isBreakFlag(raw byte) bool {
|
||||||
|
return raw == cborBreakFlag
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseInitialByte(b byte) (t cborType, ai byte) {
|
||||||
|
return getType(b), getAdditionalInformation(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
tagNumRFC3339Time = 0
|
||||||
|
tagNumEpochTime = 1
|
||||||
|
tagNumUnsignedBignum = 2
|
||||||
|
tagNumNegativeBignum = 3
|
||||||
|
tagNumExpectedLaterEncodingBase64URL = 21
|
||||||
|
tagNumExpectedLaterEncodingBase64 = 22
|
||||||
|
tagNumExpectedLaterEncodingBase16 = 23
|
||||||
|
tagNumSelfDescribedCBOR = 55799
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cborBreakFlag = byte(0xff)
|
||||||
|
cborByteStringWithIndefiniteLengthHead = byte(0x5f)
|
||||||
|
cborTextStringWithIndefiniteLengthHead = byte(0x7f)
|
||||||
|
cborArrayWithIndefiniteLengthHead = byte(0x9f)
|
||||||
|
cborMapWithIndefiniteLengthHead = byte(0xbf)
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
cborFalse = []byte{0xf4}
|
||||||
|
cborTrue = []byte{0xf5}
|
||||||
|
cborNil = []byte{0xf6}
|
||||||
|
cborNaN = []byte{0xf9, 0x7e, 0x00}
|
||||||
|
cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
|
||||||
|
cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
|
||||||
|
)
|
||||||
|
|
||||||
|
// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
|
||||||
|
func validBuiltinTag(tagNum uint64, contentHead byte) error {
|
||||||
|
t := getType(contentHead)
|
||||||
|
switch tagNum {
|
||||||
|
case tagNumRFC3339Time:
|
||||||
|
// Tag content (date/time text string in RFC 3339 format) must be string type.
|
||||||
|
if t != cborTypeTextString {
|
||||||
|
return newInadmissibleTagContentTypeError(
|
||||||
|
tagNumRFC3339Time,
|
||||||
|
"text string",
|
||||||
|
t.String())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case tagNumEpochTime:
|
||||||
|
// Tag content (epoch date/time) must be uint, int, or float type.
|
||||||
|
if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
|
||||||
|
return newInadmissibleTagContentTypeError(
|
||||||
|
tagNumEpochTime,
|
||||||
|
"integer or floating-point number",
|
||||||
|
t.String())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case tagNumUnsignedBignum, tagNumNegativeBignum:
|
||||||
|
// Tag content (bignum) must be byte type.
|
||||||
|
if t != cborTypeByteString {
|
||||||
|
return newInadmissibleTagContentTypeErrorf(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"tag number %d or %d must be followed by byte string, got %s",
|
||||||
|
tagNumUnsignedBignum,
|
||||||
|
tagNumNegativeBignum,
|
||||||
|
t.String(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
|
||||||
|
// From RFC 8949 3.4.5.2:
|
||||||
|
// The data item tagged can be a byte string or any other data item. In the latter
|
||||||
|
// case, the tag applies to all of the byte string data items contained in the data
|
||||||
|
// item, except for those contained in a nested data item tagged with an expected
|
||||||
|
// conversion.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
@ -0,0 +1,724 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"unicode/utf16"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/x448/float16"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DiagMode is the main interface for CBOR diagnostic notation.
|
||||||
|
type DiagMode interface {
|
||||||
|
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
|
||||||
|
Diagnose([]byte) (string, error)
|
||||||
|
|
||||||
|
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||||
|
DiagnoseFirst([]byte) (string, []byte, error)
|
||||||
|
|
||||||
|
// DiagOptions returns user specified options used to create this DiagMode.
|
||||||
|
DiagOptions() DiagOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||||
|
type ByteStringEncoding uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ByteStringBase16Encoding encodes byte strings in base16, without padding.
|
||||||
|
ByteStringBase16Encoding ByteStringEncoding = iota
|
||||||
|
|
||||||
|
// ByteStringBase32Encoding encodes byte strings in base32, without padding.
|
||||||
|
ByteStringBase32Encoding
|
||||||
|
|
||||||
|
// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
|
||||||
|
ByteStringBase32HexEncoding
|
||||||
|
|
||||||
|
// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
|
||||||
|
ByteStringBase64Encoding
|
||||||
|
|
||||||
|
maxByteStringEncoding
|
||||||
|
)
|
||||||
|
|
||||||
|
func (bse ByteStringEncoding) valid() error {
|
||||||
|
if bse >= maxByteStringEncoding {
|
||||||
|
return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiagOptions specifies Diag options.
|
||||||
|
type DiagOptions struct {
|
||||||
|
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||||
|
// Default is ByteStringBase16Encoding.
|
||||||
|
ByteStringEncoding ByteStringEncoding
|
||||||
|
|
||||||
|
// ByteStringHexWhitespace specifies notating with whitespace in byte string
|
||||||
|
// when ByteStringEncoding is ByteStringBase16Encoding.
|
||||||
|
ByteStringHexWhitespace bool
|
||||||
|
|
||||||
|
// ByteStringText specifies notating with text in byte string
|
||||||
|
// if it is a valid UTF-8 text.
|
||||||
|
ByteStringText bool
|
||||||
|
|
||||||
|
// ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string
|
||||||
|
// if it is a valid CBOR bytes.
|
||||||
|
ByteStringEmbeddedCBOR bool
|
||||||
|
|
||||||
|
// CBORSequence specifies notating CBOR sequences.
|
||||||
|
// otherwise, it returns an error if there are more bytes after the first CBOR.
|
||||||
|
CBORSequence bool
|
||||||
|
|
||||||
|
// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
|
||||||
|
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
|
||||||
|
FloatPrecisionIndicator bool
|
||||||
|
|
||||||
|
// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
|
||||||
|
// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
|
||||||
|
// require larger amounts of stack to deserialize. Don't increase this higher than you require.
|
||||||
|
MaxNestedLevels int
|
||||||
|
|
||||||
|
// MaxArrayElements specifies the max number of elements for CBOR arrays.
|
||||||
|
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||||
|
MaxArrayElements int
|
||||||
|
|
||||||
|
// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
|
||||||
|
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||||
|
MaxMapPairs int
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiagMode returns a DiagMode with immutable options.
|
||||||
|
func (opts DiagOptions) DiagMode() (DiagMode, error) {
|
||||||
|
return opts.diagMode()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (opts DiagOptions) diagMode() (*diagMode, error) {
|
||||||
|
if err := opts.ByteStringEncoding.valid(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
decMode, err := DecOptions{
|
||||||
|
MaxNestedLevels: opts.MaxNestedLevels,
|
||||||
|
MaxArrayElements: opts.MaxArrayElements,
|
||||||
|
MaxMapPairs: opts.MaxMapPairs,
|
||||||
|
}.decMode()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &diagMode{
|
||||||
|
byteStringEncoding: opts.ByteStringEncoding,
|
||||||
|
byteStringHexWhitespace: opts.ByteStringHexWhitespace,
|
||||||
|
byteStringText: opts.ByteStringText,
|
||||||
|
byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR,
|
||||||
|
cborSequence: opts.CBORSequence,
|
||||||
|
floatPrecisionIndicator: opts.FloatPrecisionIndicator,
|
||||||
|
decMode: decMode,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type diagMode struct {
|
||||||
|
byteStringEncoding ByteStringEncoding
|
||||||
|
byteStringHexWhitespace bool
|
||||||
|
byteStringText bool
|
||||||
|
byteStringEmbeddedCBOR bool
|
||||||
|
cborSequence bool
|
||||||
|
floatPrecisionIndicator bool
|
||||||
|
decMode *decMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiagOptions returns user specified options used to create this DiagMode.
|
||||||
|
func (dm *diagMode) DiagOptions() DiagOptions {
|
||||||
|
return DiagOptions{
|
||||||
|
ByteStringEncoding: dm.byteStringEncoding,
|
||||||
|
ByteStringHexWhitespace: dm.byteStringHexWhitespace,
|
||||||
|
ByteStringText: dm.byteStringText,
|
||||||
|
ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR,
|
||||||
|
CBORSequence: dm.cborSequence,
|
||||||
|
FloatPrecisionIndicator: dm.floatPrecisionIndicator,
|
||||||
|
MaxNestedLevels: dm.decMode.maxNestedLevels,
|
||||||
|
MaxArrayElements: dm.decMode.maxArrayElements,
|
||||||
|
MaxMapPairs: dm.decMode.maxMapPairs,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
|
||||||
|
func (dm *diagMode) Diagnose(data []byte) (string, error) {
|
||||||
|
return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||||
|
func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||||
|
return newDiagnose(data, dm.decMode, dm).diagFirst()
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultDiagMode, _ = DiagOptions{}.diagMode()
|
||||||
|
|
||||||
|
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
|
||||||
|
// using the default diagnostic mode.
|
||||||
|
//
|
||||||
|
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
|
||||||
|
func Diagnose(data []byte) (string, error) {
|
||||||
|
return defaultDiagMode.Diagnose(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||||
|
func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||||
|
return defaultDiagMode.DiagnoseFirst(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
type diagnose struct {
|
||||||
|
dm *diagMode
|
||||||
|
d *decoder
|
||||||
|
w *bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
|
||||||
|
return &diagnose{
|
||||||
|
dm: diagm,
|
||||||
|
d: &decoder{data: data, dm: decm},
|
||||||
|
w: &bytes.Buffer{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *diagnose) diag(cborSequence bool) (string, error) {
|
||||||
|
// CBOR Sequence
|
||||||
|
firstItem := true
|
||||||
|
for {
|
||||||
|
switch err := di.wellformed(cborSequence); err {
|
||||||
|
case nil:
|
||||||
|
if !firstItem {
|
||||||
|
di.w.WriteString(", ")
|
||||||
|
}
|
||||||
|
firstItem = false
|
||||||
|
if itemErr := di.item(); itemErr != nil {
|
||||||
|
return di.w.String(), itemErr
|
||||||
|
}
|
||||||
|
|
||||||
|
case io.EOF:
|
||||||
|
if firstItem {
|
||||||
|
return di.w.String(), err
|
||||||
|
}
|
||||||
|
return di.w.String(), nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return di.w.String(), err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
|
||||||
|
err = di.wellformed(true)
|
||||||
|
if err == nil {
|
||||||
|
err = di.item()
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
// Return EDN and the rest of the data slice (which might be len 0)
|
||||||
|
return di.w.String(), di.d.data[di.d.off:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return di.w.String(), nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *diagnose) wellformed(allowExtraData bool) error {
|
||||||
|
off := di.d.off
|
||||||
|
err := di.d.wellformed(allowExtraData, false)
|
||||||
|
di.d.off = off
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (di *diagnose) item() error { //nolint:gocyclo
|
||||||
|
initialByte := di.d.data[di.d.off]
|
||||||
|
switch initialByte {
|
||||||
|
case cborByteStringWithIndefiniteLengthHead,
|
||||||
|
cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
|
||||||
|
di.d.off++
|
||||||
|
if isBreakFlag(di.d.data[di.d.off]) {
|
||||||
|
di.d.off++
|
||||||
|
switch initialByte {
|
||||||
|
case cborByteStringWithIndefiniteLengthHead:
|
||||||
|
// indefinite-length bytes with no chunks.
|
||||||
|
di.w.WriteString(`''_`)
|
||||||
|
return nil
|
||||||
|
case cborTextStringWithIndefiniteLengthHead:
|
||||||
|
// indefinite-length text with no chunks.
|
||||||
|
di.w.WriteString(`""_`)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
di.w.WriteString("(_ ")
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for !di.d.foundBreak() {
|
||||||
|
if i > 0 {
|
||||||
|
di.w.WriteString(", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
i++
|
||||||
|
// wellformedIndefiniteString() already checked that the next item is a byte/text string.
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
di.w.WriteByte(')')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case cborArrayWithIndefiniteLengthHead: // indefinite-length array
|
||||||
|
di.d.off++
|
||||||
|
di.w.WriteString("[_ ")
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for !di.d.foundBreak() {
|
||||||
|
if i > 0 {
|
||||||
|
di.w.WriteString(", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
i++
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
di.w.WriteByte(']')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case cborMapWithIndefiniteLengthHead: // indefinite-length map
|
||||||
|
di.d.off++
|
||||||
|
di.w.WriteString("{_ ")
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for !di.d.foundBreak() {
|
||||||
|
if i > 0 {
|
||||||
|
di.w.WriteString(", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
i++
|
||||||
|
// key
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
di.w.WriteString(": ")
|
||||||
|
|
||||||
|
// value
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
di.w.WriteByte('}')
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
t := di.d.nextCBORType()
|
||||||
|
switch t {
|
||||||
|
case cborTypePositiveInt:
|
||||||
|
_, _, val := di.d.getHead()
|
||||||
|
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case cborTypeNegativeInt:
|
||||||
|
_, _, val := di.d.getHead()
|
||||||
|
if val > math.MaxInt64 {
|
||||||
|
// CBOR negative integer overflows int64, use big.Int to store value.
|
||||||
|
bi := new(big.Int)
|
||||||
|
bi.SetUint64(val)
|
||||||
|
bi.Add(bi, big.NewInt(1))
|
||||||
|
bi.Neg(bi)
|
||||||
|
di.w.WriteString(bi.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
nValue := int64(-1) ^ int64(val)
|
||||||
|
di.w.WriteString(strconv.FormatInt(nValue, 10))
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case cborTypeByteString:
|
||||||
|
b, _ := di.d.parseByteString()
|
||||||
|
return di.encodeByteString(b)
|
||||||
|
|
||||||
|
case cborTypeTextString:
|
||||||
|
b, err := di.d.parseTextString()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return di.encodeTextString(string(b), '"')
|
||||||
|
|
||||||
|
case cborTypeArray:
|
||||||
|
_, _, val := di.d.getHead()
|
||||||
|
count := int(val)
|
||||||
|
di.w.WriteByte('[')
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
di.w.WriteString(", ")
|
||||||
|
}
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
di.w.WriteByte(']')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case cborTypeMap:
|
||||||
|
_, _, val := di.d.getHead()
|
||||||
|
count := int(val)
|
||||||
|
di.w.WriteByte('{')
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
di.w.WriteString(", ")
|
||||||
|
}
|
||||||
|
// key
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
di.w.WriteString(": ")
|
||||||
|
// value
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
di.w.WriteByte('}')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case cborTypeTag:
|
||||||
|
_, _, tagNum := di.d.getHead()
|
||||||
|
switch tagNum {
|
||||||
|
case tagNumUnsignedBignum:
|
||||||
|
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||||
|
return newInadmissibleTagContentTypeError(
|
||||||
|
tagNumUnsignedBignum,
|
||||||
|
"byte string",
|
||||||
|
nt.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
b, _ := di.d.parseByteString()
|
||||||
|
bi := new(big.Int).SetBytes(b)
|
||||||
|
di.w.WriteString(bi.String())
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case tagNumNegativeBignum:
|
||||||
|
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||||
|
return newInadmissibleTagContentTypeError(
|
||||||
|
tagNumNegativeBignum,
|
||||||
|
"byte string",
|
||||||
|
nt.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, _ := di.d.parseByteString()
|
||||||
|
bi := new(big.Int).SetBytes(b)
|
||||||
|
bi.Add(bi, big.NewInt(1))
|
||||||
|
bi.Neg(bi)
|
||||||
|
di.w.WriteString(bi.String())
|
||||||
|
return nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
di.w.WriteString(strconv.FormatUint(tagNum, 10))
|
||||||
|
di.w.WriteByte('(')
|
||||||
|
if err := di.item(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
di.w.WriteByte(')')
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case cborTypePrimitives:
|
||||||
|
_, ai, val := di.d.getHead()
|
||||||
|
switch ai {
|
||||||
|
case additionalInformationAsFalse:
|
||||||
|
di.w.WriteString("false")
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case additionalInformationAsTrue:
|
||||||
|
di.w.WriteString("true")
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case additionalInformationAsNull:
|
||||||
|
di.w.WriteString("null")
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case additionalInformationAsUndefined:
|
||||||
|
di.w.WriteString("undefined")
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case additionalInformationAsFloat16,
|
||||||
|
additionalInformationAsFloat32,
|
||||||
|
additionalInformationAsFloat64:
|
||||||
|
return di.encodeFloat(ai, val)
|
||||||
|
|
||||||
|
default:
|
||||||
|
di.w.WriteString("simple(")
|
||||||
|
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||||
|
di.w.WriteByte(')')
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeU16 format a rune as "\uxxxx"
|
||||||
|
func (di *diagnose) writeU16(val rune) {
|
||||||
|
di.w.WriteString("\\u")
|
||||||
|
var in [2]byte
|
||||||
|
in[0] = byte(val >> 8)
|
||||||
|
in[1] = byte(val)
|
||||||
|
sz := hex.EncodedLen(len(in))
|
||||||
|
di.w.Grow(sz)
|
||||||
|
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||||
|
hex.Encode(dst, in[:])
|
||||||
|
di.w.Write(dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
|
||||||
|
var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||||
|
|
||||||
|
func (di *diagnose) encodeByteString(val []byte) error {
|
||||||
|
if len(val) > 0 {
|
||||||
|
if di.dm.byteStringText && utf8.Valid(val) {
|
||||||
|
return di.encodeTextString(string(val), '\'')
|
||||||
|
}
|
||||||
|
|
||||||
|
if di.dm.byteStringEmbeddedCBOR {
|
||||||
|
di2 := newDiagnose(val, di.dm.decMode, di.dm)
|
||||||
|
// should always notating embedded CBOR sequence.
|
||||||
|
if str, err := di2.diag(true); err == nil {
|
||||||
|
di.w.WriteString("<<")
|
||||||
|
di.w.WriteString(str)
|
||||||
|
di.w.WriteString(">>")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch di.dm.byteStringEncoding {
|
||||||
|
case ByteStringBase16Encoding:
|
||||||
|
di.w.WriteString("h'")
|
||||||
|
if di.dm.byteStringHexWhitespace {
|
||||||
|
sz := hex.EncodedLen(len(val))
|
||||||
|
if len(val) > 0 {
|
||||||
|
sz += len(val) - 1
|
||||||
|
}
|
||||||
|
di.w.Grow(sz)
|
||||||
|
|
||||||
|
dst := di.w.Bytes()[di.w.Len():]
|
||||||
|
for i := range val {
|
||||||
|
if i > 0 {
|
||||||
|
dst = append(dst, ' ')
|
||||||
|
}
|
||||||
|
hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
|
||||||
|
dst = dst[:len(dst)+2]
|
||||||
|
}
|
||||||
|
di.w.Write(dst)
|
||||||
|
} else {
|
||||||
|
sz := hex.EncodedLen(len(val))
|
||||||
|
di.w.Grow(sz)
|
||||||
|
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||||
|
hex.Encode(dst, val)
|
||||||
|
di.w.Write(dst)
|
||||||
|
}
|
||||||
|
di.w.WriteByte('\'')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case ByteStringBase32Encoding:
|
||||||
|
di.w.WriteString("b32'")
|
||||||
|
sz := rawBase32Encoding.EncodedLen(len(val))
|
||||||
|
di.w.Grow(sz)
|
||||||
|
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||||
|
rawBase32Encoding.Encode(dst, val)
|
||||||
|
di.w.Write(dst)
|
||||||
|
di.w.WriteByte('\'')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case ByteStringBase32HexEncoding:
|
||||||
|
di.w.WriteString("h32'")
|
||||||
|
sz := rawBase32HexEncoding.EncodedLen(len(val))
|
||||||
|
di.w.Grow(sz)
|
||||||
|
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||||
|
rawBase32HexEncoding.Encode(dst, val)
|
||||||
|
di.w.Write(dst)
|
||||||
|
di.w.WriteByte('\'')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case ByteStringBase64Encoding:
|
||||||
|
di.w.WriteString("b64'")
|
||||||
|
sz := base64.RawURLEncoding.EncodedLen(len(val))
|
||||||
|
di.w.Grow(sz)
|
||||||
|
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||||
|
base64.RawURLEncoding.Encode(dst, val)
|
||||||
|
di.w.Write(dst)
|
||||||
|
di.w.WriteByte('\'')
|
||||||
|
return nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
// It should not be possible for users to construct a *diagMode with an invalid byte
|
||||||
|
// string encoding.
|
||||||
|
panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const utf16SurrSelf = rune(0x10000)
|
||||||
|
|
||||||
|
// quote should be either `'` or `"`
|
||||||
|
func (di *diagnose) encodeTextString(val string, quote byte) error {
|
||||||
|
di.w.WriteByte(quote)
|
||||||
|
|
||||||
|
for i := 0; i < len(val); {
|
||||||
|
if b := val[i]; b < utf8.RuneSelf {
|
||||||
|
switch {
|
||||||
|
case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
|
||||||
|
di.w.WriteByte('\\')
|
||||||
|
|
||||||
|
switch b {
|
||||||
|
case '\t':
|
||||||
|
b = 't'
|
||||||
|
case '\n':
|
||||||
|
b = 'n'
|
||||||
|
case '\r':
|
||||||
|
b = 'r'
|
||||||
|
}
|
||||||
|
di.w.WriteByte(b)
|
||||||
|
|
||||||
|
case b >= ' ' && b <= '~':
|
||||||
|
di.w.WriteByte(b)
|
||||||
|
|
||||||
|
default:
|
||||||
|
di.writeU16(rune(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
i++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
c, size := utf8.DecodeRuneInString(val[i:])
|
||||||
|
switch {
|
||||||
|
case c == utf8.RuneError:
|
||||||
|
return &SemanticError{"cbor: invalid UTF-8 string"}
|
||||||
|
|
||||||
|
case c < utf16SurrSelf:
|
||||||
|
di.writeU16(c)
|
||||||
|
|
||||||
|
default:
|
||||||
|
c1, c2 := utf16.EncodeRune(c)
|
||||||
|
di.writeU16(c1)
|
||||||
|
di.writeU16(c2)
|
||||||
|
}
|
||||||
|
|
||||||
|
i += size
|
||||||
|
}
|
||||||
|
|
||||||
|
di.w.WriteByte(quote)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeFloat writes the diagnostic-notation text for a CBOR float (major
// type 7) whose additional-information byte is ai and whose raw bits are val.
// NaN and infinities are written as the literals "NaN", "Infinity" and
// "-Infinity"; finite values are formatted like an ES6/JSON number. When the
// mode's floatPrecisionIndicator option is set, a "_1"/"_2"/"_3" suffix marks
// the original float16/float32/float64 encoding.
func (di *diagnose) encodeFloat(ai byte, val uint64) error {
	f64 := float64(0)
	switch ai {
	case additionalInformationAsFloat16:
		f16 := float16.Frombits(uint16(val))
		switch {
		case f16.IsNaN():
			di.w.WriteString("NaN")
			return nil
		case f16.IsInf(1):
			di.w.WriteString("Infinity")
			return nil
		case f16.IsInf(-1):
			di.w.WriteString("-Infinity")
			return nil
		default:
			f64 = float64(f16.Float32())
		}

	case additionalInformationAsFloat32:
		f32 := math.Float32frombits(uint32(val))
		switch {
		// f32 != f32 is true only for NaN (NaN never compares equal to itself).
		case f32 != f32:
			di.w.WriteString("NaN")
			return nil
		// Only +Inf/-Inf lie beyond +/-MaxFloat32.
		case f32 > math.MaxFloat32:
			di.w.WriteString("Infinity")
			return nil
		case f32 < -math.MaxFloat32:
			di.w.WriteString("-Infinity")
			return nil
		default:
			f64 = float64(f32)
		}

	case additionalInformationAsFloat64:
		f64 = math.Float64frombits(val)
		switch {
		case f64 != f64: // NaN check, as above
			di.w.WriteString("NaN")
			return nil
		case f64 > math.MaxFloat64:
			di.w.WriteString("Infinity")
			return nil
		case f64 < -math.MaxFloat64:
			di.w.WriteString("-Infinity")
			return nil
		}
	}
	// Use ES6 number to string conversion which should match most JSON generators.
	// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
	const bitSize = 64
	b := make([]byte, 0, 32)
	if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
		// clean up e-09 to e-9
		n := len(b)
		if n >= 4 && string(b[n-4:n-1]) == "e-0" {
			b = append(b[:n-2], b[n-1])
		}
	} else {
		b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
	}

	// add decimal point and trailing zero if needed
	if bytes.IndexByte(b, '.') < 0 {
		if i := bytes.IndexByte(b, 'e'); i < 0 {
			// No exponent: e.g. "3" -> "3.0".
			b = append(b, '.', '0')
		} else {
			// Has exponent: splice ".0" in before the 'e', e.g. "1e+21" -> "1.0e+21".
			b = append(b[:i+2], b[i:]...)
			b[i] = '.'
			b[i+1] = '0'
		}
	}

	di.w.WriteString(string(b))

	if di.dm.floatPrecisionIndicator {
		switch ai {
		case additionalInformationAsFloat16:
			di.w.WriteString("_1")
			return nil

		case additionalInformationAsFloat32:
			di.w.WriteString("_2")
			return nil

		case additionalInformationAsFloat64:
			di.w.WriteString("_3")
			return nil
		}
	}

	return nil
}
|
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

/*
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.

Encoding options allow "preferred serialization" by encoding integers and floats
to their smallest forms (e.g. float16) when values fit.

Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
and easier to use with structs.

For example, "toarray" tag makes struct fields encode to CBOR array elements. And
"keyasint" makes a field encode to an element of CBOR map with specified int key.

Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go

# Basics

The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start

Function signatures identical to encoding/json include:

	Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.

Standard interfaces include:

	BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.

Custom encoding and decoding is possible by implementing standard interfaces for
user-defined Go types.

Codec functions are available at package-level (using defaults options) or by
creating modes from options at runtime.

"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).

EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.

	em, err := cbor.EncOptions{...}.EncMode()
	em, err := cbor.CanonicalEncOptions().EncMode()
	em, err := cbor.CTAP2EncOptions().EncMode()

Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
modes won't accidentally change at runtime after they're created.

Modes are intended to be reused and are safe for concurrent use.

EncMode and DecMode Interfaces

	// EncMode interface uses immutable options and is safe for concurrent use.
	type EncMode interface {
		Marshal(v interface{}) ([]byte, error)
		NewEncoder(w io.Writer) *Encoder
		EncOptions() EncOptions // returns copy of options
	}

	// DecMode interface uses immutable options and is safe for concurrent use.
	type DecMode interface {
		Unmarshal(data []byte, v interface{}) error
		NewDecoder(r io.Reader) *Decoder
		DecOptions() DecOptions // returns copy of options
	}

Using Default Encoding Mode

	b, err := cbor.Marshal(v)

	encoder := cbor.NewEncoder(w)
	err = encoder.Encode(v)

Using Default Decoding Mode

	err := cbor.Unmarshal(b, &v)

	decoder := cbor.NewDecoder(r)
	err = decoder.Decode(&v)

Creating and Using Encoding Modes

	// Create EncOptions using either struct literal or a function.
	opts := cbor.CanonicalEncOptions()

	// If needed, modify encoding options
	opts.Time = cbor.TimeUnix

	// Create reusable EncMode interface with immutable options, safe for concurrent use.
	em, err := opts.EncMode()

	// Use EncMode like encoding/json, with same function signatures.
	b, err := em.Marshal(v)
	// or
	encoder := em.NewEncoder(w)
	err := encoder.Encode(v)

	// NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
	// specified during creation of em (encoding mode).

# CBOR Options

Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options

Encoding Options: https://github.com/fxamacker/cbor#encoding-options

Decoding Options: https://github.com/fxamacker/cbor#decoding-options

# Struct Tags

Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
If both struct tags are specified then `cbor` is used.

Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.

For example, "toarray" makes struct fields encode to array elements. And "keyasint"
makes struct fields encode to elements of CBOR map with int keys.

https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png

Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1

# Tests and Fuzzing

Over 375 tests are included in this package. Cover-guided fuzzing is handled by
a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
*/
package cbor
|
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build go1.20

package cbor

import (
	"bytes"
	"reflect"
	"sync"
)

// mapKeyValueEncodeFunc pairs the encode functions for a map's key and element
// types with pools of reusable reflect.Values (SetIterKey/SetIterValue, added
// in Go 1.18+, let map iteration avoid per-entry allocations).
type mapKeyValueEncodeFunc struct {
	kf, ef       encodeFunc // key and element encoders
	kpool, vpool sync.Pool  // pools of *reflect.Value scratch holders for keys/values
}

// encodeKeyValues encodes every key/value pair of map v into e. When kvs is
// non-nil it also records, for each pair, the byte offsets of the key, value,
// and end of the entry relative to e's length at entry (used by callers that
// later sort/deduplicate entries).
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	iterk := me.kpool.Get().(*reflect.Value)
	defer func() {
		// Zero before returning to the pool so the pooled reflect.Value
		// doesn't keep the last map key alive.
		iterk.SetZero()
		me.kpool.Put(iterk)
	}()
	iterv := me.vpool.Get().(*reflect.Value)
	defer func() {
		iterv.SetZero()
		me.vpool.Put(iterv)
	}()

	if kvs == nil {
		// Fast path: no offset bookkeeping requested.
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			iterk.SetIterKey(iter)
			iterv.SetIterValue(iter)

			if err := me.kf(e, em, *iterk); err != nil {
				return err
			}
			if err := me.ef(e, em, *iterv); err != nil {
				return err
			}
		}
		return nil
	}

	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		iterk.SetIterKey(iter)
		iterv.SetIterValue(iter)

		offset := e.Len()
		if err := me.kf(e, em, *iterk); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, *iterv); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}

// getEncodeMapFunc builds an encodeFunc for map type t, or returns nil if
// either the key or element type has no encoder.
func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{
		kf: kf,
		ef: ef,
		kpool: sync.Pool{
			New: func() interface{} {
				// Addressable zero value of the key type, reused across iterations.
				rk := reflect.New(t.Key()).Elem()
				return &rk
			},
		},
		vpool: sync.Pool{
			New: func() interface{} {
				rv := reflect.New(t.Elem()).Elem()
				return &rv
			},
		},
	}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
|
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
Normal file
60
vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build !go1.20

package cbor

import (
	"bytes"
	"reflect"
)

// mapKeyValueEncodeFunc pairs the encode functions for a map's key and element
// types. Pre-go1.20 variant: reflect.Value has no SetIterKey/SetIterValue
// here, so iter.Key()/iter.Value() are used directly (no value pooling).
type mapKeyValueEncodeFunc struct {
	kf, ef encodeFunc // key and element encoders
}

// encodeKeyValues encodes every key/value pair of map v into e. When kvs is
// non-nil it also records, for each pair, the byte offsets of the key, value,
// and end of the entry relative to e's length at entry.
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	if kvs == nil {
		// Fast path: no offset bookkeeping requested.
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			if err := me.kf(e, em, iter.Key()); err != nil {
				return err
			}
			if err := me.ef(e, em, iter.Value()); err != nil {
				return err
			}
		}
		return nil
	}

	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		offset := e.Len()
		if err := me.kf(e, em, iter.Key()); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, iter.Value()); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}

// getEncodeMapFunc builds an encodeFunc for map type t, or returns nil if
// either the key or element type has no encoder.
func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
|
69
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
Normal file
69
vendor/github.com/fxamacker/cbor/v2/simplevalue.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
package cbor

import (
	"errors"
	"fmt"
	"reflect"
)

// SimpleValue represents CBOR simple value.
// CBOR simple value is:
//   - an extension point like CBOR tag.
//   - a subset of CBOR major type 7 that isn't floating-point.
//   - "identified by a number between 0 and 255, but distinct from that number itself".
//     For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
//
// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined".
// Other CBOR simple values are currently unassigned/reserved by IANA.
type SimpleValue uint8

var (
	// typeSimpleValue is the reflect.Type of SimpleValue, for type dispatch elsewhere in the package.
	typeSimpleValue = reflect.TypeOf(SimpleValue(0))
)

// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
// Values in the range (maxSimpleValueInAdditionalInformation,
// minSimpleValueIn1ByteArgument) are rejected because RFC 8949 makes their
// two-byte encodings not well-formed (see quote below).
func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
	// RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
	// "An encoder MUST NOT issue two-byte sequences that start with 0xf8
	// (major type 7, additional information 24) and continue with a byte
	// less than 0x20 (32 decimal). Such sequences are not well-formed.
	// (This implies that an encoder cannot encode false, true, null, or
	// undefined in two-byte sequences and that only the one-byte variants
	// of these are well-formed; more generally speaking, each simple value
	// only has a single representation variant)."

	switch {
	case sv <= maxSimpleValueInAdditionalInformation:
		// One-byte form: value embedded in the additional-information bits.
		return []byte{byte(cborTypePrimitives) | byte(sv)}, nil

	case sv >= minSimpleValueIn1ByteArgument:
		// Two-byte form: 0xf8 head followed by the value byte.
		return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil

	default:
		return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
	}
}

// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
	if sv == nil {
		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
	}

	d := decoder{data: data, dm: defaultDecMode}

	typ, ai, val := d.getHead()

	if typ != cborTypePrimitives {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
	}
	if ai > additionalInformationWith1ByteArgument {
		// Larger arguments belong to floats or reserved encodings, not simple values.
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
	}

	// It is safe to cast val to uint8 here because
	// - data is already verified to be well-formed CBOR simple value and
	// - val is <= math.MaxUint8.
	*sv = SimpleValue(val)
	return nil
}
|
277
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
Normal file
277
vendor/github.com/fxamacker/cbor/v2/stream.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

package cbor

import (
	"bytes"
	"errors"
	"io"
	"reflect"
)

// Decoder reads and decodes CBOR values from io.Reader.
type Decoder struct {
	r         io.Reader
	d         decoder // reused low-level decoder; reset before each data item
	buf       []byte  // buffered data read from r; may span multiple data items
	off       int     // next read offset in buf
	bytesRead int     // total bytes consumed by Decode/Skip so far
}

// NewDecoder returns a new decoder that reads and decodes from r using
// the default decoding options.
func NewDecoder(r io.Reader) *Decoder {
	return defaultDecMode.NewDecoder(r)
}

// Decode reads CBOR value and decodes it into the value pointed to by v.
func (dec *Decoder) Decode(v interface{}) error {
	_, err := dec.readNext()
	if err != nil {
		// Return validation error or read error.
		return err
	}

	dec.d.reset(dec.buf[dec.off:])
	err = dec.d.value(v)

	// Increment dec.off even if decoding err is not nil because
	// dec.d.off points to the next CBOR data item if current
	// CBOR data item is valid but failed to be decoded into v.
	// This allows next CBOR data item to be decoded in next
	// call to this function.
	dec.off += dec.d.off
	dec.bytesRead += dec.d.off

	return err
}

// Skip skips to the next CBOR data item (if there is any),
// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc.
func (dec *Decoder) Skip() error {
	n, err := dec.readNext()
	if err != nil {
		// Return validation error or read error.
		return err
	}

	dec.off += n
	dec.bytesRead += n
	return nil
}

// NumBytesRead returns the number of bytes read.
func (dec *Decoder) NumBytesRead() int {
	return dec.bytesRead
}

// Buffered returns a reader for data remaining in Decoder's buffer.
// Returned reader is valid until the next call to Decode or Skip.
func (dec *Decoder) Buffered() io.Reader {
	return bytes.NewReader(dec.buf[dec.off:])
}

// readNext() reads next CBOR data item from Reader to buffer.
// It returns the size of next CBOR data item.
// It also returns validation error or read error if any.
func (dec *Decoder) readNext() (int, error) {
	var readErr error
	var validErr error

	for {
		// Process any unread data in dec.buf.
		if dec.off < len(dec.buf) {
			dec.d.reset(dec.buf[dec.off:])
			off := dec.off // Save offset before data validation
			validErr = dec.d.wellformed(true, false)
			dec.off = off // Restore offset

			if validErr == nil {
				return dec.d.off, nil
			}

			if validErr != io.ErrUnexpectedEOF {
				return 0, validErr
			}

			// Process last read error on io.ErrUnexpectedEOF.
			if readErr != nil {
				if readErr == io.EOF {
					// current CBOR data item is incomplete.
					return 0, io.ErrUnexpectedEOF
				}
				return 0, readErr
			}
		}

		// More data is needed and there was no read error.
		var n int
		for n == 0 {
			n, readErr = dec.read()
			if n == 0 && readErr != nil {
				// No more data can be read and read error is encountered.
				// At this point, validErr is either nil or io.ErrUnexpectedEOF.
				if readErr == io.EOF {
					if validErr == io.ErrUnexpectedEOF {
						// current CBOR data item is incomplete.
						return 0, io.ErrUnexpectedEOF
					}
				}
				return 0, readErr
			}
		}

		// At this point, dec.buf contains new data from last read (n > 0).
	}
}

// read() reads data from Reader to buffer.
// It returns number of bytes read and any read error encountered.
// Postconditions:
//   - dec.buf contains previously unread data and new data.
//   - dec.off is 0.
func (dec *Decoder) read() (int, error) {
	// Grow buf if needed.
	const minRead = 512
	if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
		oldUnreadBuf := dec.buf[dec.off:]
		dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
		dec.overwriteBuf(oldUnreadBuf)
	}

	// Copy unread data over read data and reset off to 0.
	if dec.off > 0 {
		dec.overwriteBuf(dec.buf[dec.off:])
	}

	// Read from reader and reslice buf.
	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
	dec.buf = dec.buf[0 : len(dec.buf)+n]
	return n, err
}

// overwriteBuf moves newBuf to the front of dec.buf and resets dec.off to 0.
func (dec *Decoder) overwriteBuf(newBuf []byte) {
	n := copy(dec.buf, newBuf)
	dec.buf = dec.buf[:n]
	dec.off = 0
}

// Encoder writes CBOR values to io.Writer.
type Encoder struct {
	w          io.Writer
	em         *encMode
	indefTypes []cborType // stack of currently-open indefinite-length containers
}

// NewEncoder returns a new encoder that writes to w using the default encoding options.
func NewEncoder(w io.Writer) *Encoder {
	return defaultEncMode.NewEncoder(w)
}

// Encode writes the CBOR encoding of v.
func (enc *Encoder) Encode(v interface{}) error {
	if len(enc.indefTypes) > 0 && v != nil {
		// Inside an open indefinite-length string, only chunks of the
		// matching kind (string or byte slice/array) are allowed.
		indefType := enc.indefTypes[len(enc.indefTypes)-1]
		if indefType == cborTypeTextString {
			k := reflect.TypeOf(v).Kind()
			if k != reflect.String {
				return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
			}
		} else if indefType == cborTypeByteString {
			t := reflect.TypeOf(v)
			k := t.Kind()
			if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
				return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
			}
		}
	}

	buf := getEncodeBuffer()

	err := encode(buf, enc.em, reflect.ValueOf(v))
	if err == nil {
		_, err = enc.w.Write(buf.Bytes())
	}

	putEncodeBuffer(buf)
	return err
}

// StartIndefiniteByteString starts byte string encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings
// ("chunks") as one contiguous string until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteByteString() error {
	return enc.startIndefinite(cborTypeByteString)
}

// StartIndefiniteTextString starts text string encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings
// ("chunks") as one contiguous string until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteTextString() error {
	return enc.startIndefinite(cborTypeTextString)
}

// StartIndefiniteArray starts array encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes elements of the array
// until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteArray() error {
	return enc.startIndefinite(cborTypeArray)
}

// StartIndefiniteMap starts array encoding of indefinite length.
// Subsequent calls of (*Encoder).Encode() encodes elements of the map
// until EndIndefinite is called.
func (enc *Encoder) StartIndefiniteMap() error {
	return enc.startIndefinite(cborTypeMap)
}

// EndIndefinite closes last opened indefinite length value.
func (enc *Encoder) EndIndefinite() error {
	if len(enc.indefTypes) == 0 {
		return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
	}
	_, err := enc.w.Write([]byte{cborBreakFlag})
	if err == nil {
		// Only pop the stack after the break byte was actually written.
		enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
	}
	return err
}

// cborIndefHeader maps each container type to its one-byte
// indefinite-length head.
var cborIndefHeader = map[cborType][]byte{
	cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
	cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
	cborTypeArray:      {cborArrayWithIndefiniteLengthHead},
	cborTypeMap:        {cborMapWithIndefiniteLengthHead},
}

// startIndefinite writes the indefinite-length head for typ and pushes it on
// the open-container stack; rejected when the mode forbids indefinite lengths.
func (enc *Encoder) startIndefinite(typ cborType) error {
	if enc.em.indefLength == IndefLengthForbidden {
		return &IndefiniteLengthError{typ}
	}
	_, err := enc.w.Write(cborIndefHeader[typ])
	if err == nil {
		enc.indefTypes = append(enc.indefTypes, typ)
	}
	return err
}

// RawMessage is a raw encoded CBOR value.
type RawMessage []byte

// MarshalCBOR returns m or CBOR nil if m is nil.
func (m RawMessage) MarshalCBOR() ([]byte, error) {
	if len(m) == 0 {
		return cborNil, nil
	}
	return m, nil
}

// UnmarshalCBOR creates a copy of data and saves to *m.
func (m *RawMessage) UnmarshalCBOR(data []byte) error {
	if m == nil {
		return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
	}
	*m = append((*m)[0:0], data...)
	return nil
}
|
260
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
Normal file
260
vendor/github.com/fxamacker/cbor/v2/structfields.go
generated
vendored
Normal file
@ -0,0 +1,260 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

package cbor

import (
	"reflect"
	"sort"
	"strings"
)

// field describes one encodable/decodable struct field discovered by getFields.
type field struct {
	name               string
	nameAsInt          int64 // used to decoder to match field name with CBOR int
	cborName           []byte
	cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
	idx                []int  // index path from the root struct (see reflect.Type.FieldByIndex)
	typ                reflect.Type
	ef                 encodeFunc
	ief                isEmptyFunc
	typInfo            *typeInfo // used to decoder to reuse type info
	tagged             bool      // used to choose dominant field (at the same level tagged fields dominate untagged fields)
	omitEmpty          bool      // used to skip empty field
	keyAsInt           bool      // used to encode/decode field name as int
}

type fields []*field

// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
type indexFieldSorter struct {
	fields fields
}

func (x *indexFieldSorter) Len() int {
	return len(x.fields)
}

func (x *indexFieldSorter) Swap(i, j int) {
	x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
}

func (x *indexFieldSorter) Less(i, j int) bool {
	iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
	for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
		if iIdx[k] != jIdx[k] {
			return iIdx[k] < jIdx[k]
		}
	}
	// Equal prefix: the shallower (shorter) index path sorts first.
	return len(iIdx) <= len(jIdx)
}

// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
type nameLevelAndTagFieldSorter struct {
	fields fields
}

func (x *nameLevelAndTagFieldSorter) Len() int {
	return len(x.fields)
}

func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
	x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
}

func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
	fi, fj := x.fields[i], x.fields[j]
	if fi.name != fj.name {
		return fi.name < fj.name
	}
	if len(fi.idx) != len(fj.idx) {
		return len(fi.idx) < len(fj.idx)
	}
	if fi.tagged != fj.tagged {
		return fi.tagged
	}
	return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
}

// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
func getFields(t reflect.Type) (flds fields, structOptions string) {
	// Get special field "_" tag options
	if f, ok := t.FieldByName("_"); ok {
		tag := f.Tag.Get("cbor")
		if tag != "-" {
			structOptions = tag
		}
	}

	// nTypes contains next level anonymous fields' types and indexes
	// (there can be multiple fields of the same type at the same level)
	flds, nTypes := appendFields(t, nil, nil, nil)

	if len(nTypes) > 0 {

		var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
		vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels

		// Breadth-first traversal of embedded (anonymous) struct fields,
		// level by level, mirroring encoding/json's visibility rules.
		for len(nTypes) > 0 {
			cTypes, nTypes = nTypes, nil

			for t, idx := range cTypes {
				// If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
				if len(idx) > 1 {
					continue
				}

				// Anonymous field of the same type at deeper nested level is ignored.
				if vTypes[t] {
					continue
				}
				vTypes[t] = true

				flds, nTypes = appendFields(t, idx[0], flds, nTypes)
			}
		}
	}

	sort.Sort(&nameLevelAndTagFieldSorter{flds})

	// Keep visible fields.
	j := 0 // index of next unique field
	for i := 0; i < len(flds); {
		name := flds[i].name
		if i == len(flds)-1 || // last field
			name != flds[i+1].name || // field i has unique field name
			len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
			(flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
			flds[j] = flds[i]
			j++
		}

		// Skip fields with the same field name.
		for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
		}
	}
	if j != len(flds) {
		flds = flds[:j]
	}

	// Sort fields by field index
	sort.Sort(&indexFieldSorter{flds})

	return flds, structOptions
}
|
||||||
|
|
||||||
|
// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes .
|
||||||
|
func appendFields(
|
||||||
|
t reflect.Type,
|
||||||
|
idx []int,
|
||||||
|
flds fields,
|
||||||
|
nTypes map[reflect.Type][][]int,
|
||||||
|
) (
|
||||||
|
_flds fields,
|
||||||
|
_nTypes map[reflect.Type][][]int,
|
||||||
|
) {
|
||||||
|
for i := 0; i < t.NumField(); i++ {
|
||||||
|
f := t.Field(i)
|
||||||
|
|
||||||
|
ft := f.Type
|
||||||
|
for ft.Kind() == reflect.Ptr {
|
||||||
|
ft = ft.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isFieldExportable(f, ft.Kind()) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tag := f.Tag.Get("cbor")
|
||||||
|
if tag == "" {
|
||||||
|
tag = f.Tag.Get("json")
|
||||||
|
}
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tagged := tag != ""
|
||||||
|
|
||||||
|
// Parse field tag options
|
||||||
|
var tagFieldName string
|
||||||
|
var omitempty, keyasint bool
|
||||||
|
for j := 0; tag != ""; j++ {
|
||||||
|
var token string
|
||||||
|
idx := strings.IndexByte(tag, ',')
|
||||||
|
if idx == -1 {
|
||||||
|
token, tag = tag, ""
|
||||||
|
} else {
|
||||||
|
token, tag = tag[:idx], tag[idx+1:]
|
||||||
|
}
|
||||||
|
if j == 0 {
|
||||||
|
tagFieldName = token
|
||||||
|
} else {
|
||||||
|
switch token {
|
||||||
|
case "omitempty":
|
||||||
|
omitempty = true
|
||||||
|
case "keyasint":
|
||||||
|
keyasint = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldName := tagFieldName
|
||||||
|
if tagFieldName == "" {
|
||||||
|
fieldName = f.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
fIdx := make([]int, len(idx)+1)
|
||||||
|
copy(fIdx, idx)
|
||||||
|
fIdx[len(fIdx)-1] = i
|
||||||
|
|
||||||
|
if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
|
||||||
|
flds = append(flds, &field{
|
||||||
|
name: fieldName,
|
||||||
|
idx: fIdx,
|
||||||
|
typ: f.Type,
|
||||||
|
omitEmpty: omitempty,
|
||||||
|
keyAsInt: keyasint,
|
||||||
|
tagged: tagged})
|
||||||
|
} else {
|
||||||
|
if nTypes == nil {
|
||||||
|
nTypes = make(map[reflect.Type][][]int)
|
||||||
|
}
|
||||||
|
nTypes[ft] = append(nTypes[ft], fIdx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return flds, nTypes
|
||||||
|
}
|
||||||
|
|
||||||
|
// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
|
||||||
|
// a nonexportable anonymous field of struct type.
|
||||||
|
// Nonexportable anonymous field of struct type can contain exportable fields.
|
||||||
|
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
|
||||||
|
exportable := f.PkgPath == ""
|
||||||
|
return exportable || (f.Anonymous && fk == reflect.Struct)
|
||||||
|
}
|
||||||
|
|
||||||
|
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
|
||||||
|
|
||||||
|
// getFieldValue returns field value of struct v by index. When encountering null pointer
|
||||||
|
// to anonymous (embedded) struct field, f is called with the last traversed field value.
|
||||||
|
func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
|
||||||
|
fv = v
|
||||||
|
for i, n := range idx {
|
||||||
|
fv = fv.Field(n)
|
||||||
|
|
||||||
|
if i < len(idx)-1 {
|
||||||
|
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
|
||||||
|
if fv.IsNil() {
|
||||||
|
// Null pointer to embedded struct field
|
||||||
|
fv, err = f(fv)
|
||||||
|
if err != nil || !fv.IsValid() {
|
||||||
|
return fv, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fv = fv.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fv, nil
|
||||||
|
}
|
299
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
Normal file
299
vendor/github.com/fxamacker/cbor/v2/tag.go
generated
vendored
Normal file
@ -0,0 +1,299 @@
|
|||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
|
||||||
|
// unmarshaling of tag content is subject to any encode and decode options that would apply to
|
||||||
|
// enclosed data item if it were to appear outside of a tag.
|
||||||
|
type Tag struct {
|
||||||
|
Number uint64
|
||||||
|
Content interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawTag represents CBOR tag data, including tag number and raw tag content.
|
||||||
|
// RawTag implements Unmarshaler and Marshaler interfaces.
|
||||||
|
type RawTag struct {
|
||||||
|
Number uint64
|
||||||
|
Content RawMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
|
||||||
|
func (t *RawTag) UnmarshalCBOR(data []byte) error {
|
||||||
|
if t == nil {
|
||||||
|
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decoding CBOR null and undefined to cbor.RawTag is no-op.
|
||||||
|
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d := decoder{data: data, dm: defaultDecMode}
|
||||||
|
|
||||||
|
// Unmarshal tag number.
|
||||||
|
typ, _, num := d.getHead()
|
||||||
|
if typ != cborTypeTag {
|
||||||
|
return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
|
||||||
|
}
|
||||||
|
t.Number = num
|
||||||
|
|
||||||
|
// Unmarshal tag content.
|
||||||
|
c := d.data[d.off:]
|
||||||
|
t.Content = make([]byte, len(c))
|
||||||
|
copy(t.Content, c)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalCBOR returns CBOR encoding of t.
|
||||||
|
func (t RawTag) MarshalCBOR() ([]byte, error) {
|
||||||
|
if t.Number == 0 && len(t.Content) == 0 {
|
||||||
|
// Marshal uninitialized cbor.RawTag
|
||||||
|
b := make([]byte, len(cborNil))
|
||||||
|
copy(b, cborNil)
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
e := getEncodeBuffer()
|
||||||
|
|
||||||
|
encodeHead(e, byte(cborTypeTag), t.Number)
|
||||||
|
|
||||||
|
content := t.Content
|
||||||
|
if len(content) == 0 {
|
||||||
|
content = cborNil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, len(e.Bytes())+len(content))
|
||||||
|
n := copy(buf, e.Bytes())
|
||||||
|
copy(buf[n:], content)
|
||||||
|
|
||||||
|
putEncodeBuffer(e)
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecTagMode specifies how decoder handles tag number.
|
||||||
|
type DecTagMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DecTagIgnored makes decoder ignore tag number (skips if present).
|
||||||
|
DecTagIgnored DecTagMode = iota
|
||||||
|
|
||||||
|
// DecTagOptional makes decoder verify tag number if it's present.
|
||||||
|
DecTagOptional
|
||||||
|
|
||||||
|
// DecTagRequired makes decoder verify tag number and tag number must be present.
|
||||||
|
DecTagRequired
|
||||||
|
|
||||||
|
maxDecTagMode
|
||||||
|
)
|
||||||
|
|
||||||
|
func (dtm DecTagMode) valid() bool {
|
||||||
|
return dtm >= 0 && dtm < maxDecTagMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncTagMode specifies how encoder handles tag number.
|
||||||
|
type EncTagMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// EncTagNone makes encoder not encode tag number.
|
||||||
|
EncTagNone EncTagMode = iota
|
||||||
|
|
||||||
|
// EncTagRequired makes encoder encode tag number.
|
||||||
|
EncTagRequired
|
||||||
|
|
||||||
|
maxEncTagMode
|
||||||
|
)
|
||||||
|
|
||||||
|
func (etm EncTagMode) valid() bool {
|
||||||
|
return etm >= 0 && etm < maxEncTagMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagOptions specifies how encoder and decoder handle tag number.
|
||||||
|
type TagOptions struct {
|
||||||
|
DecTag DecTagMode
|
||||||
|
EncTag EncTagMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
|
||||||
|
// to provide CBOR tag support.
|
||||||
|
type TagSet interface {
|
||||||
|
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||||
|
Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
|
||||||
|
|
||||||
|
// Remove removes given tag content type from TagSet.
|
||||||
|
Remove(contentType reflect.Type)
|
||||||
|
|
||||||
|
tagProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
type tagProvider interface {
|
||||||
|
getTagItemFromType(t reflect.Type) *tagItem
|
||||||
|
getTypeFromTagNum(num []uint64) reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
type tagItem struct {
|
||||||
|
num []uint64
|
||||||
|
cborTagNum []byte
|
||||||
|
contentType reflect.Type
|
||||||
|
opts TagOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *tagItem) equalTagNum(num []uint64) bool {
|
||||||
|
// Fast path to compare 1 tag number
|
||||||
|
if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(t.num) != len(num) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(t.num); i++ {
|
||||||
|
if t.num[i] != num[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
tagSet map[reflect.Type]*tagItem
|
||||||
|
|
||||||
|
syncTagSet struct {
|
||||||
|
sync.RWMutex
|
||||||
|
t tagSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||||
|
return t[typ]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||||
|
for typ, tag := range t {
|
||||||
|
if tag.equalTagNum(num) {
|
||||||
|
return typ
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTagSet returns TagSet (safe for concurrency).
|
||||||
|
func NewTagSet() TagSet {
|
||||||
|
return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||||
|
func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
|
||||||
|
if contentType == nil {
|
||||||
|
return errors.New("cbor: cannot add nil content type to TagSet")
|
||||||
|
}
|
||||||
|
for contentType.Kind() == reflect.Ptr {
|
||||||
|
contentType = contentType.Elem()
|
||||||
|
}
|
||||||
|
tag, err := newTagItem(opts, contentType, num, nestedNum...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.Lock()
|
||||||
|
defer t.Unlock()
|
||||||
|
for typ, ti := range t.t {
|
||||||
|
if typ == contentType {
|
||||||
|
return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
|
||||||
|
}
|
||||||
|
if ti.equalTagNum(tag.num) {
|
||||||
|
return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.t[contentType] = tag
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes given tag content type from TagSet.
|
||||||
|
func (t *syncTagSet) Remove(contentType reflect.Type) {
|
||||||
|
for contentType.Kind() == reflect.Ptr {
|
||||||
|
contentType = contentType.Elem()
|
||||||
|
}
|
||||||
|
t.Lock()
|
||||||
|
delete(t.t, contentType)
|
||||||
|
t.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||||
|
t.RLock()
|
||||||
|
ti := t.t[typ]
|
||||||
|
t.RUnlock()
|
||||||
|
return ti
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||||
|
t.RLock()
|
||||||
|
rt := t.t.getTypeFromTagNum(num)
|
||||||
|
t.RUnlock()
|
||||||
|
return rt
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
|
||||||
|
if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
|
||||||
|
return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
|
||||||
|
}
|
||||||
|
if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
|
||||||
|
return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
|
||||||
|
}
|
||||||
|
if contentType == typeTime {
|
||||||
|
return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||||
|
}
|
||||||
|
if contentType == typeBigInt {
|
||||||
|
return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
|
||||||
|
}
|
||||||
|
if contentType == typeTag {
|
||||||
|
return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
|
||||||
|
}
|
||||||
|
if contentType == typeRawTag {
|
||||||
|
return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
|
||||||
|
}
|
||||||
|
if num == 0 || num == 1 {
|
||||||
|
return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||||
|
}
|
||||||
|
if num == 2 || num == 3 {
|
||||||
|
return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
|
||||||
|
}
|
||||||
|
if num == tagNumSelfDescribedCBOR {
|
||||||
|
return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
|
||||||
|
}
|
||||||
|
|
||||||
|
te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
|
||||||
|
te.num = append(te.num, nestedNum...)
|
||||||
|
|
||||||
|
// Cache encoded tag numbers
|
||||||
|
e := getEncodeBuffer()
|
||||||
|
for _, n := range te.num {
|
||||||
|
encodeHead(e, byte(cborTypeTag), n)
|
||||||
|
}
|
||||||
|
te.cborTagNum = make([]byte, e.Len())
|
||||||
|
copy(te.cborTagNum, e.Bytes())
|
||||||
|
putEncodeBuffer(e)
|
||||||
|
|
||||||
|
return &te, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
typeTag = reflect.TypeOf(Tag{})
|
||||||
|
typeRawTag = reflect.TypeOf(RawTag{})
|
||||||
|
)
|
||||||
|
|
||||||
|
// WrongTagError describes mismatch between CBOR tag and registered tag.
|
||||||
|
type WrongTagError struct {
|
||||||
|
RegisteredType reflect.Type
|
||||||
|
RegisteredTagNum []uint64
|
||||||
|
TagNum []uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *WrongTagError) Error() string {
|
||||||
|
return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
|
||||||
|
}
|
394
vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
Normal file
394
vendor/github.com/fxamacker/cbor/v2/valid.go
generated
vendored
Normal file
@ -0,0 +1,394 @@
|
|||||||
|
// Copyright (c) Faye Amacker. All rights reserved.
|
||||||
|
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||||
|
|
||||||
|
package cbor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/x448/float16"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SyntaxError is a description of a CBOR syntax error.
|
||||||
|
type SyntaxError struct {
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SyntaxError) Error() string { return e.msg }
|
||||||
|
|
||||||
|
// SemanticError is a description of a CBOR semantic error.
|
||||||
|
type SemanticError struct {
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SemanticError) Error() string { return e.msg }
|
||||||
|
|
||||||
|
// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
|
||||||
|
type MaxNestedLevelError struct {
|
||||||
|
maxNestedLevels int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MaxNestedLevelError) Error() string {
|
||||||
|
return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
|
||||||
|
type MaxArrayElementsError struct {
|
||||||
|
maxArrayElements int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MaxArrayElementsError) Error() string {
|
||||||
|
return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
|
||||||
|
type MaxMapPairsError struct {
|
||||||
|
maxMapPairs int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *MaxMapPairsError) Error() string {
|
||||||
|
return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndefiniteLengthError indicates found disallowed indefinite length items.
|
||||||
|
type IndefiniteLengthError struct {
|
||||||
|
t cborType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *IndefiniteLengthError) Error() string {
|
||||||
|
return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
|
||||||
|
}
|
||||||
|
|
||||||
|
// TagsMdError indicates found disallowed CBOR tags.
|
||||||
|
type TagsMdError struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *TagsMdError) Error() string {
|
||||||
|
return "cbor: CBOR tag isn't allowed"
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
|
||||||
|
type ExtraneousDataError struct {
|
||||||
|
numOfBytes int // number of bytes of extraneous data
|
||||||
|
index int // location of extraneous data
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ExtraneousDataError) Error() string {
|
||||||
|
return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformed checks whether the CBOR data item is well-formed.
|
||||||
|
// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
|
||||||
|
// - use allowExtraData = true when using Decoder.Decode()
|
||||||
|
// - use allowExtraData = false when using Unmarshal()
|
||||||
|
func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
|
||||||
|
if len(d.data) == d.off {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
_, err := d.wellformedInternal(0, checkBuiltinTags)
|
||||||
|
if err == nil {
|
||||||
|
if !allowExtraData && d.off != len(d.data) {
|
||||||
|
err = &ExtraneousDataError{len(d.data) - d.off, d.off}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformedInternal checks data's well-formedness and returns max depth and error.
|
||||||
|
func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
|
||||||
|
t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch t {
|
||||||
|
case cborTypeByteString, cborTypeTextString:
|
||||||
|
if indefiniteLength {
|
||||||
|
if d.dm.indefLength == IndefLengthForbidden {
|
||||||
|
return 0, &IndefiniteLengthError{t}
|
||||||
|
}
|
||||||
|
return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
|
||||||
|
}
|
||||||
|
valInt := int(val)
|
||||||
|
if valInt < 0 {
|
||||||
|
// Detect integer overflow
|
||||||
|
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
|
||||||
|
}
|
||||||
|
if len(d.data)-d.off < valInt { // valInt+off may overflow integer
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
d.off += valInt
|
||||||
|
|
||||||
|
case cborTypeArray, cborTypeMap:
|
||||||
|
depth++
|
||||||
|
if depth > d.dm.maxNestedLevels {
|
||||||
|
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||||
|
}
|
||||||
|
|
||||||
|
if indefiniteLength {
|
||||||
|
if d.dm.indefLength == IndefLengthForbidden {
|
||||||
|
return 0, &IndefiniteLengthError{t}
|
||||||
|
}
|
||||||
|
return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
valInt := int(val)
|
||||||
|
if valInt < 0 {
|
||||||
|
// Detect integer overflow
|
||||||
|
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
|
||||||
|
}
|
||||||
|
|
||||||
|
if t == cborTypeArray {
|
||||||
|
if valInt > d.dm.maxArrayElements {
|
||||||
|
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if valInt > d.dm.maxMapPairs {
|
||||||
|
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
count := 1
|
||||||
|
if t == cborTypeMap {
|
||||||
|
count = 2
|
||||||
|
}
|
||||||
|
maxDepth := depth
|
||||||
|
for j := 0; j < count; j++ {
|
||||||
|
for i := 0; i < valInt; i++ {
|
||||||
|
var dpt int
|
||||||
|
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if dpt > maxDepth {
|
||||||
|
maxDepth = dpt // Save max depth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
depth = maxDepth
|
||||||
|
|
||||||
|
case cborTypeTag:
|
||||||
|
if d.dm.tagsMd == TagsForbidden {
|
||||||
|
return 0, &TagsMdError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
tagNum := val
|
||||||
|
|
||||||
|
// Scan nested tag numbers to avoid recursion.
|
||||||
|
for {
|
||||||
|
if len(d.data) == d.off { // Tag number must be followed by tag content.
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if checkBuiltinTags {
|
||||||
|
err = validBuiltinTag(tagNum, d.data[d.off])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
|
||||||
|
return 0, &UnacceptableDataItemError{
|
||||||
|
CBORType: cborTypeTag.String(),
|
||||||
|
Message: "bignum",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if getType(d.data[d.off]) != cborTypeTag {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if _, _, tagNum, err = d.wellformedHead(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
depth++
|
||||||
|
if depth > d.dm.maxNestedLevels {
|
||||||
|
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Check tag content.
|
||||||
|
return d.wellformedInternal(depth, checkBuiltinTags)
|
||||||
|
}
|
||||||
|
|
||||||
|
return depth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
|
||||||
|
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||||
|
var err error
|
||||||
|
for {
|
||||||
|
if len(d.data) == d.off {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if isBreakFlag(d.data[d.off]) {
|
||||||
|
d.off++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Peek ahead to get next type and indefinite length status.
|
||||||
|
nt, ai := parseInitialByte(d.data[d.off])
|
||||||
|
if t != nt {
|
||||||
|
return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
|
||||||
|
}
|
||||||
|
if additionalInformation(ai).isIndefiniteLength() {
|
||||||
|
return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
|
||||||
|
}
|
||||||
|
if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return depth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
|
||||||
|
func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||||
|
var err error
|
||||||
|
maxDepth := depth
|
||||||
|
i := 0
|
||||||
|
for {
|
||||||
|
if len(d.data) == d.off {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
if isBreakFlag(d.data[d.off]) {
|
||||||
|
d.off++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
var dpt int
|
||||||
|
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if dpt > maxDepth {
|
||||||
|
maxDepth = dpt
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
if t == cborTypeArray {
|
||||||
|
if i > d.dm.maxArrayElements {
|
||||||
|
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if i%2 == 0 && i/2 > d.dm.maxMapPairs {
|
||||||
|
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if t == cborTypeMap && i%2 == 1 {
|
||||||
|
return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||||
|
}
|
||||||
|
return maxDepth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
|
||||||
|
t cborType,
|
||||||
|
ai byte,
|
||||||
|
val uint64,
|
||||||
|
indefiniteLength bool,
|
||||||
|
err error,
|
||||||
|
) {
|
||||||
|
t, ai, val, err = d.wellformedHead()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
indefiniteLength = additionalInformation(ai).isIndefiniteLength()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
|
||||||
|
dataLen := len(d.data) - d.off
|
||||||
|
if dataLen == 0 {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
|
||||||
|
t, ai = parseInitialByte(d.data[d.off])
|
||||||
|
val = uint64(ai)
|
||||||
|
d.off++
|
||||||
|
dataLen--
|
||||||
|
|
||||||
|
if ai <= maxAdditionalInformationWithoutArgument {
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith1ByteArgument {
|
||||||
|
const argumentSize = 1
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = uint64(d.data[d.off])
|
||||||
|
d.off++
|
||||||
|
if t == cborTypePrimitives && val < 32 {
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith2ByteArgument {
|
||||||
|
const argumentSize = 2
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
|
||||||
|
d.off += argumentSize
|
||||||
|
if t == cborTypePrimitives {
|
||||||
|
if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith4ByteArgument {
|
||||||
|
const argumentSize = 4
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
|
||||||
|
d.off += argumentSize
|
||||||
|
if t == cborTypePrimitives {
|
||||||
|
if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ai == additionalInformationWith8ByteArgument {
|
||||||
|
const argumentSize = 8
|
||||||
|
if dataLen < argumentSize {
|
||||||
|
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
|
||||||
|
d.off += argumentSize
|
||||||
|
if t == cborTypePrimitives {
|
||||||
|
if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if additionalInformation(ai).isIndefiniteLength() {
|
||||||
|
switch t {
|
||||||
|
case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||||
|
case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||||
|
}
|
||||||
|
return t, ai, val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ai == 28, 29, 30
|
||||||
|
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) acceptableFloat(f float64) error {
|
||||||
|
switch {
|
||||||
|
case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
|
||||||
|
return &UnacceptableDataItemError{
|
||||||
|
CBORType: cborTypePrimitives.String(),
|
||||||
|
Message: "floating-point NaN",
|
||||||
|
}
|
||||||
|
case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
|
||||||
|
return &UnacceptableDataItemError{
|
||||||
|
CBORType: cborTypePrimitives.String(),
|
||||||
|
Message: "floating-point infinity",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
43
vendor/github.com/moby/spdystream/connection.go
generated
vendored
43
vendor/github.com/moby/spdystream/connection.go
generated
vendored
@ -208,9 +208,10 @@ type Connection struct {
|
|||||||
nextStreamId spdy.StreamId
|
nextStreamId spdy.StreamId
|
||||||
receivedStreamId spdy.StreamId
|
receivedStreamId spdy.StreamId
|
||||||
|
|
||||||
pingIdLock sync.Mutex
|
// pingLock protects pingChans and pingId
|
||||||
pingId uint32
|
pingLock sync.Mutex
|
||||||
pingChans map[uint32]chan error
|
pingId uint32
|
||||||
|
pingChans map[uint32]chan error
|
||||||
|
|
||||||
shutdownLock sync.Mutex
|
shutdownLock sync.Mutex
|
||||||
shutdownChan chan error
|
shutdownChan chan error
|
||||||
@ -274,16 +275,20 @@ func NewConnection(conn net.Conn, server bool) (*Connection, error) {
|
|||||||
// returns the response time
|
// returns the response time
|
||||||
func (s *Connection) Ping() (time.Duration, error) {
|
func (s *Connection) Ping() (time.Duration, error) {
|
||||||
pid := s.pingId
|
pid := s.pingId
|
||||||
s.pingIdLock.Lock()
|
s.pingLock.Lock()
|
||||||
if s.pingId > 0x7ffffffe {
|
if s.pingId > 0x7ffffffe {
|
||||||
s.pingId = s.pingId - 0x7ffffffe
|
s.pingId = s.pingId - 0x7ffffffe
|
||||||
} else {
|
} else {
|
||||||
s.pingId = s.pingId + 2
|
s.pingId = s.pingId + 2
|
||||||
}
|
}
|
||||||
s.pingIdLock.Unlock()
|
|
||||||
pingChan := make(chan error)
|
pingChan := make(chan error)
|
||||||
s.pingChans[pid] = pingChan
|
s.pingChans[pid] = pingChan
|
||||||
defer delete(s.pingChans, pid)
|
s.pingLock.Unlock()
|
||||||
|
defer func() {
|
||||||
|
s.pingLock.Lock()
|
||||||
|
delete(s.pingChans, pid)
|
||||||
|
s.pingLock.Unlock()
|
||||||
|
}()
|
||||||
|
|
||||||
frame := &spdy.PingFrame{Id: pid}
|
frame := &spdy.PingFrame{Id: pid}
|
||||||
startTime := time.Now()
|
startTime := time.Now()
|
||||||
@ -612,10 +617,14 @@ func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
|
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
|
||||||
if s.pingId&0x01 != frame.Id&0x01 {
|
s.pingLock.Lock()
|
||||||
|
pingId := s.pingId
|
||||||
|
pingChan, pingOk := s.pingChans[frame.Id]
|
||||||
|
s.pingLock.Unlock()
|
||||||
|
|
||||||
|
if pingId&0x01 != frame.Id&0x01 {
|
||||||
return s.framer.WriteFrame(frame)
|
return s.framer.WriteFrame(frame)
|
||||||
}
|
}
|
||||||
pingChan, pingOk := s.pingChans[frame.Id]
|
|
||||||
if pingOk {
|
if pingOk {
|
||||||
close(pingChan)
|
close(pingChan)
|
||||||
}
|
}
|
||||||
@ -731,16 +740,14 @@ func (s *Connection) shutdown(closeTimeout time.Duration) {
|
|||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
duration := 10 * time.Minute
|
duration := 10 * time.Minute
|
||||||
time.AfterFunc(duration, func() {
|
timer := time.NewTimer(duration)
|
||||||
select {
|
defer timer.Stop()
|
||||||
case err, ok := <-s.shutdownChan:
|
select {
|
||||||
if ok {
|
case s.shutdownChan <- err:
|
||||||
debugMessage("Unhandled close error after %s: %s", duration, err)
|
// error was handled
|
||||||
}
|
case <-timer.C:
|
||||||
default:
|
debugMessage("Unhandled close error after %s: %s", duration, err)
|
||||||
}
|
}
|
||||||
})
|
|
||||||
s.shutdownChan <- err
|
|
||||||
}
|
}
|
||||||
close(s.shutdownChan)
|
close(s.shutdownChan)
|
||||||
}
|
}
|
||||||
|
2
vendor/github.com/moby/spdystream/stream.go
generated
vendored
2
vendor/github.com/moby/spdystream/stream.go
generated
vendored
@ -305,6 +305,8 @@ func (s *Stream) Identifier() uint32 {
|
|||||||
// IsFinished returns whether the stream has finished
|
// IsFinished returns whether the stream has finished
|
||||||
// sending data
|
// sending data
|
||||||
func (s *Stream) IsFinished() bool {
|
func (s *Stream) IsFinished() bool {
|
||||||
|
s.finishLock.Lock()
|
||||||
|
defer s.finishLock.Unlock()
|
||||||
return s.finished
|
return s.finished
|
||||||
}
|
}
|
||||||
|
|
||||||
|
31
vendor/github.com/munnerz/goautoneg/LICENSE
generated
vendored
Normal file
31
vendor/github.com/munnerz/goautoneg/LICENSE
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
|
||||||
|
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||||
|
names of its contributors may be used to endorse or promote
|
||||||
|
products derived from this software without specific prior written
|
||||||
|
permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
13
vendor/github.com/munnerz/goautoneg/Makefile
generated
vendored
Normal file
13
vendor/github.com/munnerz/goautoneg/Makefile
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
include $(GOROOT)/src/Make.inc
|
||||||
|
|
||||||
|
TARG=bitbucket.org/ww/goautoneg
|
||||||
|
GOFILES=autoneg.go
|
||||||
|
|
||||||
|
include $(GOROOT)/src/Make.pkg
|
||||||
|
|
||||||
|
format:
|
||||||
|
gofmt -w *.go
|
||||||
|
|
||||||
|
docs:
|
||||||
|
gomake clean
|
||||||
|
godoc ${TARG} > README.txt
|
@ -1,28 +1,28 @@
|
|||||||
/*
|
/*
|
||||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
HTTP Content-Type Autonegotiation.
|
HTTP Content-Type Autonegotiation.
|
||||||
|
|
||||||
The functions in this package implement the behaviour specified in
|
The functions in this package implement the behaviour specified in
|
||||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||||
|
|
||||||
|
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
modification, are permitted provided that the following conditions are
|
modification, are permitted provided that the following conditions are
|
||||||
met:
|
met:
|
||||||
|
|
||||||
Redistributions of source code must retain the above copyright
|
Redistributions of source code must retain the above copyright
|
||||||
notice, this list of conditions and the following disclaimer.
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
Redistributions in binary form must reproduce the above copyright
|
Redistributions in binary form must reproduce the above copyright
|
||||||
notice, this list of conditions and the following disclaimer in
|
notice, this list of conditions and the following disclaimer in
|
||||||
the documentation and/or other materials provided with the
|
the documentation and/or other materials provided with the
|
||||||
distribution.
|
distribution.
|
||||||
|
|
||||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||||
names of its contributors may be used to endorse or promote
|
names of its contributors may be used to endorse or promote
|
||||||
products derived from this software without specific prior written
|
products derived from this software without specific prior written
|
||||||
permission.
|
permission.
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
@ -36,6 +36,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package goautoneg
|
package goautoneg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -51,16 +52,14 @@ type Accept struct {
|
|||||||
Params map[string]string
|
Params map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// For internal use, so that we can use the sort interface
|
// acceptSlice is defined to implement sort interface.
|
||||||
type accept_slice []Accept
|
type acceptSlice []Accept
|
||||||
|
|
||||||
func (accept accept_slice) Len() int {
|
func (slice acceptSlice) Len() int {
|
||||||
slice := []Accept(accept)
|
|
||||||
return len(slice)
|
return len(slice)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (accept accept_slice) Less(i, j int) bool {
|
func (slice acceptSlice) Less(i, j int) bool {
|
||||||
slice := []Accept(accept)
|
|
||||||
ai, aj := slice[i], slice[j]
|
ai, aj := slice[i], slice[j]
|
||||||
if ai.Q > aj.Q {
|
if ai.Q > aj.Q {
|
||||||
return true
|
return true
|
||||||
@ -74,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (accept accept_slice) Swap(i, j int) {
|
func (slice acceptSlice) Swap(i, j int) {
|
||||||
slice := []Accept(accept)
|
|
||||||
slice[i], slice[j] = slice[j], slice[i]
|
slice[i], slice[j] = slice[j], slice[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func stringTrimSpaceCutset(r rune) bool {
|
||||||
|
return r == ' '
|
||||||
|
}
|
||||||
|
|
||||||
|
func nextSplitElement(s, sep string) (item string, remaining string) {
|
||||||
|
if index := strings.Index(s, sep); index != -1 {
|
||||||
|
return s[:index], s[index+1:]
|
||||||
|
}
|
||||||
|
return s, ""
|
||||||
|
}
|
||||||
|
|
||||||
// Parse an Accept Header string returning a sorted list
|
// Parse an Accept Header string returning a sorted list
|
||||||
// of clauses
|
// of clauses
|
||||||
func ParseAccept(header string) (accept []Accept) {
|
func ParseAccept(header string) acceptSlice {
|
||||||
parts := strings.Split(header, ",")
|
partsCount := 0
|
||||||
accept = make([]Accept, 0, len(parts))
|
remaining := header
|
||||||
for _, part := range parts {
|
for len(remaining) > 0 {
|
||||||
part := strings.Trim(part, " ")
|
partsCount++
|
||||||
|
_, remaining = nextSplitElement(remaining, ",")
|
||||||
|
}
|
||||||
|
accept := make(acceptSlice, 0, partsCount)
|
||||||
|
|
||||||
a := Accept{}
|
remaining = header
|
||||||
a.Params = make(map[string]string)
|
var part string
|
||||||
a.Q = 1.0
|
for len(remaining) > 0 {
|
||||||
|
part, remaining = nextSplitElement(remaining, ",")
|
||||||
|
part = strings.TrimFunc(part, stringTrimSpaceCutset)
|
||||||
|
|
||||||
mrp := strings.Split(part, ";")
|
a := Accept{
|
||||||
|
Q: 1.0,
|
||||||
media_range := mrp[0]
|
|
||||||
sp := strings.Split(media_range, "/")
|
|
||||||
a.Type = strings.Trim(sp[0], " ")
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case len(sp) == 1 && a.Type == "*":
|
|
||||||
a.SubType = "*"
|
|
||||||
case len(sp) == 2:
|
|
||||||
a.SubType = strings.Trim(sp[1], " ")
|
|
||||||
default:
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(mrp) == 1 {
|
sp, remainingPart := nextSplitElement(part, ";")
|
||||||
|
|
||||||
|
sp0, spRemaining := nextSplitElement(sp, "/")
|
||||||
|
a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case len(spRemaining) == 0:
|
||||||
|
if a.Type == "*" {
|
||||||
|
a.SubType = "*"
|
||||||
|
} else {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
var sp1 string
|
||||||
|
sp1, spRemaining = nextSplitElement(spRemaining, "/")
|
||||||
|
if len(spRemaining) > 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(remainingPart) == 0 {
|
||||||
accept = append(accept, a)
|
accept = append(accept, a)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, param := range mrp[1:] {
|
a.Params = make(map[string]string)
|
||||||
sp := strings.SplitN(param, "=", 2)
|
for len(remainingPart) > 0 {
|
||||||
if len(sp) != 2 {
|
sp, remainingPart = nextSplitElement(remainingPart, ";")
|
||||||
|
sp0, spRemaining = nextSplitElement(sp, "=")
|
||||||
|
if len(spRemaining) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
token := strings.Trim(sp[0], " ")
|
var sp1 string
|
||||||
|
sp1, spRemaining = nextSplitElement(spRemaining, "=")
|
||||||
|
if len(spRemaining) != 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
|
||||||
if token == "q" {
|
if token == "q" {
|
||||||
a.Q, _ = strconv.ParseFloat(sp[1], 32)
|
a.Q, _ = strconv.ParseFloat(sp1, 32)
|
||||||
} else {
|
} else {
|
||||||
a.Params[token] = strings.Trim(sp[1], " ")
|
a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
accept = append(accept, a)
|
accept = append(accept, a)
|
||||||
}
|
}
|
||||||
|
|
||||||
slice := accept_slice(accept)
|
sort.Sort(accept)
|
||||||
sort.Sort(slice)
|
return accept
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Negotiate the most appropriate content_type given the accept header
|
// Negotiate the most appropriate content_type given the accept header
|
6
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
6
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
@ -75,14 +75,14 @@ func ResponseFormat(h http.Header) Format {
|
|||||||
func NewDecoder(r io.Reader, format Format) Decoder {
|
func NewDecoder(r io.Reader, format Format) Decoder {
|
||||||
switch format.FormatType() {
|
switch format.FormatType() {
|
||||||
case TypeProtoDelim:
|
case TypeProtoDelim:
|
||||||
return &protoDecoder{r: r}
|
return &protoDecoder{r: bufio.NewReader(r)}
|
||||||
}
|
}
|
||||||
return &textDecoder{r: r}
|
return &textDecoder{r: r}
|
||||||
}
|
}
|
||||||
|
|
||||||
// protoDecoder implements the Decoder interface for protocol buffers.
|
// protoDecoder implements the Decoder interface for protocol buffers.
|
||||||
type protoDecoder struct {
|
type protoDecoder struct {
|
||||||
r io.Reader
|
r protodelim.Reader
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode implements the Decoder interface.
|
// Decode implements the Decoder interface.
|
||||||
@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
|||||||
opts := protodelim.UnmarshalOptions{
|
opts := protodelim.UnmarshalOptions{
|
||||||
MaxSize: -1,
|
MaxSize: -1,
|
||||||
}
|
}
|
||||||
if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
|
if err := opts.UnmarshalFrom(d.r, v); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||||
|
13
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
13
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
@ -21,9 +21,10 @@ import (
|
|||||||
"google.golang.org/protobuf/encoding/protodelim"
|
"google.golang.org/protobuf/encoding/protodelim"
|
||||||
"google.golang.org/protobuf/encoding/prototext"
|
"google.golang.org/protobuf/encoding/prototext"
|
||||||
|
|
||||||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
"github.com/munnerz/goautoneg"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -139,7 +140,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
|||||||
// interface is kept for backwards compatibility.
|
// interface is kept for backwards compatibility.
|
||||||
// In cases where the Format does not allow for UTF-8 names, the global
|
// In cases where the Format does not allow for UTF-8 names, the global
|
||||||
// NameEscapingScheme will be applied.
|
// NameEscapingScheme will be applied.
|
||||||
func NewEncoder(w io.Writer, format Format) Encoder {
|
//
|
||||||
|
// NewEncoder can be called with additional options to customize the OpenMetrics text output.
|
||||||
|
// For example:
|
||||||
|
// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
|
||||||
|
//
|
||||||
|
// Extra options are ignored for all other formats.
|
||||||
|
func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
|
||||||
escapingScheme := format.ToEscapingScheme()
|
escapingScheme := format.ToEscapingScheme()
|
||||||
|
|
||||||
switch format.FormatType() {
|
switch format.FormatType() {
|
||||||
@ -178,7 +185,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
|
|||||||
case TypeOpenMetrics:
|
case TypeOpenMetrics:
|
||||||
return encoderCloser{
|
return encoderCloser{
|
||||||
encode: func(v *dto.MetricFamily) error {
|
encode: func(v *dto.MetricFamily) error {
|
||||||
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme))
|
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
|
||||||
return err
|
return err
|
||||||
},
|
},
|
||||||
close: func() error {
|
close: func() error {
|
||||||
|
22
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
22
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
@ -15,6 +15,7 @@
|
|||||||
package expfmt
|
package expfmt
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
@ -63,7 +64,7 @@ const (
|
|||||||
type FormatType int
|
type FormatType int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
TypeUnknown = iota
|
TypeUnknown FormatType = iota
|
||||||
TypeProtoCompact
|
TypeProtoCompact
|
||||||
TypeProtoDelim
|
TypeProtoDelim
|
||||||
TypeProtoText
|
TypeProtoText
|
||||||
@ -73,7 +74,8 @@ const (
|
|||||||
|
|
||||||
// NewFormat generates a new Format from the type provided. Mostly used for
|
// NewFormat generates a new Format from the type provided. Mostly used for
|
||||||
// tests, most Formats should be generated as part of content negotiation in
|
// tests, most Formats should be generated as part of content negotiation in
|
||||||
// encode.go.
|
// encode.go. If a type has more than one version, the latest version will be
|
||||||
|
// returned.
|
||||||
func NewFormat(t FormatType) Format {
|
func NewFormat(t FormatType) Format {
|
||||||
switch t {
|
switch t {
|
||||||
case TypeProtoCompact:
|
case TypeProtoCompact:
|
||||||
@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewOpenMetricsFormat generates a new OpenMetrics format matching the
|
||||||
|
// specified version number.
|
||||||
|
func NewOpenMetricsFormat(version string) (Format, error) {
|
||||||
|
if version == OpenMetricsVersion_0_0_1 {
|
||||||
|
return fmtOpenMetrics_0_0_1, nil
|
||||||
|
}
|
||||||
|
if version == OpenMetricsVersion_1_0_0 {
|
||||||
|
return fmtOpenMetrics_1_0_0, nil
|
||||||
|
}
|
||||||
|
return fmtUnknown, fmt.Errorf("unknown open metrics version string")
|
||||||
|
}
|
||||||
|
|
||||||
// FormatType deduces an overall FormatType for the given format.
|
// FormatType deduces an overall FormatType for the given format.
|
||||||
func (f Format) FormatType() FormatType {
|
func (f Format) FormatType() FormatType {
|
||||||
toks := strings.Split(string(f), ";")
|
toks := strings.Split(string(f), ";")
|
||||||
if len(toks) < 2 {
|
|
||||||
return TypeUnknown
|
|
||||||
}
|
|
||||||
|
|
||||||
params := make(map[string]string)
|
params := make(map[string]string)
|
||||||
for i, t := range toks {
|
for i, t := range toks {
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
|
202
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
generated
vendored
202
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
generated
vendored
@ -22,11 +22,47 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type encoderOption struct {
|
||||||
|
withCreatedLines bool
|
||||||
|
withUnit bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type EncoderOption func(*encoderOption)
|
||||||
|
|
||||||
|
// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
|
||||||
|
// to include _created lines (See
|
||||||
|
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
|
||||||
|
// Created timestamps can improve the accuracy of series reset detection, but
|
||||||
|
// come with a bandwidth cost.
|
||||||
|
//
|
||||||
|
// At the time of writing, created timestamp ingestion is still experimental in
|
||||||
|
// Prometheus and need to be enabled with the feature-flag
|
||||||
|
// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
|
||||||
|
// still possible. Therefore, it is recommended to use this feature with caution.
|
||||||
|
func WithCreatedLines() EncoderOption {
|
||||||
|
return func(t *encoderOption) {
|
||||||
|
t.withCreatedLines = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUnit is an EncoderOption enabling a set unit to be written to the output
|
||||||
|
// and to be added to the metric name, if it's not there already, as a suffix.
|
||||||
|
// Without opting in this way, the unit will not be added to the metric name and,
|
||||||
|
// on top of that, the unit will not be passed onto the output, even if it
|
||||||
|
// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil.
|
||||||
|
func WithUnit() EncoderOption {
|
||||||
|
return func(t *encoderOption) {
|
||||||
|
t.withUnit = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
||||||
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
||||||
// the number of bytes written and any error encountered. The output will have
|
// the number of bytes written and any error encountered. The output will have
|
||||||
@ -59,20 +95,34 @@ import (
|
|||||||
// Prometheus to OpenMetrics or vice versa:
|
// Prometheus to OpenMetrics or vice versa:
|
||||||
//
|
//
|
||||||
// - Counters are expected to have the `_total` suffix in their metric name. In
|
// - Counters are expected to have the `_total` suffix in their metric name. In
|
||||||
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
|
// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
|
||||||
// line. A counter with a missing `_total` suffix is not an error. However,
|
// lines. A counter with a missing `_total` suffix is not an error. However,
|
||||||
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
||||||
// output.
|
// output.
|
||||||
//
|
//
|
||||||
// - No support for the following (optional) features: `# UNIT` line, `_created`
|
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
|
||||||
// line, info type, stateset type, gaugehistogram type.
|
// the unit has to be present in the metric name as its suffix:
|
||||||
|
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
|
||||||
|
// However, in order to accommodate any potential scenario where such a change in the
|
||||||
|
// metric name is not desirable, the users are here given the choice of either explicitly
|
||||||
|
// opt in, in case they wish for the unit to be included in the output AND in the metric name
|
||||||
|
// as a suffix (see the description of the WithUnit function above),
|
||||||
|
// or not to opt in, in case they don't want for any of that to happen.
|
||||||
|
//
|
||||||
|
// - No support for the following (optional) features: info type,
|
||||||
|
// stateset type, gaugehistogram type.
|
||||||
//
|
//
|
||||||
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
||||||
// exemplars that are larger than allowed by the OpenMetrics specification).
|
// exemplars that are larger than allowed by the OpenMetrics specification).
|
||||||
//
|
//
|
||||||
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
||||||
// with a `NaN` value.)
|
// with a `NaN` value.)
|
||||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
|
||||||
|
toOM := encoderOption{}
|
||||||
|
for _, option := range options {
|
||||||
|
option(&toOM)
|
||||||
|
}
|
||||||
|
|
||||||
name := in.GetName()
|
name := in.GetName()
|
||||||
if name == "" {
|
if name == "" {
|
||||||
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||||
@ -95,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
n int
|
n int
|
||||||
metricType = in.GetType()
|
metricType = in.GetType()
|
||||||
shortName = name
|
compliantName = name
|
||||||
)
|
)
|
||||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
|
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
|
||||||
shortName = name[:len(name)-6]
|
compliantName = name[:len(name)-6]
|
||||||
|
}
|
||||||
|
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
|
||||||
|
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Comments, first HELP, then TYPE.
|
// Comments, first HELP, then TYPE.
|
||||||
@ -110,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n, err = writeName(w, shortName)
|
n, err = writeName(w, compliantName)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@ -136,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n, err = writeName(w, shortName)
|
n, err = writeName(w, compliantName)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@ -163,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if toOM.withUnit && in.Unit != nil {
|
||||||
|
n, err = w.WriteString("# UNIT ")
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = writeName(w, compliantName)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n, err = writeEscapedString(w, *in.Unit, true)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = w.WriteByte('\n')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var createdTsBytesWritten int
|
||||||
|
|
||||||
// Finally the samples, one line for each.
|
// Finally the samples, one line for each.
|
||||||
|
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
|
||||||
|
compliantName = compliantName + "_total"
|
||||||
|
}
|
||||||
for _, metric := range in.Metric {
|
for _, metric := range in.Metric {
|
||||||
switch metricType {
|
switch metricType {
|
||||||
case dto.MetricType_COUNTER:
|
case dto.MetricType_COUNTER:
|
||||||
if metric.Counter == nil {
|
if metric.Counter == nil {
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
"expected counter in metric %s %s", name, metric,
|
"expected counter in metric %s %s", compliantName, metric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
// Note that we have ensured above that either the name
|
|
||||||
// ends on `_total` or that the rendered type is
|
|
||||||
// `unknown`. Therefore, no `_total` must be added here.
|
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "", metric, "", 0,
|
w, compliantName, "", metric, "", 0,
|
||||||
metric.Counter.GetValue(), 0, false,
|
metric.Counter.GetValue(), 0, false,
|
||||||
metric.Counter.Exemplar,
|
metric.Counter.Exemplar,
|
||||||
)
|
)
|
||||||
|
if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
|
||||||
|
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
|
||||||
|
n += createdTsBytesWritten
|
||||||
|
}
|
||||||
case dto.MetricType_GAUGE:
|
case dto.MetricType_GAUGE:
|
||||||
if metric.Gauge == nil {
|
if metric.Gauge == nil {
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
"expected gauge in metric %s %s", name, metric,
|
"expected gauge in metric %s %s", compliantName, metric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "", metric, "", 0,
|
w, compliantName, "", metric, "", 0,
|
||||||
metric.Gauge.GetValue(), 0, false,
|
metric.Gauge.GetValue(), 0, false,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
case dto.MetricType_UNTYPED:
|
case dto.MetricType_UNTYPED:
|
||||||
if metric.Untyped == nil {
|
if metric.Untyped == nil {
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
"expected untyped in metric %s %s", name, metric,
|
"expected untyped in metric %s %s", compliantName, metric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "", metric, "", 0,
|
w, compliantName, "", metric, "", 0,
|
||||||
metric.Untyped.GetValue(), 0, false,
|
metric.Untyped.GetValue(), 0, false,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
case dto.MetricType_SUMMARY:
|
case dto.MetricType_SUMMARY:
|
||||||
if metric.Summary == nil {
|
if metric.Summary == nil {
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
"expected summary in metric %s %s", name, metric,
|
"expected summary in metric %s %s", compliantName, metric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
for _, q := range metric.Summary.Quantile {
|
for _, q := range metric.Summary.Quantile {
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "", metric,
|
w, compliantName, "", metric,
|
||||||
model.QuantileLabel, q.GetQuantile(),
|
model.QuantileLabel, q.GetQuantile(),
|
||||||
q.GetValue(), 0, false,
|
q.GetValue(), 0, false,
|
||||||
nil,
|
nil,
|
||||||
@ -222,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "_sum", metric, "", 0,
|
w, compliantName, "_sum", metric, "", 0,
|
||||||
metric.Summary.GetSampleSum(), 0, false,
|
metric.Summary.GetSampleSum(), 0, false,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
@ -231,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "_count", metric, "", 0,
|
w, compliantName, "_count", metric, "", 0,
|
||||||
0, metric.Summary.GetSampleCount(), true,
|
0, metric.Summary.GetSampleCount(), true,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
|
||||||
|
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
|
||||||
|
n += createdTsBytesWritten
|
||||||
|
}
|
||||||
case dto.MetricType_HISTOGRAM:
|
case dto.MetricType_HISTOGRAM:
|
||||||
if metric.Histogram == nil {
|
if metric.Histogram == nil {
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
"expected histogram in metric %s %s", name, metric,
|
"expected histogram in metric %s %s", compliantName, metric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
infSeen := false
|
infSeen := false
|
||||||
for _, b := range metric.Histogram.Bucket {
|
for _, b := range metric.Histogram.Bucket {
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "_bucket", metric,
|
w, compliantName, "_bucket", metric,
|
||||||
model.BucketLabel, b.GetUpperBound(),
|
model.BucketLabel, b.GetUpperBound(),
|
||||||
0, b.GetCumulativeCount(), true,
|
0, b.GetCumulativeCount(), true,
|
||||||
b.Exemplar,
|
b.Exemplar,
|
||||||
@ -259,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
}
|
}
|
||||||
if !infSeen {
|
if !infSeen {
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "_bucket", metric,
|
w, compliantName, "_bucket", metric,
|
||||||
model.BucketLabel, math.Inf(+1),
|
model.BucketLabel, math.Inf(+1),
|
||||||
0, metric.Histogram.GetSampleCount(), true,
|
0, metric.Histogram.GetSampleCount(), true,
|
||||||
nil,
|
nil,
|
||||||
@ -270,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "_sum", metric, "", 0,
|
w, compliantName, "_sum", metric, "", 0,
|
||||||
metric.Histogram.GetSampleSum(), 0, false,
|
metric.Histogram.GetSampleSum(), 0, false,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
@ -279,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
n, err = writeOpenMetricsSample(
|
n, err = writeOpenMetricsSample(
|
||||||
w, name, "_count", metric, "", 0,
|
w, compliantName, "_count", metric, "", 0,
|
||||||
0, metric.Histogram.GetSampleCount(), true,
|
0, metric.Histogram.GetSampleCount(), true,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
|
if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
|
||||||
|
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
|
||||||
|
n += createdTsBytesWritten
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return written, fmt.Errorf(
|
return written, fmt.Errorf(
|
||||||
"unexpected type in metric %s %s", name, metric,
|
"unexpected type in metric %s %s", compliantName, metric,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
written += n
|
written += n
|
||||||
@ -350,7 +445,7 @@ func writeOpenMetricsSample(
|
|||||||
return written, err
|
return written, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if exemplar != nil {
|
if exemplar != nil && len(exemplar.Label) > 0 {
|
||||||
n, err = writeExemplar(w, exemplar)
|
n, err = writeExemplar(w, exemplar)
|
||||||
written += n
|
written += n
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -473,6 +568,49 @@ func writeOpenMetricsNameAndLabelPairs(
|
|||||||
return written, nil
|
return written, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// writeOpenMetricsCreated writes the created timestamp for a single time series
|
||||||
|
// following OpenMetrics text format to w, given the metric name, the metric proto
|
||||||
|
// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
|
||||||
|
// an additional label name with a float64 value (use empty string as label name if
|
||||||
|
// not required) and the timestamp that represents the created timestamp.
|
||||||
|
// The function returns the number of bytes written and any error encountered.
|
||||||
|
func writeOpenMetricsCreated(w enhancedWriter,
|
||||||
|
name, suffixToTrim string, metric *dto.Metric,
|
||||||
|
additionalLabelName string, additionalLabelValue float64,
|
||||||
|
createdTimestamp *timestamppb.Timestamp,
|
||||||
|
) (int, error) {
|
||||||
|
written := 0
|
||||||
|
n, err := writeOpenMetricsNameAndLabelPairs(
|
||||||
|
w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
|
||||||
|
)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.WriteByte(' ')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(beorn7): Format this directly from components of ts to
|
||||||
|
// avoid overflow/underflow and precision issues of the float
|
||||||
|
// conversion.
|
||||||
|
n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = w.WriteByte('\n')
|
||||||
|
written++
|
||||||
|
if err != nil {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
return written, nil
|
||||||
|
}
|
||||||
|
|
||||||
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
||||||
// function returns the number of bytes written and any error encountered.
|
// function returns the number of bytes written and any error encountered.
|
||||||
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
||||||
|
27
vendor/github.com/prometheus/common/model/alert.go
generated
vendored
27
vendor/github.com/prometheus/common/model/alert.go
generated
vendored
@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool {
|
|||||||
|
|
||||||
// Status returns the status of the alert.
|
// Status returns the status of the alert.
|
||||||
func (a *Alert) Status() AlertStatus {
|
func (a *Alert) Status() AlertStatus {
|
||||||
if a.Resolved() {
|
return a.StatusAt(time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusAt returns the status of the alert at the given timestamp.
|
||||||
|
func (a *Alert) StatusAt(ts time.Time) AlertStatus {
|
||||||
|
if a.ResolvedAt(ts) {
|
||||||
return AlertResolved
|
return AlertResolved
|
||||||
}
|
}
|
||||||
return AlertFiring
|
return AlertFiring
|
||||||
@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasFiringAt returns true iff one of the alerts is not resolved
|
||||||
|
// at the time ts.
|
||||||
|
func (as Alerts) HasFiringAt(ts time.Time) bool {
|
||||||
|
for _, a := range as {
|
||||||
|
if !a.ResolvedAt(ts) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// Status returns StatusFiring iff at least one of the alerts is firing.
|
// Status returns StatusFiring iff at least one of the alerts is firing.
|
||||||
func (as Alerts) Status() AlertStatus {
|
func (as Alerts) Status() AlertStatus {
|
||||||
if as.HasFiring() {
|
if as.HasFiring() {
|
||||||
@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus {
|
|||||||
}
|
}
|
||||||
return AlertResolved
|
return AlertResolved
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StatusAt returns StatusFiring iff at least one of the alerts is firing
|
||||||
|
// at the time ts.
|
||||||
|
func (as Alerts) StatusAt(ts time.Time) AlertStatus {
|
||||||
|
if as.HasFiringAt(ts) {
|
||||||
|
return AlertFiring
|
||||||
|
}
|
||||||
|
return AlertResolved
|
||||||
|
}
|
||||||
|
11
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
11
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
@ -17,7 +17,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
|
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
|
||||||
@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l LabelSet) String() string {
|
|
||||||
lstrs := make([]string, 0, len(l))
|
|
||||||
for l, v := range l {
|
|
||||||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(lstrs)
|
|
||||||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fingerprint returns the LabelSet's fingerprint.
|
// Fingerprint returns the LabelSet's fingerprint.
|
||||||
func (ls LabelSet) Fingerprint() Fingerprint {
|
func (ls LabelSet) Fingerprint() Fingerprint {
|
||||||
return labelSetToFingerprint(ls)
|
return labelSetToFingerprint(ls)
|
||||||
|
45
vendor/github.com/prometheus/common/model/labelset_string.go
generated
vendored
Normal file
45
vendor/github.com/prometheus/common/model/labelset_string.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build go1.21
|
||||||
|
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
|
||||||
|
func (l LabelSet) String() string {
|
||||||
|
var lna [32]string // On stack to avoid memory allocation for sorting names.
|
||||||
|
labelNames := lna[:0]
|
||||||
|
for name := range l {
|
||||||
|
labelNames = append(labelNames, string(name))
|
||||||
|
}
|
||||||
|
slices.Sort(labelNames)
|
||||||
|
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
|
||||||
|
b := bytes.NewBuffer(bytea[:0])
|
||||||
|
b.WriteByte('{')
|
||||||
|
for i, name := range labelNames {
|
||||||
|
if i > 0 {
|
||||||
|
b.WriteString(", ")
|
||||||
|
}
|
||||||
|
b.WriteString(name)
|
||||||
|
b.WriteByte('=')
|
||||||
|
b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
|
||||||
|
}
|
||||||
|
b.WriteByte('}')
|
||||||
|
return b.String()
|
||||||
|
}
|
39
vendor/github.com/prometheus/common/model/labelset_string_go120.go
generated
vendored
Normal file
39
vendor/github.com/prometheus/common/model/labelset_string_go120.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
//go:build !go1.21
|
||||||
|
|
||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// String was optimized using functions not available for go 1.20
|
||||||
|
// or lower. We keep the old implementation for compatibility with client_golang.
|
||||||
|
// Once client golang drops support for go 1.20 (scheduled for August 2024), this
|
||||||
|
// file can be removed.
|
||||||
|
func (l LabelSet) String() string {
|
||||||
|
labelNames := make([]string, 0, len(l))
|
||||||
|
for name := range l {
|
||||||
|
labelNames = append(labelNames, string(name))
|
||||||
|
}
|
||||||
|
sort.Strings(labelNames)
|
||||||
|
lstrs := make([]string, 0, len(l))
|
||||||
|
for _, name := range labelNames {
|
||||||
|
lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
||||||
|
}
|
1
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
1
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
@ -204,6 +204,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
|
|||||||
out := &dto.MetricFamily{
|
out := &dto.MetricFamily{
|
||||||
Help: v.Help,
|
Help: v.Help,
|
||||||
Type: v.Type,
|
Type: v.Type,
|
||||||
|
Unit: v.Unit,
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the name is nil, copy as-is, don't try to escape.
|
// If the name is nil, copy as-is, don't try to escape.
|
||||||
|
7
vendor/github.com/prometheus/procfs/.golangci.yml
generated
vendored
7
vendor/github.com/prometheus/procfs/.golangci.yml
generated
vendored
@ -1,9 +1,16 @@
|
|||||||
---
|
---
|
||||||
linters:
|
linters:
|
||||||
enable:
|
enable:
|
||||||
|
- errcheck
|
||||||
- godot
|
- godot
|
||||||
|
- gosimple
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
- misspell
|
- misspell
|
||||||
- revive
|
- revive
|
||||||
|
- staticcheck
|
||||||
|
- testifylint
|
||||||
|
- unused
|
||||||
|
|
||||||
linter-settings:
|
linter-settings:
|
||||||
godot:
|
godot:
|
||||||
|
3
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
3
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
@ -1,2 +1,3 @@
|
|||||||
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
|
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
|
||||||
* Paul Gier <pgier@redhat.com> @pgier
|
* Paul Gier <paulgier@gmail.com> @pgier
|
||||||
|
* Ben Kochie <superq@gmail.com> @SuperQ
|
||||||
|
26
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
26
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
@ -49,23 +49,23 @@ endif
|
|||||||
GOTEST := $(GO) test
|
GOTEST := $(GO) test
|
||||||
GOTEST_DIR :=
|
GOTEST_DIR :=
|
||||||
ifneq ($(CIRCLE_JOB),)
|
ifneq ($(CIRCLE_JOB),)
|
||||||
ifneq ($(shell command -v gotestsum > /dev/null),)
|
ifneq ($(shell command -v gotestsum 2> /dev/null),)
|
||||||
GOTEST_DIR := test-results
|
GOTEST_DIR := test-results
|
||||||
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
|
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
|
||||||
endif
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
PROMU_VERSION ?= 0.15.0
|
PROMU_VERSION ?= 0.17.0
|
||||||
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
||||||
|
|
||||||
SKIP_GOLANGCI_LINT :=
|
SKIP_GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT :=
|
GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT_OPTS ?=
|
GOLANGCI_LINT_OPTS ?=
|
||||||
GOLANGCI_LINT_VERSION ?= v1.54.2
|
GOLANGCI_LINT_VERSION ?= v1.59.0
|
||||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||||
# windows isn't included here because of the path separator being different.
|
# windows isn't included here because of the path separator being different.
|
||||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||||
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
|
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
|
||||||
# If we're in CI and there is an Actions file, that means the linter
|
# If we're in CI and there is an Actions file, that means the linter
|
||||||
# is being run in Actions, so we don't need to run it here.
|
# is being run in Actions, so we don't need to run it here.
|
||||||
ifneq (,$(SKIP_GOLANGCI_LINT))
|
ifneq (,$(SKIP_GOLANGCI_LINT))
|
||||||
@ -169,16 +169,20 @@ common-vet:
|
|||||||
common-lint: $(GOLANGCI_LINT)
|
common-lint: $(GOLANGCI_LINT)
|
||||||
ifdef GOLANGCI_LINT
|
ifdef GOLANGCI_LINT
|
||||||
@echo ">> running golangci-lint"
|
@echo ">> running golangci-lint"
|
||||||
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
|
|
||||||
# Otherwise staticcheck might fail randomly for some reason not yet explained.
|
|
||||||
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
|
|
||||||
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
|
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
.PHONY: common-lint-fix
|
||||||
|
common-lint-fix: $(GOLANGCI_LINT)
|
||||||
|
ifdef GOLANGCI_LINT
|
||||||
|
@echo ">> running golangci-lint fix"
|
||||||
|
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
|
||||||
|
endif
|
||||||
|
|
||||||
.PHONY: common-yamllint
|
.PHONY: common-yamllint
|
||||||
common-yamllint:
|
common-yamllint:
|
||||||
@echo ">> running yamllint on all YAML files in the repository"
|
@echo ">> running yamllint on all YAML files in the repository"
|
||||||
ifeq (, $(shell command -v yamllint > /dev/null))
|
ifeq (, $(shell command -v yamllint 2> /dev/null))
|
||||||
@echo "yamllint not installed so skipping"
|
@echo "yamllint not installed so skipping"
|
||||||
else
|
else
|
||||||
yamllint .
|
yamllint .
|
||||||
@ -204,6 +208,10 @@ common-tarball: promu
|
|||||||
@echo ">> building release tarball"
|
@echo ">> building release tarball"
|
||||||
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
||||||
|
|
||||||
|
.PHONY: common-docker-repo-name
|
||||||
|
common-docker-repo-name:
|
||||||
|
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
|
||||||
|
|
||||||
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
|
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
|
||||||
common-docker: $(BUILD_DOCKER_ARCHS)
|
common-docker: $(BUILD_DOCKER_ARCHS)
|
||||||
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
||||||
|
6
vendor/github.com/prometheus/procfs/arp.go
generated
vendored
6
vendor/github.com/prometheus/procfs/arp.go
generated
vendored
@ -55,7 +55,7 @@ type ARPEntry struct {
|
|||||||
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
|
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
|
||||||
data, err := os.ReadFile(fs.proc.Path("net/arp"))
|
data, err := os.ReadFile(fs.proc.Path("net/arp"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
|
return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return parseARPEntries(data)
|
return parseARPEntries(data)
|
||||||
@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
|
|||||||
} else if width == expectedDataWidth {
|
} else if width == expectedDataWidth {
|
||||||
entry, err := parseARPEntry(columns)
|
entry, err := parseARPEntry(columns)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
|
return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
|
||||||
}
|
}
|
||||||
entries = append(entries, entry)
|
entries = append(entries, entry)
|
||||||
} else {
|
} else {
|
||||||
return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
|
return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
6
vendor/github.com/prometheus/procfs/buddyinfo.go
generated
vendored
6
vendor/github.com/prometheus/procfs/buddyinfo.go
generated
vendored
@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
|||||||
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
|
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
|
||||||
}
|
}
|
||||||
|
|
||||||
node := strings.TrimRight(parts[1], ",")
|
node := strings.TrimSuffix(parts[1], ",")
|
||||||
zone := strings.TrimRight(parts[3], ",")
|
zone := strings.TrimSuffix(parts[3], ",")
|
||||||
arraySize := len(parts[4:])
|
arraySize := len(parts[4:])
|
||||||
|
|
||||||
if bucketCount == -1 {
|
if bucketCount == -1 {
|
||||||
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
|||||||
for i := 0; i < arraySize; i++ {
|
for i := 0; i < arraySize; i++ {
|
||||||
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
|
return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
4
vendor/github.com/prometheus/procfs/cpuinfo.go
generated
vendored
4
vendor/github.com/prometheus/procfs/cpuinfo.go
generated
vendored
@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
|
|||||||
firstLine := firstNonEmptyLine(scanner)
|
firstLine := firstNonEmptyLine(scanner)
|
||||||
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
|
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
|
||||||
if !match || !strings.Contains(firstLine, ":") {
|
if !match || !strings.Contains(firstLine, ":") {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
|
return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
|
||||||
|
|
||||||
}
|
}
|
||||||
field := strings.SplitN(firstLine, ": ", 2)
|
field := strings.SplitN(firstLine, ": ", 2)
|
||||||
@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
|
|||||||
// find the first "processor" line
|
// find the first "processor" line
|
||||||
firstLine := firstNonEmptyLine(scanner)
|
firstLine := firstNonEmptyLine(scanner)
|
||||||
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
|
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
|
||||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
|
||||||
}
|
}
|
||||||
field := strings.SplitN(firstLine, ": ", 2)
|
field := strings.SplitN(firstLine, ": ", 2)
|
||||||
cpuinfo := []CPUInfo{}
|
cpuinfo := []CPUInfo{}
|
||||||
|
6
vendor/github.com/prometheus/procfs/crypto.go
generated
vendored
6
vendor/github.com/prometheus/procfs/crypto.go
generated
vendored
@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
|
|||||||
path := fs.proc.Path("crypto")
|
path := fs.proc.Path("crypto")
|
||||||
b, err := util.ReadFileNoStat(path)
|
b, err := util.ReadFileNoStat(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
|
return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
crypto, err := parseCrypto(bytes.NewReader(b))
|
crypto, err := parseCrypto(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
|
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return crypto, nil
|
return crypto, nil
|
||||||
@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
|
|||||||
|
|
||||||
kv := strings.Split(text, ":")
|
kv := strings.Split(text, ":")
|
||||||
if len(kv) != 2 {
|
if len(kv) != 2 {
|
||||||
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
|
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
|
||||||
}
|
}
|
||||||
|
|
||||||
k := strings.TrimSpace(kv[0])
|
k := strings.TrimSpace(kv[0])
|
||||||
|
4
vendor/github.com/prometheus/procfs/fscache.go
generated
vendored
4
vendor/github.com/prometheus/procfs/fscache.go
generated
vendored
@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
|
|||||||
|
|
||||||
m, err := parseFscacheinfo(bytes.NewReader(b))
|
m, err := parseFscacheinfo(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
|
return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return *m, nil
|
return *m, nil
|
||||||
@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
|
|||||||
func setFSCacheFields(fields []string, setFields ...*uint64) error {
|
func setFSCacheFields(fields []string, setFields ...*uint64) error {
|
||||||
var err error
|
var err error
|
||||||
if len(fields) < len(setFields) {
|
if len(fields) < len(setFields) {
|
||||||
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
|
return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range setFields {
|
for i := range setFields {
|
||||||
|
6
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
6
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
|
|||||||
case 46:
|
case 46:
|
||||||
ip = net.ParseIP(s[1:40])
|
ip = net.ParseIP(s[1:40])
|
||||||
if ip == nil {
|
if ip == nil {
|
||||||
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
|
return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
|
return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
portString := s[len(s)-4:]
|
portString := s[len(s)-4:]
|
||||||
if len(portString) != 4 {
|
if len(portString) != 4 {
|
||||||
return nil, 0,
|
return nil, 0,
|
||||||
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
|
fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
|
||||||
}
|
}
|
||||||
port, err := strconv.ParseUint(portString, 16, 16)
|
port, err := strconv.ParseUint(portString, 16, 16)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
2
vendor/github.com/prometheus/procfs/loadavg.go
generated
vendored
2
vendor/github.com/prometheus/procfs/loadavg.go
generated
vendored
@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
|
|||||||
for i, load := range parts[0:3] {
|
for i, load := range parts[0:3] {
|
||||||
loads[i], err = strconv.ParseFloat(load, 64)
|
loads[i], err = strconv.ParseFloat(load, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
|
return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return &LoadAvg{
|
return &LoadAvg{
|
||||||
|
60
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
60
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
|
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
|
||||||
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
|
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
|
||||||
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
|
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
|
||||||
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
|
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
|
||||||
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
|
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
|
||||||
@ -50,6 +50,8 @@ type MDStat struct {
|
|||||||
BlocksTotal int64
|
BlocksTotal int64
|
||||||
// Number of blocks on the device that are in sync.
|
// Number of blocks on the device that are in sync.
|
||||||
BlocksSynced int64
|
BlocksSynced int64
|
||||||
|
// Number of blocks on the device that need to be synced.
|
||||||
|
BlocksToBeSynced int64
|
||||||
// progress percentage of current sync
|
// progress percentage of current sync
|
||||||
BlocksSyncedPct float64
|
BlocksSyncedPct float64
|
||||||
// estimated finishing time for current sync (in minutes)
|
// estimated finishing time for current sync (in minutes)
|
||||||
@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
|
|||||||
}
|
}
|
||||||
mdstat, err := parseMDStat(data)
|
mdstat, err := parseMDStat(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
|
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
|
||||||
}
|
}
|
||||||
return mdstat, nil
|
return mdstat, nil
|
||||||
}
|
}
|
||||||
@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||||||
|
|
||||||
deviceFields := strings.Fields(line)
|
deviceFields := strings.Fields(line)
|
||||||
if len(deviceFields) < 3 {
|
if len(deviceFields) < 3 {
|
||||||
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
|
return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
|
||||||
}
|
}
|
||||||
mdName := deviceFields[0] // mdx
|
mdName := deviceFields[0] // mdx
|
||||||
state := deviceFields[2] // active or inactive
|
state := deviceFields[2] // active or inactive
|
||||||
@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||||||
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
|
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
|
return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
syncLineIdx := i + 2
|
syncLineIdx := i + 2
|
||||||
@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||||||
|
|
||||||
// If device is syncing at the moment, get the number of currently
|
// If device is syncing at the moment, get the number of currently
|
||||||
// synced bytes, otherwise that number equals the size of the device.
|
// synced bytes, otherwise that number equals the size of the device.
|
||||||
syncedBlocks := size
|
blocksSynced := size
|
||||||
|
blocksToBeSynced := size
|
||||||
speed := float64(0)
|
speed := float64(0)
|
||||||
finish := float64(0)
|
finish := float64(0)
|
||||||
pct := float64(0)
|
pct := float64(0)
|
||||||
@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||||||
// Handle case when resync=PENDING or resync=DELAYED.
|
// Handle case when resync=PENDING or resync=DELAYED.
|
||||||
if strings.Contains(lines[syncLineIdx], "PENDING") ||
|
if strings.Contains(lines[syncLineIdx], "PENDING") ||
|
||||||
strings.Contains(lines[syncLineIdx], "DELAYED") {
|
strings.Contains(lines[syncLineIdx], "DELAYED") {
|
||||||
syncedBlocks = 0
|
blocksSynced = 0
|
||||||
} else {
|
} else {
|
||||||
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
|
blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
|
return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||||||
DisksSpare: spare,
|
DisksSpare: spare,
|
||||||
DisksTotal: total,
|
DisksTotal: total,
|
||||||
BlocksTotal: size,
|
BlocksTotal: size,
|
||||||
BlocksSynced: syncedBlocks,
|
BlocksSynced: blocksSynced,
|
||||||
|
BlocksToBeSynced: blocksToBeSynced,
|
||||||
BlocksSyncedPct: pct,
|
BlocksSyncedPct: pct,
|
||||||
BlocksSyncedFinishTime: finish,
|
BlocksSyncedFinishTime: finish,
|
||||||
BlocksSyncedSpeed: speed,
|
BlocksSyncedSpeed: speed,
|
||||||
@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
|
|||||||
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
|
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
|
||||||
statusFields := strings.Fields(statusLine)
|
statusFields := strings.Fields(statusLine)
|
||||||
if len(statusFields) < 1 {
|
if len(statusFields) < 1 {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
sizeStr := statusFields[0]
|
sizeStr := statusFields[0]
|
||||||
size, err = strconv.ParseInt(sizeStr, 10, 64)
|
size, err = strconv.ParseInt(sizeStr, 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
|
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
|
||||||
@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
|
|||||||
|
|
||||||
matches := statusLineRE.FindStringSubmatch(statusLine)
|
matches := statusLineRE.FindStringSubmatch(statusLine)
|
||||||
if len(matches) != 5 {
|
if len(matches) != 5 {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
|
return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
total, err = strconv.ParseInt(matches[2], 10, 64)
|
total, err = strconv.ParseInt(matches[2], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
active, err = strconv.ParseInt(matches[3], 10, 64)
|
active, err = strconv.ParseInt(matches[3], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
|
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
|
||||||
}
|
}
|
||||||
down = int64(strings.Count(matches[4], "_"))
|
down = int64(strings.Count(matches[4], "_"))
|
||||||
|
|
||||||
return active, total, down, size, nil
|
return active, total, down, size, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
|
func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
|
||||||
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
|
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
|
||||||
if len(matches) != 2 {
|
if len(matches) != 2 {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
|
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
|
blocks := strings.Split(matches[1], "/")
|
||||||
|
blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
|
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
|
||||||
|
}
|
||||||
|
|
||||||
|
blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get percentage complete
|
// Get percentage complete
|
||||||
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
|
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
|
||||||
if len(matches) != 2 {
|
if len(matches) != 2 {
|
||||||
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
|
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
|
||||||
}
|
}
|
||||||
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
|
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
|
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get time expected left to complete
|
// Get time expected left to complete
|
||||||
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
|
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
|
||||||
if len(matches) != 2 {
|
if len(matches) != 2 {
|
||||||
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
|
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
|
||||||
}
|
}
|
||||||
finish, err = strconv.ParseFloat(matches[1], 64)
|
finish, err = strconv.ParseFloat(matches[1], 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
|
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get recovery speed
|
// Get recovery speed
|
||||||
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
|
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
|
||||||
if len(matches) != 2 {
|
if len(matches) != 2 {
|
||||||
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
|
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
|
||||||
}
|
}
|
||||||
speed, err = strconv.ParseFloat(matches[1], 64)
|
speed, err = strconv.ParseFloat(matches[1], 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
|
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return syncedBlocks, pct, finish, speed, nil
|
return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func evalComponentDevices(deviceFields []string) []string {
|
func evalComponentDevices(deviceFields []string) []string {
|
||||||
|
220
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
220
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
@ -126,6 +126,7 @@ type Meminfo struct {
|
|||||||
VmallocUsed *uint64
|
VmallocUsed *uint64
|
||||||
// largest contiguous block of vmalloc area which is free
|
// largest contiguous block of vmalloc area which is free
|
||||||
VmallocChunk *uint64
|
VmallocChunk *uint64
|
||||||
|
Percpu *uint64
|
||||||
HardwareCorrupted *uint64
|
HardwareCorrupted *uint64
|
||||||
AnonHugePages *uint64
|
AnonHugePages *uint64
|
||||||
ShmemHugePages *uint64
|
ShmemHugePages *uint64
|
||||||
@ -140,6 +141,55 @@ type Meminfo struct {
|
|||||||
DirectMap4k *uint64
|
DirectMap4k *uint64
|
||||||
DirectMap2M *uint64
|
DirectMap2M *uint64
|
||||||
DirectMap1G *uint64
|
DirectMap1G *uint64
|
||||||
|
|
||||||
|
// The struct fields below are the byte-normalized counterparts to the
|
||||||
|
// existing struct fields. Values are normalized using the optional
|
||||||
|
// unit field in the meminfo line.
|
||||||
|
MemTotalBytes *uint64
|
||||||
|
MemFreeBytes *uint64
|
||||||
|
MemAvailableBytes *uint64
|
||||||
|
BuffersBytes *uint64
|
||||||
|
CachedBytes *uint64
|
||||||
|
SwapCachedBytes *uint64
|
||||||
|
ActiveBytes *uint64
|
||||||
|
InactiveBytes *uint64
|
||||||
|
ActiveAnonBytes *uint64
|
||||||
|
InactiveAnonBytes *uint64
|
||||||
|
ActiveFileBytes *uint64
|
||||||
|
InactiveFileBytes *uint64
|
||||||
|
UnevictableBytes *uint64
|
||||||
|
MlockedBytes *uint64
|
||||||
|
SwapTotalBytes *uint64
|
||||||
|
SwapFreeBytes *uint64
|
||||||
|
DirtyBytes *uint64
|
||||||
|
WritebackBytes *uint64
|
||||||
|
AnonPagesBytes *uint64
|
||||||
|
MappedBytes *uint64
|
||||||
|
ShmemBytes *uint64
|
||||||
|
SlabBytes *uint64
|
||||||
|
SReclaimableBytes *uint64
|
||||||
|
SUnreclaimBytes *uint64
|
||||||
|
KernelStackBytes *uint64
|
||||||
|
PageTablesBytes *uint64
|
||||||
|
NFSUnstableBytes *uint64
|
||||||
|
BounceBytes *uint64
|
||||||
|
WritebackTmpBytes *uint64
|
||||||
|
CommitLimitBytes *uint64
|
||||||
|
CommittedASBytes *uint64
|
||||||
|
VmallocTotalBytes *uint64
|
||||||
|
VmallocUsedBytes *uint64
|
||||||
|
VmallocChunkBytes *uint64
|
||||||
|
PercpuBytes *uint64
|
||||||
|
HardwareCorruptedBytes *uint64
|
||||||
|
AnonHugePagesBytes *uint64
|
||||||
|
ShmemHugePagesBytes *uint64
|
||||||
|
ShmemPmdMappedBytes *uint64
|
||||||
|
CmaTotalBytes *uint64
|
||||||
|
CmaFreeBytes *uint64
|
||||||
|
HugepagesizeBytes *uint64
|
||||||
|
DirectMap4kBytes *uint64
|
||||||
|
DirectMap2MBytes *uint64
|
||||||
|
DirectMap1GBytes *uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// Meminfo returns an information about current kernel/system memory statistics.
|
// Meminfo returns an information about current kernel/system memory statistics.
|
||||||
@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
|
|||||||
|
|
||||||
m, err := parseMemInfo(bytes.NewReader(b))
|
m, err := parseMemInfo(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
|
return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return *m, nil
|
return *m, nil
|
||||||
@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
|||||||
var m Meminfo
|
var m Meminfo
|
||||||
s := bufio.NewScanner(r)
|
s := bufio.NewScanner(r)
|
||||||
for s.Scan() {
|
for s.Scan() {
|
||||||
// Each line has at least a name and value; we ignore the unit.
|
|
||||||
fields := strings.Fields(s.Text())
|
fields := strings.Fields(s.Text())
|
||||||
if len(fields) < 2 {
|
var val, valBytes uint64
|
||||||
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
|
|
||||||
}
|
|
||||||
|
|
||||||
v, err := strconv.ParseUint(fields[1], 0, 64)
|
val, err := strconv.ParseUint(fields[1], 0, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
switch len(fields) {
|
||||||
|
case 2:
|
||||||
|
// No unit present, use the parsed the value as bytes directly.
|
||||||
|
valBytes = val
|
||||||
|
case 3:
|
||||||
|
// Unit present in optional 3rd field, convert it to
|
||||||
|
// bytes. The only unit supported within the Linux
|
||||||
|
// kernel is `kB`.
|
||||||
|
if fields[2] != "kB" {
|
||||||
|
return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
|
||||||
|
}
|
||||||
|
|
||||||
|
valBytes = 1024 * val
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
|
||||||
|
}
|
||||||
|
|
||||||
switch fields[0] {
|
switch fields[0] {
|
||||||
case "MemTotal:":
|
case "MemTotal:":
|
||||||
m.MemTotal = &v
|
m.MemTotal = &val
|
||||||
|
m.MemTotalBytes = &valBytes
|
||||||
case "MemFree:":
|
case "MemFree:":
|
||||||
m.MemFree = &v
|
m.MemFree = &val
|
||||||
|
m.MemFreeBytes = &valBytes
|
||||||
case "MemAvailable:":
|
case "MemAvailable:":
|
||||||
m.MemAvailable = &v
|
m.MemAvailable = &val
|
||||||
|
m.MemAvailableBytes = &valBytes
|
||||||
case "Buffers:":
|
case "Buffers:":
|
||||||
m.Buffers = &v
|
m.Buffers = &val
|
||||||
|
m.BuffersBytes = &valBytes
|
||||||
case "Cached:":
|
case "Cached:":
|
||||||
m.Cached = &v
|
m.Cached = &val
|
||||||
|
m.CachedBytes = &valBytes
|
||||||
case "SwapCached:":
|
case "SwapCached:":
|
||||||
m.SwapCached = &v
|
m.SwapCached = &val
|
||||||
|
m.SwapCachedBytes = &valBytes
|
||||||
case "Active:":
|
case "Active:":
|
||||||
m.Active = &v
|
m.Active = &val
|
||||||
|
m.ActiveBytes = &valBytes
|
||||||
case "Inactive:":
|
case "Inactive:":
|
||||||
m.Inactive = &v
|
m.Inactive = &val
|
||||||
|
m.InactiveBytes = &valBytes
|
||||||
case "Active(anon):":
|
case "Active(anon):":
|
||||||
m.ActiveAnon = &v
|
m.ActiveAnon = &val
|
||||||
|
m.ActiveAnonBytes = &valBytes
|
||||||
case "Inactive(anon):":
|
case "Inactive(anon):":
|
||||||
m.InactiveAnon = &v
|
m.InactiveAnon = &val
|
||||||
|
m.InactiveAnonBytes = &valBytes
|
||||||
case "Active(file):":
|
case "Active(file):":
|
||||||
m.ActiveFile = &v
|
m.ActiveFile = &val
|
||||||
|
m.ActiveFileBytes = &valBytes
|
||||||
case "Inactive(file):":
|
case "Inactive(file):":
|
||||||
m.InactiveFile = &v
|
m.InactiveFile = &val
|
||||||
|
m.InactiveFileBytes = &valBytes
|
||||||
case "Unevictable:":
|
case "Unevictable:":
|
||||||
m.Unevictable = &v
|
m.Unevictable = &val
|
||||||
|
m.UnevictableBytes = &valBytes
|
||||||
case "Mlocked:":
|
case "Mlocked:":
|
||||||
m.Mlocked = &v
|
m.Mlocked = &val
|
||||||
|
m.MlockedBytes = &valBytes
|
||||||
case "SwapTotal:":
|
case "SwapTotal:":
|
||||||
m.SwapTotal = &v
|
m.SwapTotal = &val
|
||||||
|
m.SwapTotalBytes = &valBytes
|
||||||
case "SwapFree:":
|
case "SwapFree:":
|
||||||
m.SwapFree = &v
|
m.SwapFree = &val
|
||||||
|
m.SwapFreeBytes = &valBytes
|
||||||
case "Dirty:":
|
case "Dirty:":
|
||||||
m.Dirty = &v
|
m.Dirty = &val
|
||||||
|
m.DirtyBytes = &valBytes
|
||||||
case "Writeback:":
|
case "Writeback:":
|
||||||
m.Writeback = &v
|
m.Writeback = &val
|
||||||
|
m.WritebackBytes = &valBytes
|
||||||
case "AnonPages:":
|
case "AnonPages:":
|
||||||
m.AnonPages = &v
|
m.AnonPages = &val
|
||||||
|
m.AnonPagesBytes = &valBytes
|
||||||
case "Mapped:":
|
case "Mapped:":
|
||||||
m.Mapped = &v
|
m.Mapped = &val
|
||||||
|
m.MappedBytes = &valBytes
|
||||||
case "Shmem:":
|
case "Shmem:":
|
||||||
m.Shmem = &v
|
m.Shmem = &val
|
||||||
|
m.ShmemBytes = &valBytes
|
||||||
case "Slab:":
|
case "Slab:":
|
||||||
m.Slab = &v
|
m.Slab = &val
|
||||||
|
m.SlabBytes = &valBytes
|
||||||
case "SReclaimable:":
|
case "SReclaimable:":
|
||||||
m.SReclaimable = &v
|
m.SReclaimable = &val
|
||||||
|
m.SReclaimableBytes = &valBytes
|
||||||
case "SUnreclaim:":
|
case "SUnreclaim:":
|
||||||
m.SUnreclaim = &v
|
m.SUnreclaim = &val
|
||||||
|
m.SUnreclaimBytes = &valBytes
|
||||||
case "KernelStack:":
|
case "KernelStack:":
|
||||||
m.KernelStack = &v
|
m.KernelStack = &val
|
||||||
|
m.KernelStackBytes = &valBytes
|
||||||
case "PageTables:":
|
case "PageTables:":
|
||||||
m.PageTables = &v
|
m.PageTables = &val
|
||||||
|
m.PageTablesBytes = &valBytes
|
||||||
case "NFS_Unstable:":
|
case "NFS_Unstable:":
|
||||||
m.NFSUnstable = &v
|
m.NFSUnstable = &val
|
||||||
|
m.NFSUnstableBytes = &valBytes
|
||||||
case "Bounce:":
|
case "Bounce:":
|
||||||
m.Bounce = &v
|
m.Bounce = &val
|
||||||
|
m.BounceBytes = &valBytes
|
||||||
case "WritebackTmp:":
|
case "WritebackTmp:":
|
||||||
m.WritebackTmp = &v
|
m.WritebackTmp = &val
|
||||||
|
m.WritebackTmpBytes = &valBytes
|
||||||
case "CommitLimit:":
|
case "CommitLimit:":
|
||||||
m.CommitLimit = &v
|
m.CommitLimit = &val
|
||||||
|
m.CommitLimitBytes = &valBytes
|
||||||
case "Committed_AS:":
|
case "Committed_AS:":
|
||||||
m.CommittedAS = &v
|
m.CommittedAS = &val
|
||||||
|
m.CommittedASBytes = &valBytes
|
||||||
case "VmallocTotal:":
|
case "VmallocTotal:":
|
||||||
m.VmallocTotal = &v
|
m.VmallocTotal = &val
|
||||||
|
m.VmallocTotalBytes = &valBytes
|
||||||
case "VmallocUsed:":
|
case "VmallocUsed:":
|
||||||
m.VmallocUsed = &v
|
m.VmallocUsed = &val
|
||||||
|
m.VmallocUsedBytes = &valBytes
|
||||||
case "VmallocChunk:":
|
case "VmallocChunk:":
|
||||||
m.VmallocChunk = &v
|
m.VmallocChunk = &val
|
||||||
|
m.VmallocChunkBytes = &valBytes
|
||||||
|
case "Percpu:":
|
||||||
|
m.Percpu = &val
|
||||||
|
m.PercpuBytes = &valBytes
|
||||||
case "HardwareCorrupted:":
|
case "HardwareCorrupted:":
|
||||||
m.HardwareCorrupted = &v
|
m.HardwareCorrupted = &val
|
||||||
|
m.HardwareCorruptedBytes = &valBytes
|
||||||
case "AnonHugePages:":
|
case "AnonHugePages:":
|
||||||
m.AnonHugePages = &v
|
m.AnonHugePages = &val
|
||||||
|
m.AnonHugePagesBytes = &valBytes
|
||||||
case "ShmemHugePages:":
|
case "ShmemHugePages:":
|
||||||
m.ShmemHugePages = &v
|
m.ShmemHugePages = &val
|
||||||
|
m.ShmemHugePagesBytes = &valBytes
|
||||||
case "ShmemPmdMapped:":
|
case "ShmemPmdMapped:":
|
||||||
m.ShmemPmdMapped = &v
|
m.ShmemPmdMapped = &val
|
||||||
|
m.ShmemPmdMappedBytes = &valBytes
|
||||||
case "CmaTotal:":
|
case "CmaTotal:":
|
||||||
m.CmaTotal = &v
|
m.CmaTotal = &val
|
||||||
|
m.CmaTotalBytes = &valBytes
|
||||||
case "CmaFree:":
|
case "CmaFree:":
|
||||||
m.CmaFree = &v
|
m.CmaFree = &val
|
||||||
|
m.CmaFreeBytes = &valBytes
|
||||||
case "HugePages_Total:":
|
case "HugePages_Total:":
|
||||||
m.HugePagesTotal = &v
|
m.HugePagesTotal = &val
|
||||||
case "HugePages_Free:":
|
case "HugePages_Free:":
|
||||||
m.HugePagesFree = &v
|
m.HugePagesFree = &val
|
||||||
case "HugePages_Rsvd:":
|
case "HugePages_Rsvd:":
|
||||||
m.HugePagesRsvd = &v
|
m.HugePagesRsvd = &val
|
||||||
case "HugePages_Surp:":
|
case "HugePages_Surp:":
|
||||||
m.HugePagesSurp = &v
|
m.HugePagesSurp = &val
|
||||||
case "Hugepagesize:":
|
case "Hugepagesize:":
|
||||||
m.Hugepagesize = &v
|
m.Hugepagesize = &val
|
||||||
|
m.HugepagesizeBytes = &valBytes
|
||||||
case "DirectMap4k:":
|
case "DirectMap4k:":
|
||||||
m.DirectMap4k = &v
|
m.DirectMap4k = &val
|
||||||
|
m.DirectMap4kBytes = &valBytes
|
||||||
case "DirectMap2M:":
|
case "DirectMap2M:":
|
||||||
m.DirectMap2M = &v
|
m.DirectMap2M = &val
|
||||||
|
m.DirectMap2MBytes = &valBytes
|
||||||
case "DirectMap1G:":
|
case "DirectMap1G:":
|
||||||
m.DirectMap1G = &v
|
m.DirectMap1G = &val
|
||||||
|
m.DirectMap1GBytes = &valBytes
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
2
vendor/github.com/prometheus/procfs/mountinfo.go
generated
vendored
2
vendor/github.com/prometheus/procfs/mountinfo.go
generated
vendored
@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
|
|||||||
if mountInfo[6] != "" {
|
if mountInfo[6] != "" {
|
||||||
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
|
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
|
return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return mount, nil
|
return mount, nil
|
||||||
|
11
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
11
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@ -88,7 +88,7 @@ type MountStatsNFS struct {
|
|||||||
// Statistics broken down by filesystem operation.
|
// Statistics broken down by filesystem operation.
|
||||||
Operations []NFSOperationStats
|
Operations []NFSOperationStats
|
||||||
// Statistics about the NFS RPC transport.
|
// Statistics about the NFS RPC transport.
|
||||||
Transport NFSTransportStats
|
Transport []NFSTransportStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// mountStats implements MountStats.
|
// mountStats implements MountStats.
|
||||||
@ -194,8 +194,6 @@ type NFSOperationStats struct {
|
|||||||
CumulativeTotalResponseMilliseconds uint64
|
CumulativeTotalResponseMilliseconds uint64
|
||||||
// Duration from when a request was enqueued to when it was completely handled.
|
// Duration from when a request was enqueued to when it was completely handled.
|
||||||
CumulativeTotalRequestMilliseconds uint64
|
CumulativeTotalRequestMilliseconds uint64
|
||||||
// The average time from the point the client sends RPC requests until it receives the response.
|
|
||||||
AverageRTTMilliseconds float64
|
|
||||||
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
||||||
Errors uint64
|
Errors uint64
|
||||||
}
|
}
|
||||||
@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
stats.Transport = *tstats
|
stats.Transport = append(stats.Transport, *tstats)
|
||||||
}
|
}
|
||||||
|
|
||||||
// When encountering "per-operation statistics", we must break this
|
// When encountering "per-operation statistics", we must break this
|
||||||
@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
|||||||
CumulativeTotalResponseMilliseconds: ns[6],
|
CumulativeTotalResponseMilliseconds: ns[6],
|
||||||
CumulativeTotalRequestMilliseconds: ns[7],
|
CumulativeTotalRequestMilliseconds: ns[7],
|
||||||
}
|
}
|
||||||
if ns[0] != 0 {
|
|
||||||
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ns) > 8 {
|
if len(ns) > 8 {
|
||||||
opStats.Errors = ns[8]
|
opStats.Errors = ns[8]
|
||||||
@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||||||
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
|
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
|
return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
||||||
|
4
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
4
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
|||||||
|
|
||||||
stat, err := parseConntrackStat(bytes.NewReader(b))
|
stat, err := parseConntrackStat(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
|
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return stat, nil
|
return stat, nil
|
||||||
@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
|||||||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||||
entries, err := util.ParseHexUint64s(fields)
|
entries, err := util.ParseHexUint64s(fields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
|
return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
|
||||||
}
|
}
|
||||||
numEntries := len(entries)
|
numEntries := len(entries)
|
||||||
if numEntries < 16 || numEntries > 17 {
|
if numEntries < 16 || numEntries > 17 {
|
||||||
|
46
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
46
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
@ -50,10 +50,13 @@ type (
|
|||||||
// UsedSockets shows the total number of parsed lines representing the
|
// UsedSockets shows the total number of parsed lines representing the
|
||||||
// number of used sockets.
|
// number of used sockets.
|
||||||
UsedSockets uint64
|
UsedSockets uint64
|
||||||
|
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||||
|
Drops *uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// netIPSocketLine represents the fields parsed from a single line
|
// netIPSocketLine represents the fields parsed from a single line
|
||||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||||
|
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||||
netIPSocketLine struct {
|
netIPSocketLine struct {
|
||||||
Sl uint64
|
Sl uint64
|
||||||
@ -66,6 +69,7 @@ type (
|
|||||||
RxQueue uint64
|
RxQueue uint64
|
||||||
UID uint64
|
UID uint64
|
||||||
Inode uint64
|
Inode uint64
|
||||||
|
Drops *uint64
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
var netIPSocket NetIPSocket
|
var netIPSocket NetIPSocket
|
||||||
|
isUDP := strings.Contains(file, "udp")
|
||||||
|
|
||||||
lr := io.LimitReader(f, readLimit)
|
lr := io.LimitReader(f, readLimit)
|
||||||
s := bufio.NewScanner(lr)
|
s := bufio.NewScanner(lr)
|
||||||
s.Scan() // skip first line with headers
|
s.Scan() // skip first line with headers
|
||||||
for s.Scan() {
|
for s.Scan() {
|
||||||
fields := strings.Fields(s.Text())
|
fields := strings.Fields(s.Text())
|
||||||
line, err := parseNetIPSocketLine(fields)
|
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
var netIPSocketSummary NetIPSocketSummary
|
var netIPSocketSummary NetIPSocketSummary
|
||||||
|
var udpPacketDrops uint64
|
||||||
|
isUDP := strings.Contains(file, "udp")
|
||||||
|
|
||||||
lr := io.LimitReader(f, readLimit)
|
lr := io.LimitReader(f, readLimit)
|
||||||
s := bufio.NewScanner(lr)
|
s := bufio.NewScanner(lr)
|
||||||
s.Scan() // skip first line with headers
|
s.Scan() // skip first line with headers
|
||||||
for s.Scan() {
|
for s.Scan() {
|
||||||
fields := strings.Fields(s.Text())
|
fields := strings.Fields(s.Text())
|
||||||
line, err := parseNetIPSocketLine(fields)
|
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
netIPSocketSummary.TxQueueLength += line.TxQueue
|
netIPSocketSummary.TxQueueLength += line.TxQueue
|
||||||
netIPSocketSummary.RxQueueLength += line.RxQueue
|
netIPSocketSummary.RxQueueLength += line.RxQueue
|
||||||
netIPSocketSummary.UsedSockets++
|
netIPSocketSummary.UsedSockets++
|
||||||
|
if isUDP {
|
||||||
|
udpPacketDrops += *line.Drops
|
||||||
|
netIPSocketSummary.Drops = &udpPacketDrops
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if err := s.Err(); err != nil {
|
if err := s.Err(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
|
|||||||
var byteIP []byte
|
var byteIP []byte
|
||||||
byteIP, err := hex.DecodeString(hexIP)
|
byteIP, err := hex.DecodeString(hexIP)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
|
return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
|
||||||
}
|
}
|
||||||
switch len(byteIP) {
|
switch len(byteIP) {
|
||||||
case 4:
|
case 4:
|
||||||
@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) {
|
|||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
|
return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseNetIPSocketLine parses a single line, represented by a list of fields.
|
// parseNetIPSocketLine parses a single line, represented by a list of fields.
|
||||||
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
|
||||||
line := &netIPSocketLine{}
|
line := &netIPSocketLine{}
|
||||||
if len(fields) < 10 {
|
if len(fields) < 10 {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
|
return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
|
||||||
}
|
}
|
||||||
// local_address
|
// local_address
|
||||||
l := strings.Split(fields[1], ":")
|
l := strings.Split(fields[1], ":")
|
||||||
@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
|
return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// remote_address
|
// remote_address
|
||||||
@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
|
return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// st
|
// st
|
||||||
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
|
return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// tx_queue and rx_queue
|
// tx_queue and rx_queue
|
||||||
@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
|
return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
|
||||||
}
|
}
|
||||||
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
|
return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// uid
|
// uid
|
||||||
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
|
return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// inode
|
// inode
|
||||||
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
|
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// drops
|
||||||
|
if isUDP {
|
||||||
|
drops, err := strconv.ParseUint(fields[12], 0, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
|
||||||
|
}
|
||||||
|
line.Drops = &drops
|
||||||
}
|
}
|
||||||
|
|
||||||
return line, nil
|
return line, nil
|
||||||
|
4
vendor/github.com/prometheus/procfs/net_sockstat.go
generated
vendored
4
vendor/github.com/prometheus/procfs/net_sockstat.go
generated
vendored
@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
|
|||||||
|
|
||||||
stat, err := parseSockstat(bytes.NewReader(b))
|
stat, err := parseSockstat(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
|
return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return stat, nil
|
return stat, nil
|
||||||
@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
|
|||||||
// The remaining fields are key/value pairs.
|
// The remaining fields are key/value pairs.
|
||||||
kvs, err := parseSockstatKVs(fields[1:])
|
kvs, err := parseSockstatKVs(fields[1:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
|
return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The first field is the protocol. We must trim its colon suffix.
|
// The first field is the protocol. We must trim its colon suffix.
|
||||||
|
2
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
2
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
|
|||||||
|
|
||||||
entries, err := parseSoftnet(bytes.NewReader(b))
|
entries, err := parseSoftnet(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
|
return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return entries, nil
|
return entries, nil
|
||||||
|
119
vendor/github.com/prometheus/procfs/net_tls_stat.go
generated
vendored
Normal file
119
vendor/github.com/prometheus/procfs/net_tls_stat.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
// Copyright 2023 Prometheus Team
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TLSStat struct represents data in /proc/net/tls_stat.
|
||||||
|
// See https://docs.kernel.org/networking/tls.html#statistics
|
||||||
|
type TLSStat struct {
|
||||||
|
// number of TX sessions currently installed where host handles cryptography
|
||||||
|
TLSCurrTxSw int
|
||||||
|
// number of RX sessions currently installed where host handles cryptography
|
||||||
|
TLSCurrRxSw int
|
||||||
|
// number of TX sessions currently installed where NIC handles cryptography
|
||||||
|
TLSCurrTxDevice int
|
||||||
|
// number of RX sessions currently installed where NIC handles cryptography
|
||||||
|
TLSCurrRxDevice int
|
||||||
|
//number of TX sessions opened with host cryptography
|
||||||
|
TLSTxSw int
|
||||||
|
//number of RX sessions opened with host cryptography
|
||||||
|
TLSRxSw int
|
||||||
|
// number of TX sessions opened with NIC cryptography
|
||||||
|
TLSTxDevice int
|
||||||
|
// number of RX sessions opened with NIC cryptography
|
||||||
|
TLSRxDevice int
|
||||||
|
// record decryption failed (e.g. due to incorrect authentication tag)
|
||||||
|
TLSDecryptError int
|
||||||
|
// number of RX resyncs sent to NICs handling cryptography
|
||||||
|
TLSRxDeviceResync int
|
||||||
|
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
|
||||||
|
TLSDecryptRetry int
|
||||||
|
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
|
||||||
|
TLSRxNoPadViolation int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTLSStat reads the tls_stat statistics.
|
||||||
|
func NewTLSStat() (TLSStat, error) {
|
||||||
|
fs, err := NewFS(DefaultMountPoint)
|
||||||
|
if err != nil {
|
||||||
|
return TLSStat{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.NewTLSStat()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTLSStat reads the tls_stat statistics.
|
||||||
|
func (fs FS) NewTLSStat() (TLSStat, error) {
|
||||||
|
file, err := os.Open(fs.proc.Path("net/tls_stat"))
|
||||||
|
if err != nil {
|
||||||
|
return TLSStat{}, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
var (
|
||||||
|
tlsstat = TLSStat{}
|
||||||
|
s = bufio.NewScanner(file)
|
||||||
|
)
|
||||||
|
|
||||||
|
for s.Scan() {
|
||||||
|
fields := strings.Fields(s.Text())
|
||||||
|
|
||||||
|
if len(fields) != 2 {
|
||||||
|
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
|
||||||
|
}
|
||||||
|
|
||||||
|
name := fields[0]
|
||||||
|
value, err := strconv.Atoi(fields[1])
|
||||||
|
if err != nil {
|
||||||
|
return TLSStat{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch name {
|
||||||
|
case "TlsCurrTxSw":
|
||||||
|
tlsstat.TLSCurrTxSw = value
|
||||||
|
case "TlsCurrRxSw":
|
||||||
|
tlsstat.TLSCurrRxSw = value
|
||||||
|
case "TlsCurrTxDevice":
|
||||||
|
tlsstat.TLSCurrTxDevice = value
|
||||||
|
case "TlsCurrRxDevice":
|
||||||
|
tlsstat.TLSCurrRxDevice = value
|
||||||
|
case "TlsTxSw":
|
||||||
|
tlsstat.TLSTxSw = value
|
||||||
|
case "TlsRxSw":
|
||||||
|
tlsstat.TLSRxSw = value
|
||||||
|
case "TlsTxDevice":
|
||||||
|
tlsstat.TLSTxDevice = value
|
||||||
|
case "TlsRxDevice":
|
||||||
|
tlsstat.TLSRxDevice = value
|
||||||
|
case "TlsDecryptError":
|
||||||
|
tlsstat.TLSDecryptError = value
|
||||||
|
case "TlsRxDeviceResync":
|
||||||
|
tlsstat.TLSRxDeviceResync = value
|
||||||
|
case "TlsDecryptRetry":
|
||||||
|
tlsstat.TLSDecryptRetry = value
|
||||||
|
case "TlsRxNoPadViolation":
|
||||||
|
tlsstat.TLSRxNoPadViolation = value
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return tlsstat, s.Err()
|
||||||
|
}
|
14
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
14
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
|||||||
line := s.Text()
|
line := s.Text()
|
||||||
item, err := nu.parseLine(line, hasInode, minFields)
|
item, err := nu.parseLine(line, hasInode, minFields)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
|
return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
nu.Rows = append(nu.Rows, item)
|
nu.Rows = append(nu.Rows, item)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Err(); err != nil {
|
if err := s.Err(); err != nil {
|
||||||
return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
|
return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &nu, nil
|
return &nu, nil
|
||||||
@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
|
|||||||
|
|
||||||
users, err := u.parseUsers(fields[1])
|
users, err := u.parseUsers(fields[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
|
return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
flags, err := u.parseFlags(fields[3])
|
flags, err := u.parseFlags(fields[3])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
|
return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
typ, err := u.parseType(fields[4])
|
typ, err := u.parseType(fields[4])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
|
return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
state, err := u.parseState(fields[5])
|
state, err := u.parseState(fields[5])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
|
return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var inode uint64
|
var inode uint64
|
||||||
if hasInode {
|
if hasInode {
|
||||||
inode, err = u.parseInode(fields[6])
|
inode, err = u.parseInode(fields[6])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
|
return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
22
vendor/github.com/prometheus/procfs/net_wireless.go
generated
vendored
22
vendor/github.com/prometheus/procfs/net_wireless.go
generated
vendored
@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
|
|||||||
|
|
||||||
m, err := parseWireless(bytes.NewReader(b))
|
m, err := parseWireless(bytes.NewReader(b))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
|
return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return m, nil
|
return m, nil
|
||||||
@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
|
|||||||
|
|
||||||
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
|
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
|
return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
|
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
|
return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
|
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
|
return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dnwid, err := strconv.Atoi(stats[4])
|
dnwid, err := strconv.Atoi(stats[4])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
|
return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dcrypt, err := strconv.Atoi(stats[5])
|
dcrypt, err := strconv.Atoi(stats[5])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
|
return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dfrag, err := strconv.Atoi(stats[6])
|
dfrag, err := strconv.Atoi(stats[6])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
|
return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dretry, err := strconv.Atoi(stats[7])
|
dretry, err := strconv.Atoi(stats[7])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
|
return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dmisc, err := strconv.Atoi(stats[8])
|
dmisc, err := strconv.Atoi(stats[8])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
|
return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
mbeacon, err := strconv.Atoi(stats[9])
|
mbeacon, err := strconv.Atoi(stats[9])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
|
return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
w := &Wireless{
|
w := &Wireless{
|
||||||
@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
if err := scanner.Err(); err != nil {
|
||||||
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
|
return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return interfaces, nil
|
return interfaces, nil
|
||||||
|
8
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
8
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
|||||||
|
|
||||||
names, err := d.Readdirnames(-1)
|
names, err := d.Readdirnames(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
|
return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
p := Procs{}
|
p := Procs{}
|
||||||
@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
|
|||||||
return []string{}, nil
|
return []string{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wchan returns the wchan (wait channel) of a process.
|
// Wchan returns the wchan (wait channel) of a process.
|
||||||
@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
|
|||||||
for i, n := range names {
|
for i, n := range names {
|
||||||
fd, err := strconv.ParseInt(n, 10, 32)
|
fd, err := strconv.ParseInt(n, 10, 32)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
|
return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
|
||||||
}
|
}
|
||||||
fds[i] = uintptr(fd)
|
fds[i] = uintptr(fd)
|
||||||
}
|
}
|
||||||
@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
|||||||
|
|
||||||
names, err := d.Readdirnames(-1)
|
names, err := d.Readdirnames(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
|
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return names, nil
|
return names, nil
|
||||||
|
2
vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
2
vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
|
|||||||
}
|
}
|
||||||
i, err := strconv.ParseUint(s, 10, 64)
|
i, err := strconv.ParseUint(s, 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
|
return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
|
||||||
}
|
}
|
||||||
return i, nil
|
return i, nil
|
||||||
}
|
}
|
||||||
|
4
vendor/github.com/prometheus/procfs/proc_ns.go
generated
vendored
4
vendor/github.com/prometheus/procfs/proc_ns.go
generated
vendored
@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
|
|||||||
|
|
||||||
names, err := d.Readdirnames(-1)
|
names, err := d.Readdirnames(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
|
return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ns := make(Namespaces, len(names))
|
ns := make(Namespaces, len(names))
|
||||||
@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
|
|||||||
typ := fields[0]
|
typ := fields[0]
|
||||||
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
|
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
|
return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ns[name] = Namespace{typ, uint32(inode)}
|
ns[name] = Namespace{typ, uint32(inode)}
|
||||||
|
2
vendor/github.com/prometheus/procfs/proc_psi.go
generated
vendored
2
vendor/github.com/prometheus/procfs/proc_psi.go
generated
vendored
@ -61,7 +61,7 @@ type PSIStats struct {
|
|||||||
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
|
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
|
||||||
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
|
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
|
return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return parsePSIStats(bytes.NewReader(data))
|
return parsePSIStats(bytes.NewReader(data))
|
||||||
|
2
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
2
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
v := strings.TrimSpace(kv[1])
|
v := strings.TrimSpace(kv[1])
|
||||||
v = strings.TrimRight(v, " kB")
|
v = strings.TrimSuffix(v, " kB")
|
||||||
|
|
||||||
vKBytes, err := strconv.ParseUint(v, 10, 64)
|
vKBytes, err := strconv.ParseUint(v, 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
7
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
7
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -110,6 +110,11 @@ type ProcStat struct {
|
|||||||
Policy uint
|
Policy uint
|
||||||
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
||||||
DelayAcctBlkIOTicks uint64
|
DelayAcctBlkIOTicks uint64
|
||||||
|
// Guest time of the process (time spent running a virtual CPU for a guest
|
||||||
|
// operating system), measured in clock ticks.
|
||||||
|
GuestTime int
|
||||||
|
// Guest time of the process's children, measured in clock ticks.
|
||||||
|
CGuestTime int
|
||||||
|
|
||||||
proc FS
|
proc FS
|
||||||
}
|
}
|
||||||
@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
|
|||||||
&s.RTPriority,
|
&s.RTPriority,
|
||||||
&s.Policy,
|
&s.Policy,
|
||||||
&s.DelayAcctBlkIOTicks,
|
&s.DelayAcctBlkIOTicks,
|
||||||
|
&s.GuestTime,
|
||||||
|
&s.CGuestTime,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcStat{}, err
|
return ProcStat{}, err
|
||||||
|
29
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
29
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
@ -15,6 +15,7 @@ package procfs
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"math/bits"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -76,9 +77,9 @@ type ProcStatus struct {
|
|||||||
NonVoluntaryCtxtSwitches uint64
|
NonVoluntaryCtxtSwitches uint64
|
||||||
|
|
||||||
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
|
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
|
||||||
UIDs [4]string
|
UIDs [4]uint64
|
||||||
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
||||||
GIDs [4]string
|
GIDs [4]uint64
|
||||||
|
|
||||||
// CpusAllowedList: List of cpu cores processes are allowed to run on.
|
// CpusAllowedList: List of cpu cores processes are allowed to run on.
|
||||||
CpusAllowedList []uint64
|
CpusAllowedList []uint64
|
||||||
@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
|
|||||||
// convert kB to B
|
// convert kB to B
|
||||||
vBytes := vKBytes * 1024
|
vBytes := vKBytes * 1024
|
||||||
|
|
||||||
s.fillStatus(k, v, vKBytes, vBytes)
|
err = s.fillStatus(k, v, vKBytes, vBytes)
|
||||||
|
if err != nil {
|
||||||
|
return ProcStatus{}, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
|
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
|
||||||
switch k {
|
switch k {
|
||||||
case "Tgid":
|
case "Tgid":
|
||||||
s.TGID = int(vUint)
|
s.TGID = int(vUint)
|
||||||
case "Name":
|
case "Name":
|
||||||
s.Name = vString
|
s.Name = vString
|
||||||
case "Uid":
|
case "Uid":
|
||||||
copy(s.UIDs[:], strings.Split(vString, "\t"))
|
var err error
|
||||||
|
for i, v := range strings.Split(vString, "\t") {
|
||||||
|
s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
case "Gid":
|
case "Gid":
|
||||||
copy(s.GIDs[:], strings.Split(vString, "\t"))
|
var err error
|
||||||
|
for i, v := range strings.Split(vString, "\t") {
|
||||||
|
s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
case "NSpid":
|
case "NSpid":
|
||||||
s.NSpids = calcNSPidsList(vString)
|
s.NSpids = calcNSPidsList(vString)
|
||||||
case "VmPeak":
|
case "VmPeak":
|
||||||
@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
|
|||||||
s.CpusAllowedList = calcCpusAllowedList(vString)
|
s.CpusAllowedList = calcCpusAllowedList(vString)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TotalCtxtSwitches returns the total context switch.
|
// TotalCtxtSwitches returns the total context switch.
|
||||||
|
2
vendor/github.com/prometheus/procfs/proc_sys.go
generated
vendored
2
vendor/github.com/prometheus/procfs/proc_sys.go
generated
vendored
@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
|
|||||||
vp := util.NewValueParser(f)
|
vp := util.NewValueParser(f)
|
||||||
values[i] = vp.Int()
|
values[i] = vp.Int()
|
||||||
if err := vp.Err(); err != nil {
|
if err := vp.Err(); err != nil {
|
||||||
return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
|
return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return values, nil
|
return values, nil
|
||||||
|
22
vendor/github.com/prometheus/procfs/softirqs.go
generated
vendored
22
vendor/github.com/prometheus/procfs/softirqs.go
generated
vendored
@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.Hi = make([]uint64, len(perCPU))
|
softirqs.Hi = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "TIMER:":
|
case parts[0] == "TIMER:":
|
||||||
@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.Timer = make([]uint64, len(perCPU))
|
softirqs.Timer = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "NET_TX:":
|
case parts[0] == "NET_TX:":
|
||||||
@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.NetTx = make([]uint64, len(perCPU))
|
softirqs.NetTx = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "NET_RX:":
|
case parts[0] == "NET_RX:":
|
||||||
@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.NetRx = make([]uint64, len(perCPU))
|
softirqs.NetRx = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "BLOCK:":
|
case parts[0] == "BLOCK:":
|
||||||
@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.Block = make([]uint64, len(perCPU))
|
softirqs.Block = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "IRQ_POLL:":
|
case parts[0] == "IRQ_POLL:":
|
||||||
@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.IRQPoll = make([]uint64, len(perCPU))
|
softirqs.IRQPoll = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "TASKLET:":
|
case parts[0] == "TASKLET:":
|
||||||
@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.Tasklet = make([]uint64, len(perCPU))
|
softirqs.Tasklet = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "SCHED:":
|
case parts[0] == "SCHED:":
|
||||||
@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.Sched = make([]uint64, len(perCPU))
|
softirqs.Sched = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "HRTIMER:":
|
case parts[0] == "HRTIMER:":
|
||||||
@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.HRTimer = make([]uint64, len(perCPU))
|
softirqs.HRTimer = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "RCU:":
|
case parts[0] == "RCU:":
|
||||||
@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
|||||||
softirqs.RCU = make([]uint64, len(perCPU))
|
softirqs.RCU = make([]uint64, len(perCPU))
|
||||||
for i, count := range perCPU {
|
for i, count := range perCPU {
|
||||||
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
if err := scanner.Err(); err != nil {
|
||||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
|
return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return softirqs, scanner.Err()
|
return softirqs, scanner.Err()
|
||||||
|
22
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
22
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
|
|||||||
&cpuStat.Guest, &cpuStat.GuestNice)
|
&cpuStat.Guest, &cpuStat.GuestNice)
|
||||||
|
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
|
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
|
||||||
}
|
}
|
||||||
if count == 0 {
|
if count == 0 {
|
||||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
|
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
|
||||||
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
|
|||||||
|
|
||||||
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
|
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return cpuStat, cpuID, nil
|
return cpuStat, cpuID, nil
|
||||||
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
|
|||||||
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
|
return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return softIRQStat, total, nil
|
return softIRQStat, total, nil
|
||||||
@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
|
|||||||
switch {
|
switch {
|
||||||
case parts[0] == "btime":
|
case parts[0] == "btime":
|
||||||
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
|
||||||
}
|
}
|
||||||
case parts[0] == "intr":
|
case parts[0] == "intr":
|
||||||
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
|
||||||
}
|
}
|
||||||
numberedIRQs := parts[2:]
|
numberedIRQs := parts[2:]
|
||||||
stat.IRQ = make([]uint64, len(numberedIRQs))
|
stat.IRQ = make([]uint64, len(numberedIRQs))
|
||||||
for i, count := range numberedIRQs {
|
for i, count := range numberedIRQs {
|
||||||
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case parts[0] == "ctxt":
|
case parts[0] == "ctxt":
|
||||||
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
|
||||||
}
|
}
|
||||||
case parts[0] == "processes":
|
case parts[0] == "processes":
|
||||||
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
|
||||||
}
|
}
|
||||||
case parts[0] == "procs_running":
|
case parts[0] == "procs_running":
|
||||||
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
|
||||||
}
|
}
|
||||||
case parts[0] == "procs_blocked":
|
case parts[0] == "procs_blocked":
|
||||||
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
|
||||||
}
|
}
|
||||||
case parts[0] == "softirq":
|
case parts[0] == "softirq":
|
||||||
softIRQStats, total, err := parseSoftIRQStat(line)
|
softIRQStats, total, err := parseSoftIRQStat(line)
|
||||||
@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
if err := scanner.Err(); err != nil {
|
||||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
|
return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return stat, nil
|
return stat, nil
|
||||||
|
6
vendor/github.com/prometheus/procfs/swaps.go
generated
vendored
6
vendor/github.com/prometheus/procfs/swaps.go
generated
vendored
@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
|
|||||||
|
|
||||||
swap.Size, err = strconv.Atoi(swapFields[2])
|
swap.Size, err = strconv.Atoi(swapFields[2])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
|
return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
|
||||||
}
|
}
|
||||||
swap.Used, err = strconv.Atoi(swapFields[3])
|
swap.Used, err = strconv.Atoi(swapFields[3])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
|
return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
|
||||||
}
|
}
|
||||||
swap.Priority, err = strconv.Atoi(swapFields[4])
|
swap.Priority, err = strconv.Atoi(swapFields[4])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
|
return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return swap, nil
|
return swap, nil
|
||||||
|
2
vendor/github.com/prometheus/procfs/thread.go
generated
vendored
2
vendor/github.com/prometheus/procfs/thread.go
generated
vendored
@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
|
|||||||
|
|
||||||
names, err := d.Readdirnames(-1)
|
names, err := d.Readdirnames(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
|
return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
t := Procs{}
|
t := Procs{}
|
||||||
|
4
vendor/github.com/prometheus/procfs/zoneinfo.go
generated
vendored
4
vendor/github.com/prometheus/procfs/zoneinfo.go
generated
vendored
@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
|
|||||||
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
|
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
|
||||||
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
|
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
|
return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
|
||||||
}
|
}
|
||||||
zoneinfo, err := parseZoneinfo(data)
|
zoneinfo, err := parseZoneinfo(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
|
return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
|
||||||
}
|
}
|
||||||
return zoneinfo, nil
|
return zoneinfo, nil
|
||||||
}
|
}
|
||||||
|
13
vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
13
vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.11.x
|
||||||
|
|
||||||
|
env:
|
||||||
|
- GO111MODULE=on
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -short -coverprofile=coverage.txt -covermode=count ./...
|
||||||
|
|
||||||
|
after_success:
|
||||||
|
- bash <(curl -s https://codecov.io/bash)
|
22
vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
22
vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
133
vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
133
vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
|||||||
|
# Float16 (Binary16) in Go/Golang
|
||||||
|
[](https://travis-ci.org/x448/float16)
|
||||||
|
[](https://codecov.io/gh/x448/float16)
|
||||||
|
[](https://goreportcard.com/report/github.com/x448/float16)
|
||||||
|
[](https://github.com/x448/float16/releases)
|
||||||
|
[](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
|
||||||
|
|
||||||
|
`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
|
||||||
|
|
||||||
|
IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
|
||||||
|
|
||||||
|
All possible 4+ billion floating-point conversions with this library are verified to be correct.
|
||||||
|
|
||||||
|
Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
Current features include:
|
||||||
|
|
||||||
|
* float16 to float32 conversions use lossless conversion.
|
||||||
|
* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
|
||||||
|
* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
|
||||||
|
* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
|
||||||
|
* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
|
||||||
|
* all functions in this library use zero allocs except String().
|
||||||
|
|
||||||
|
## Status
|
||||||
|
This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
|
||||||
|
|
||||||
|
Current status:
|
||||||
|
|
||||||
|
* core API is done and breaking API changes are unlikely.
|
||||||
|
* 100% of unit tests pass:
|
||||||
|
* short mode (`go test -short`) tests around 65765 conversions in 0.005s.
|
||||||
|
* normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
|
||||||
|
* 100% code coverage with both short mode and normal mode.
|
||||||
|
* tested on amd64 but it should work on all little-endian platforms supported by Go.
|
||||||
|
|
||||||
|
Roadmap:
|
||||||
|
|
||||||
|
* add functions for fast batch conversions leveraging SIMD when supported by hardware.
|
||||||
|
* speed up unit test when verifying all possible 4+ billion conversions.
|
||||||
|
* test on additional platforms.
|
||||||
|
|
||||||
|
## Float16 to Float32 Conversion
|
||||||
|
Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
|
||||||
|
|
||||||
|
Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
|
||||||
|
|
||||||
|
## Float32 to Float16 Conversion
|
||||||
|
Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
|
||||||
|
|
||||||
|
Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
|
||||||
|
|
||||||
|
Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
Install with `go get github.com/x448/float16`.
|
||||||
|
```
|
||||||
|
// Convert float32 to float16
|
||||||
|
pi := float32(math.Pi)
|
||||||
|
pi16 := float16.Fromfloat32(pi)
|
||||||
|
|
||||||
|
// Convert float16 to float32
|
||||||
|
pi32 := pi16.Float32()
|
||||||
|
|
||||||
|
// PrecisionFromfloat32() is faster than the overhead of calling a function.
|
||||||
|
// This example only converts if there's no data loss and input is not a subnormal.
|
||||||
|
if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
|
||||||
|
pi16 := float16.Fromfloat32(pi)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Float16 Type and API
|
||||||
|
Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
|
||||||
|
```
|
||||||
|
package float16 // import "github.com/x448/float16"
|
||||||
|
|
||||||
|
// Exported types and consts
|
||||||
|
type Float16 uint16
|
||||||
|
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||||
|
|
||||||
|
// Exported functions
|
||||||
|
Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
|
||||||
|
with identical results to AMD and Intel F16C hardware. NaN inputs
|
||||||
|
are converted with quiet bit always set on, to be like F16C.
|
||||||
|
|
||||||
|
FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
|
||||||
|
// The "ps" suffix means "preserve signaling".
|
||||||
|
// Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
|
||||||
|
|
||||||
|
Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
|
||||||
|
NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
|
||||||
|
Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
|
||||||
|
|
||||||
|
PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
|
||||||
|
// (inline and < 1 ns/op)
|
||||||
|
// Exported methods
|
||||||
|
(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
|
||||||
|
(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
|
||||||
|
(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
|
||||||
|
(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
|
||||||
|
(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
|
||||||
|
(f Float16) IsFinite() bool // true if f is not infinite or NaN
|
||||||
|
(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
|
||||||
|
(f Float16) Signbit() bool // true if f is negative or negative zero
|
||||||
|
(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
|
||||||
|
```
|
||||||
|
See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
|
||||||
|
|
||||||
|
## Benchmarks
|
||||||
|
Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
|
||||||
|
|
||||||
|
```
|
||||||
|
All functions have zero allocations except float16.String().
|
||||||
|
|
||||||
|
FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
|
||||||
|
ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
|
||||||
|
Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
|
||||||
|
|
||||||
|
PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
|
||||||
|
```
|
||||||
|
|
||||||
|
## System Requirements
|
||||||
|
* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
|
||||||
|
* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
|
||||||
|
|
||||||
|
## Special Thanks
|
||||||
|
Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
|
||||||
|
|
||||||
|
## License
|
||||||
|
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||||
|
|
||||||
|
Licensed under [MIT License](LICENSE)
|
302
vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
302
vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
@ -0,0 +1,302 @@
|
|||||||
|
// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||||
|
//
|
||||||
|
// Special thanks to Kathryn Long for her Rust implementation
|
||||||
|
// of float16 at github.com/starkat99/half-rs (MIT license)
|
||||||
|
|
||||||
|
package float16
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
|
||||||
|
type Float16 uint16
|
||||||
|
|
||||||
|
// Precision indicates whether the conversion to Float16 is
|
||||||
|
// exact, subnormal without dropped bits, inexact, underflow, or overflow.
|
||||||
|
type Precision int
|
||||||
|
|
||||||
|
const (
|
||||||
|
|
||||||
|
// PrecisionExact is for non-subnormals that don't drop bits during conversion.
|
||||||
|
// All of these can round-trip. Should always convert to float16.
|
||||||
|
PrecisionExact Precision = iota
|
||||||
|
|
||||||
|
// PrecisionUnknown is for subnormals that don't drop bits during conversion but
|
||||||
|
// not all of these can round-trip so precision is unknown without more effort.
|
||||||
|
// Only 2046 of these can round-trip and the rest cannot round-trip.
|
||||||
|
PrecisionUnknown
|
||||||
|
|
||||||
|
// PrecisionInexact is for dropped significand bits and cannot round-trip.
|
||||||
|
// Some of these are subnormals. Cannot round-trip float32->float16->float32.
|
||||||
|
PrecisionInexact
|
||||||
|
|
||||||
|
// PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
|
||||||
|
PrecisionUnderflow
|
||||||
|
|
||||||
|
// PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
|
||||||
|
PrecisionOverflow
|
||||||
|
)
|
||||||
|
|
||||||
|
// PrecisionFromfloat32 returns Precision without performing
|
||||||
|
// the conversion. Conversions from both Infinity and NaN
|
||||||
|
// values will always report PrecisionExact even if NaN payload
|
||||||
|
// or NaN-Quiet-Bit is lost. This function is kept simple to
|
||||||
|
// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
|
||||||
|
func PrecisionFromfloat32(f32 float32) Precision {
|
||||||
|
u32 := math.Float32bits(f32)
|
||||||
|
|
||||||
|
if u32 == 0 || u32 == 0x80000000 {
|
||||||
|
// +- zero will always be exact conversion
|
||||||
|
return PrecisionExact
|
||||||
|
}
|
||||||
|
|
||||||
|
const COEFMASK uint32 = 0x7fffff // 23 least significant bits
|
||||||
|
const EXPSHIFT uint32 = 23
|
||||||
|
const EXPBIAS uint32 = 127
|
||||||
|
const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
|
||||||
|
const DROPMASK uint32 = COEFMASK >> 10
|
||||||
|
|
||||||
|
exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
|
||||||
|
coef := u32 & COEFMASK
|
||||||
|
|
||||||
|
if exp == 128 {
|
||||||
|
// +- infinity or NaN
|
||||||
|
// apps may want to do extra checks for NaN separately
|
||||||
|
return PrecisionExact
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
|
||||||
|
// "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
|
||||||
|
if exp < -24 {
|
||||||
|
return PrecisionUnderflow
|
||||||
|
}
|
||||||
|
if exp > 15 {
|
||||||
|
return PrecisionOverflow
|
||||||
|
}
|
||||||
|
if (coef & DROPMASK) != uint32(0) {
|
||||||
|
// these include subnormals and non-subnormals that dropped bits
|
||||||
|
return PrecisionInexact
|
||||||
|
}
|
||||||
|
|
||||||
|
if exp < -14 {
|
||||||
|
// Subnormals. Caller may want to test these further.
|
||||||
|
// There are 2046 subnormals that can successfully round-trip f32->f16->f32
|
||||||
|
// and 20 of those 2046 have 32-bit input coef == 0.
|
||||||
|
// RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
|
||||||
|
// so some protocols and libraries will choose to handle subnormals differently
|
||||||
|
// when deciding to encode them to CBOR float32 vs float16.
|
||||||
|
return PrecisionUnknown
|
||||||
|
}
|
||||||
|
|
||||||
|
return PrecisionExact
|
||||||
|
}
|
||||||
|
|
||||||
|
// Frombits returns the float16 number corresponding to the IEEE 754 binary16
|
||||||
|
// representation u16, with the sign bit of u16 and the result in the same bit
|
||||||
|
// position. Frombits(Bits(x)) == x.
|
||||||
|
func Frombits(u16 uint16) Float16 {
|
||||||
|
return Float16(u16)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
|
||||||
|
// IEEE default rounding (nearest int, with ties to even).
|
||||||
|
func Fromfloat32(f32 float32) Float16 {
|
||||||
|
return Float16(f32bitsToF16bits(math.Float32bits(f32)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrInvalidNaNValue indicates a NaN was not received.
|
||||||
|
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||||
|
|
||||||
|
type float16Error string
|
||||||
|
|
||||||
|
func (e float16Error) Error() string { return string(e) }
|
||||||
|
|
||||||
|
// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
|
||||||
|
// signaling and payload. Unlike Fromfloat32(), which can only return
|
||||||
|
// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
|
||||||
|
// If the result is infinity (sNaN with empty payload), then the
|
||||||
|
// lowest bit of payload is set to make the result a NaN.
|
||||||
|
// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
|
||||||
|
// This function was kept simple to be able to inline.
|
||||||
|
func FromNaN32ps(nan float32) (Float16, error) {
|
||||||
|
const SNAN = Float16(uint16(0x7c01)) // signalling NaN
|
||||||
|
|
||||||
|
u32 := math.Float32bits(nan)
|
||||||
|
sign := u32 & 0x80000000
|
||||||
|
exp := u32 & 0x7f800000
|
||||||
|
coef := u32 & 0x007fffff
|
||||||
|
|
||||||
|
if (exp != 0x7f800000) || (coef == 0) {
|
||||||
|
return SNAN, ErrInvalidNaNValue
|
||||||
|
}
|
||||||
|
|
||||||
|
u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
|
||||||
|
|
||||||
|
if (u16 & 0x03ff) == 0 {
|
||||||
|
// result became infinity, make it NaN by setting lowest bit in payload
|
||||||
|
u16 = u16 | 0x0001
|
||||||
|
}
|
||||||
|
|
||||||
|
return Float16(u16), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
|
||||||
|
// Returned NaN value 0x7e01 has all exponent bits = 1 with the
|
||||||
|
// first and last bits = 1 in the significand. This is consistent
|
||||||
|
// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
|
||||||
|
func NaN() Float16 {
|
||||||
|
return Float16(0x7e01)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inf returns a Float16 with an infinity value with the specified sign.
|
||||||
|
// A sign >= returns positive infinity.
|
||||||
|
// A sign < 0 returns negative infinity.
|
||||||
|
func Inf(sign int) Float16 {
|
||||||
|
if sign >= 0 {
|
||||||
|
return Float16(0x7c00)
|
||||||
|
}
|
||||||
|
return Float16(0x8000 | 0x7c00)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float32 returns a float32 converted from f (Float16).
|
||||||
|
// This is a lossless conversion.
|
||||||
|
func (f Float16) Float32() float32 {
|
||||||
|
u32 := f16bitsToF32bits(uint16(f))
|
||||||
|
return math.Float32frombits(u32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
|
||||||
|
// of f and the result in the same bit position. Bits(Frombits(x)) == x.
|
||||||
|
func (f Float16) Bits() uint16 {
|
||||||
|
return uint16(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
|
||||||
|
func (f Float16) IsNaN() bool {
|
||||||
|
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
|
||||||
|
// “not-a-number” value.
|
||||||
|
func (f Float16) IsQuietNaN() bool {
|
||||||
|
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsInf reports whether f is an infinity (inf).
|
||||||
|
// A sign > 0 reports whether f is positive inf.
|
||||||
|
// A sign < 0 reports whether f is negative inf.
|
||||||
|
// A sign == 0 reports whether f is either inf.
|
||||||
|
func (f Float16) IsInf(sign int) bool {
|
||||||
|
return ((f == 0x7c00) && sign >= 0) ||
|
||||||
|
(f == 0xfc00 && sign <= 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFinite returns true if f is neither infinite nor NaN.
|
||||||
|
func (f Float16) IsFinite() bool {
|
||||||
|
return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN.
|
||||||
|
func (f Float16) IsNormal() bool {
|
||||||
|
exp := uint16(f) & uint16(0x7c00)
|
||||||
|
return (exp != uint16(0x7c00)) && (exp != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signbit reports whether f is negative or negative zero.
|
||||||
|
func (f Float16) Signbit() bool {
|
||||||
|
return (uint16(f) & uint16(0x8000)) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// String satisfies the fmt.Stringer interface.
|
||||||
|
func (f Float16) String() string {
|
||||||
|
return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16.
func f16bitsToF32bits(in uint16) uint32 {
	// All 65536 conversions with this were confirmed to be correct
	// by Montgomery Edwards⁴⁴⁸ (github.com/x448).

	// Widen the three binary16 fields into their binary32 positions.
	sign := uint32(in&0x8000) << 16 // sign bit moves from bit 15 to bit 31
	exp := uint32(in&0x7c00) >> 10  // 5-bit exponent field, still binary16-biased
	frac := uint32(in&0x03ff) << 13 // 10-bit significand shifted into 23-bit field

	switch exp {
	case 0x1f: // max exponent: infinity or NaN
		if frac == 0 {
			// infinity
			return sign | 0x7f800000 | frac
		}
		// NaN (quiet bit forced on in the binary32 result)
		return sign | 0x7fc00000 | frac

	case 0: // zero or subnormal
		if frac == 0 {
			// signed zero
			return sign
		}
		// Normalize the subnormal: shift the significand up until its
		// leading bit lands in the binary32 exponent field, tracking the
		// shift count in exp (modular uint32 arithmetic; the rebias below
		// brings it back into range).
		exp = 1
		for frac&0x7f800000 == 0 {
			frac <<= 1
			exp--
		}
		frac &= 0x007fffff // drop the now-implicit leading bit
	}

	// Rebias the exponent from 15 (binary16) to 127 (binary32).
	return sign | ((exp + (0x7f - 0xf)) << 23) | frac
}
// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
// Conversion rounds to nearest integer with ties to even.
func f32bitsToF16bits(u32 uint32) uint16 {
	// Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
	// All 4294967296 conversions with this were confirmed to be correct by x448.
	// Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.

	// Split the binary32 pattern into its three fields.
	var (
		f32Sign = u32 & 0x80000000
		f32Exp  = u32 & 0x7f800000
		f32Frac = u32 & 0x007fffff
	)

	// Max exponent: NaN or infinity. For NaN, force the quiet bit so a
	// signaling NaN cannot collapse to infinity when the significand is
	// truncated to 10 bits.
	if f32Exp == 0x7f800000 {
		quiet := uint32(0)
		if f32Frac != 0 {
			quiet = 0x0200
		}
		return uint16((f32Sign >> 16) | uint32(0x7c00) | quiet | (f32Frac >> 13))
	}

	f16Sign := f32Sign >> 16

	// Rebias the exponent from 127 (binary32) to 15 (binary16).
	f16Exp := int32(f32Exp>>23) - 127 + 15

	// Overflow: magnitude too large for binary16; result is signed infinity.
	if f16Exp >= 0x1f {
		return uint16(f16Sign | uint32(0x7c00))
	}

	// Underflow: the binary16 result is subnormal or zero.
	if f16Exp <= 0 {
		if 14-f16Exp > 24 {
			// Too small for even the smallest subnormal: signed zero.
			return uint16(f16Sign)
		}
		mant := f32Frac | uint32(0x00800000) // restore the implicit leading bit
		f16Frac := mant >> uint32(14-f16Exp)
		round := uint32(1) << uint32(13-f16Exp)
		// Round to nearest, ties to even: the mask 3*round-1 covers the
		// result's low bit plus all sticky bits below the round bit.
		if (mant&round) != 0 && (mant&(3*round-1)) != 0 {
			f16Frac++
		}
		return uint16(f16Sign | f16Frac)
	}

	// Normal case: truncate the significand and round to nearest, ties to even.
	expBits := uint32(f16Exp) << 10
	f16Frac := f32Frac >> 13
	roundBit := uint32(0x00001000)
	if (f32Frac&roundBit) != 0 && (f32Frac&(3*roundBit-1)) != 0 {
		return uint16((f16Sign | expBits | f16Frac) + 1)
	}
	return uint16(f16Sign | expBits | f16Frac)
}
|
2
vendor/k8s.io/api/core/v1/doc.go
generated
vendored
2
vendor/k8s.io/api/core/v1/doc.go
generated
vendored
@ -17,6 +17,8 @@ limitations under the License.
|
|||||||
// +k8s:openapi-gen=true
|
// +k8s:openapi-gen=true
|
||||||
// +k8s:deepcopy-gen=package
|
// +k8s:deepcopy-gen=package
|
||||||
// +k8s:protobuf-gen=package
|
// +k8s:protobuf-gen=package
|
||||||
|
// +k8s:prerelease-lifecycle-gen=true
|
||||||
|
// +groupName=
|
||||||
|
|
||||||
// Package v1 is the v1 version of the core API.
|
// Package v1 is the v1 version of the core API.
|
||||||
package v1 // import "k8s.io/api/core/v1"
|
package v1 // import "k8s.io/api/core/v1"
|
||||||
|
4170
vendor/k8s.io/api/core/v1/generated.pb.go
generated
vendored
4170
vendor/k8s.io/api/core/v1/generated.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
452
vendor/k8s.io/api/core/v1/generated.proto
generated
vendored
452
vendor/k8s.io/api/core/v1/generated.proto
generated
vendored
File diff suppressed because it is too large
Load Diff
353
vendor/k8s.io/api/core/v1/types.go
generated
vendored
353
vendor/k8s.io/api/core/v1/types.go
generated
vendored
@ -181,6 +181,23 @@ type VolumeSource struct {
|
|||||||
//
|
//
|
||||||
// +optional
|
// +optional
|
||||||
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
|
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
|
||||||
|
// image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
|
||||||
|
// The volume is resolved at pod startup depending on which PullPolicy value is provided:
|
||||||
|
//
|
||||||
|
// - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
|
||||||
|
// - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
|
||||||
|
// - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
|
||||||
|
//
|
||||||
|
// The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
|
||||||
|
// A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
|
||||||
|
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||||
|
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||||
|
// The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||||
|
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||||
|
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||||
|
// +featureGate=ImageVolume
|
||||||
|
// +optional
|
||||||
|
Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
|
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
|
||||||
@ -295,6 +312,7 @@ const (
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
|
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
|
||||||
// It is analogous to a node.
|
// It is analogous to a node.
|
||||||
@ -371,7 +389,7 @@ type PersistentVolumeSpec struct {
|
|||||||
// after a volume has been updated successfully to a new class.
|
// after a volume has been updated successfully to a new class.
|
||||||
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
|
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
|
||||||
// PersistentVolumeClaims during the binding process.
|
// PersistentVolumeClaims during the binding process.
|
||||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
|
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
|
||||||
@ -425,13 +443,12 @@ type PersistentVolumeStatus struct {
|
|||||||
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
|
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
|
||||||
// lastPhaseTransitionTime is the time the phase transitioned from one to another
|
// lastPhaseTransitionTime is the time the phase transitioned from one to another
|
||||||
// and automatically resets to current time everytime a volume phase transitions.
|
// and automatically resets to current time everytime a volume phase transitions.
|
||||||
// This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).
|
|
||||||
// +featureGate=PersistentVolumeLastPhaseTransitionTime
|
|
||||||
// +optional
|
// +optional
|
||||||
LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
|
LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolumeList is a list of PersistentVolume items.
|
// PersistentVolumeList is a list of PersistentVolume items.
|
||||||
type PersistentVolumeList struct {
|
type PersistentVolumeList struct {
|
||||||
@ -447,6 +464,7 @@ type PersistentVolumeList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolumeClaim is a user's request for and claim to a persistent volume
|
// PersistentVolumeClaim is a user's request for and claim to a persistent volume
|
||||||
type PersistentVolumeClaim struct {
|
type PersistentVolumeClaim struct {
|
||||||
@ -469,6 +487,7 @@ type PersistentVolumeClaim struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
|
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
|
||||||
type PersistentVolumeClaimList struct {
|
type PersistentVolumeClaimList struct {
|
||||||
@ -557,7 +576,7 @@ type PersistentVolumeClaimSpec struct {
|
|||||||
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||||
// exists.
|
// exists.
|
||||||
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
|
||||||
// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
|
// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
|
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
|
||||||
@ -581,15 +600,29 @@ type TypedObjectReference struct {
|
|||||||
Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
|
Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
|
// PersistentVolumeClaimConditionType defines the condition of PV claim.
|
||||||
|
// Valid values are:
|
||||||
|
// - "Resizing", "FileSystemResizePending"
|
||||||
|
//
|
||||||
|
// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected:
|
||||||
|
// - "ControllerResizeError", "NodeResizeError"
|
||||||
|
//
|
||||||
|
// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected:
|
||||||
|
// - "ModifyVolumeError", "ModifyingVolume"
|
||||||
type PersistentVolumeClaimConditionType string
|
type PersistentVolumeClaimConditionType string
|
||||||
|
|
||||||
|
// These are valid conditions of PVC
|
||||||
const (
|
const (
|
||||||
// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
|
// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
|
||||||
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
|
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
|
||||||
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
|
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
|
||||||
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
|
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
|
||||||
|
|
||||||
|
// PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller
|
||||||
|
PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError"
|
||||||
|
// PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node.
|
||||||
|
PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError"
|
||||||
|
|
||||||
// Applying the target VolumeAttributesClass encountered an error
|
// Applying the target VolumeAttributesClass encountered an error
|
||||||
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
|
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
|
||||||
// Volume is being modified
|
// Volume is being modified
|
||||||
@ -606,18 +639,19 @@ const (
|
|||||||
// State set when resize controller starts resizing the volume in control-plane.
|
// State set when resize controller starts resizing the volume in control-plane.
|
||||||
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
|
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
|
||||||
|
|
||||||
// State set when resize has failed in resize controller with a terminal error.
|
// State set when resize has failed in resize controller with a terminal unrecoverable error.
|
||||||
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
|
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
|
||||||
// unmodified, so as resize controller can resume the volume expansion.
|
// unmodified, so as resize controller can resume the volume expansion.
|
||||||
PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"
|
PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible"
|
||||||
|
|
||||||
// State set when resize controller has finished resizing the volume but further resizing of volume
|
// State set when resize controller has finished resizing the volume but further resizing of volume
|
||||||
// is needed on the node.
|
// is needed on the node.
|
||||||
PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
|
PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
|
||||||
// State set when kubelet starts resizing the volume.
|
// State set when kubelet starts resizing the volume.
|
||||||
PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
|
PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
|
||||||
// State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed
|
// State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors
|
||||||
PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
|
// shouldn't set this status
|
||||||
|
PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible"
|
||||||
)
|
)
|
||||||
|
|
||||||
// +enum
|
// +enum
|
||||||
@ -763,13 +797,13 @@ type PersistentVolumeClaimStatus struct {
|
|||||||
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
|
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
|
||||||
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
||||||
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
||||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
|
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
|
||||||
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
||||||
// When this is unset, there is no ModifyVolume operation being attempted.
|
// When this is unset, there is no ModifyVolume operation being attempted.
|
||||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
|
||||||
// +featureGate=VolumeAttributesClass
|
// +featureGate=VolumeAttributesClass
|
||||||
// +optional
|
// +optional
|
||||||
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
|
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
|
||||||
@ -943,16 +977,19 @@ type RBDVolumeSource struct {
|
|||||||
// Default is rbd.
|
// Default is rbd.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="rbd"
|
||||||
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
||||||
// user is the rados user name.
|
// user is the rados user name.
|
||||||
// Default is admin.
|
// Default is admin.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="admin"
|
||||||
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
||||||
// keyring is the path to key ring for RBDUser.
|
// keyring is the path to key ring for RBDUser.
|
||||||
// Default is /etc/ceph/keyring.
|
// Default is /etc/ceph/keyring.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="/etc/ceph/keyring"
|
||||||
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
||||||
// secretRef is name of the authentication secret for RBDUser. If provided
|
// secretRef is name of the authentication secret for RBDUser. If provided
|
||||||
// overrides keyring.
|
// overrides keyring.
|
||||||
@ -988,16 +1025,19 @@ type RBDPersistentVolumeSource struct {
|
|||||||
// Default is rbd.
|
// Default is rbd.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="rbd"
|
||||||
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
|
||||||
// user is the rados user name.
|
// user is the rados user name.
|
||||||
// Default is admin.
|
// Default is admin.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="admin"
|
||||||
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
|
||||||
// keyring is the path to key ring for RBDUser.
|
// keyring is the path to key ring for RBDUser.
|
||||||
// Default is /etc/ceph/keyring.
|
// Default is /etc/ceph/keyring.
|
||||||
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="/etc/ceph/keyring"
|
||||||
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
|
||||||
// secretRef is name of the authentication secret for RBDUser. If provided
|
// secretRef is name of the authentication secret for RBDUser. If provided
|
||||||
// overrides keyring.
|
// overrides keyring.
|
||||||
@ -1426,6 +1466,7 @@ type ISCSIVolumeSource struct {
|
|||||||
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
||||||
// Defaults to 'default' (tcp).
|
// Defaults to 'default' (tcp).
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="default"
|
||||||
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
||||||
// fsType is the filesystem type of the volume that you want to mount.
|
// fsType is the filesystem type of the volume that you want to mount.
|
||||||
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
||||||
@ -1473,6 +1514,7 @@ type ISCSIPersistentVolumeSource struct {
|
|||||||
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
// iscsiInterface is the interface Name that uses an iSCSI transport.
|
||||||
// Defaults to 'default' (tcp).
|
// Defaults to 'default' (tcp).
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="default"
|
||||||
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
|
||||||
// fsType is the filesystem type of the volume that you want to mount.
|
// fsType is the filesystem type of the volume that you want to mount.
|
||||||
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
// Tip: Ensure that the filesystem type is supported by the host operating system.
|
||||||
@ -1613,17 +1655,21 @@ type AzureDiskVolumeSource struct {
|
|||||||
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
|
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
|
||||||
// cachingMode is the Host Caching mode: None, Read Only, Read Write.
|
// cachingMode is the Host Caching mode: None, Read Only, Read Write.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default=ref(AzureDataDiskCachingReadWrite)
|
||||||
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
|
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
|
||||||
// fsType is Filesystem type to mount.
|
// fsType is Filesystem type to mount.
|
||||||
// Must be a filesystem type supported by the host operating system.
|
// Must be a filesystem type supported by the host operating system.
|
||||||
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
|
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="ext4"
|
||||||
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
|
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
|
||||||
// readOnly Defaults to false (read/write). ReadOnly here will force
|
// readOnly Defaults to false (read/write). ReadOnly here will force
|
||||||
// the ReadOnly setting in VolumeMounts.
|
// the ReadOnly setting in VolumeMounts.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default=false
|
||||||
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
|
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
|
||||||
// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
|
// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
|
||||||
|
// +default=ref(AzureSharedBlobDisk)
|
||||||
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
|
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1662,6 +1708,7 @@ type ScaleIOVolumeSource struct {
|
|||||||
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
||||||
// Default is ThinProvisioned.
|
// Default is ThinProvisioned.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="ThinProvisioned"
|
||||||
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
||||||
// volumeName is the name of a volume already created in the ScaleIO system
|
// volumeName is the name of a volume already created in the ScaleIO system
|
||||||
// that is associated with this volume source.
|
// that is associated with this volume source.
|
||||||
@ -1671,6 +1718,7 @@ type ScaleIOVolumeSource struct {
|
|||||||
// Ex. "ext4", "xfs", "ntfs".
|
// Ex. "ext4", "xfs", "ntfs".
|
||||||
// Default is "xfs".
|
// Default is "xfs".
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="xfs"
|
||||||
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
||||||
// readOnly Defaults to false (read/write). ReadOnly here will force
|
// readOnly Defaults to false (read/write). ReadOnly here will force
|
||||||
// the ReadOnly setting in VolumeMounts.
|
// the ReadOnly setting in VolumeMounts.
|
||||||
@ -1699,6 +1747,7 @@ type ScaleIOPersistentVolumeSource struct {
|
|||||||
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
|
||||||
// Default is ThinProvisioned.
|
// Default is ThinProvisioned.
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="ThinProvisioned"
|
||||||
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
|
||||||
// volumeName is the name of a volume already created in the ScaleIO system
|
// volumeName is the name of a volume already created in the ScaleIO system
|
||||||
// that is associated with this volume source.
|
// that is associated with this volume source.
|
||||||
@ -1708,6 +1757,7 @@ type ScaleIOPersistentVolumeSource struct {
|
|||||||
// Ex. "ext4", "xfs", "ntfs".
|
// Ex. "ext4", "xfs", "ntfs".
|
||||||
// Default is "xfs"
|
// Default is "xfs"
|
||||||
// +optional
|
// +optional
|
||||||
|
// +default="xfs"
|
||||||
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
|
||||||
// readOnly defaults to false (read/write). ReadOnly here will force
|
// readOnly defaults to false (read/write). ReadOnly here will force
|
||||||
// the ReadOnly setting in VolumeMounts.
|
// the ReadOnly setting in VolumeMounts.
|
||||||
@ -1891,7 +1941,8 @@ type ClusterTrustBundleProjection struct {
|
|||||||
|
|
||||||
// Represents a projected volume source
|
// Represents a projected volume source
|
||||||
type ProjectedVolumeSource struct {
|
type ProjectedVolumeSource struct {
|
||||||
// sources is the list of volume projections
|
// sources is the list of volume projections. Each entry in this list
|
||||||
|
// handles one source.
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
|
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
|
||||||
@ -1905,10 +1956,9 @@ type ProjectedVolumeSource struct {
|
|||||||
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
|
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Projection that may be projected along with other supported volume types
|
// Projection that may be projected along with other supported volume types.
|
||||||
|
// Exactly one of these fields must be set.
|
||||||
type VolumeProjection struct {
|
type VolumeProjection struct {
|
||||||
// all types below are the supported types for projection into the same volume
|
|
||||||
|
|
||||||
// secret information about the secret data to project
|
// secret information about the secret data to project
|
||||||
// +optional
|
// +optional
|
||||||
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
|
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
|
||||||
@ -2631,6 +2681,13 @@ type ResourceClaim struct {
|
|||||||
// the Pod where this field is used. It makes that resource available
|
// the Pod where this field is used. It makes that resource available
|
||||||
// inside a container.
|
// inside a container.
|
||||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||||
|
|
||||||
|
// Request is the name chosen for a request in the referenced claim.
|
||||||
|
// If empty, everything from the claim is made available, otherwise
|
||||||
|
// only the result of this request.
|
||||||
|
//
|
||||||
|
// +optional
|
||||||
|
Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"`
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -3030,6 +3087,93 @@ type ContainerStatus struct {
|
|||||||
// +listMapKey=mountPath
|
// +listMapKey=mountPath
|
||||||
// +featureGate=RecursiveReadOnlyMounts
|
// +featureGate=RecursiveReadOnlyMounts
|
||||||
VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
|
VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
|
||||||
|
// User represents user identity information initially attached to the first process of the container
|
||||||
|
// +featureGate=SupplementalGroupsPolicy
|
||||||
|
// +optional
|
||||||
|
User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"`
|
||||||
|
// AllocatedResourcesStatus represents the status of various resources
|
||||||
|
// allocated for this Pod.
|
||||||
|
// +featureGate=ResourceHealthStatus
|
||||||
|
// +optional
|
||||||
|
// +patchMergeKey=name
|
||||||
|
// +patchStrategy=merge
|
||||||
|
// +listType=map
|
||||||
|
// +listMapKey=name
|
||||||
|
AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ResourceStatus struct {
|
||||||
|
// Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
|
||||||
|
// +required
|
||||||
|
Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||||
|
// List of unique Resources health. Each element in the list contains an unique resource ID and resource health.
|
||||||
|
// At a minimum, ResourceID must uniquely identify the Resource
|
||||||
|
// allocated to the Pod on the Node for the lifetime of a Pod.
|
||||||
|
// See ResourceID type for it's definition.
|
||||||
|
// +listType=map
|
||||||
|
// +listMapKey=resourceID
|
||||||
|
Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ResourceHealthStatus string
|
||||||
|
|
||||||
|
const (
|
||||||
|
ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy"
|
||||||
|
ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy"
|
||||||
|
ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResourceID is calculated based on the source of this resource health information.
|
||||||
|
// For DevicePlugin:
|
||||||
|
//
|
||||||
|
// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
|
||||||
|
//
|
||||||
|
// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
|
||||||
|
// For DRA:
|
||||||
|
//
|
||||||
|
// dra:<driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
|
||||||
|
type ResourceID string
|
||||||
|
|
||||||
|
// ResourceHealth represents the health of a resource. It has the latest device health information.
|
||||||
|
// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
|
||||||
|
type ResourceHealth struct {
|
||||||
|
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
|
||||||
|
ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"`
|
||||||
|
// Health of the resource.
|
||||||
|
// can be one of:
|
||||||
|
// - Healthy: operates as normal
|
||||||
|
// - Unhealthy: reported unhealthy. We consider this a temporary health issue
|
||||||
|
// since we do not have a mechanism today to distinguish
|
||||||
|
// temporary and permanent issues.
|
||||||
|
// - Unknown: The status cannot be determined.
|
||||||
|
// For example, Device Plugin got unregistered and hasn't been re-registered since.
|
||||||
|
//
|
||||||
|
// In future we may want to introduce the PermanentlyUnhealthy Status.
|
||||||
|
Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerUser represents user identity information
|
||||||
|
type ContainerUser struct {
|
||||||
|
// Linux holds user identity information initially attached to the first process of the containers in Linux.
|
||||||
|
// Note that the actual running identity can be changed if the process has enough privilege to do so.
|
||||||
|
// +optional
|
||||||
|
Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"`
|
||||||
|
|
||||||
|
// Windows holds user identity information initially attached to the first process of the containers in Windows
|
||||||
|
// This is just reserved for future use.
|
||||||
|
// Windows *WindowsContainerUser
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinuxContainerUser represents user identity information in Linux containers
|
||||||
|
type LinuxContainerUser struct {
|
||||||
|
// UID is the primary uid initially attached to the first process in the container
|
||||||
|
UID int64 `json:"uid" protobuf:"varint,1,name=uid"`
|
||||||
|
// GID is the primary gid initially attached to the first process in the container
|
||||||
|
GID int64 `json:"gid" protobuf:"varint,2,name=gid"`
|
||||||
|
// SupplementalGroups are the supplemental groups initially attached to the first process in the container
|
||||||
|
// +optional
|
||||||
|
// +listType=atomic
|
||||||
|
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PodPhase is a label for the condition of a pod at the current time.
|
// PodPhase is a label for the condition of a pod at the current time.
|
||||||
@ -3426,7 +3570,8 @@ type PodAffinityTerm struct {
|
|||||||
// pod labels will be ignored. The default value is empty.
|
// pod labels will be ignored. The default value is empty.
|
||||||
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||||
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||||
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||||
|
//
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
// +optional
|
// +optional
|
||||||
MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
|
MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
|
||||||
@ -3438,7 +3583,8 @@ type PodAffinityTerm struct {
|
|||||||
// pod labels will be ignored. The default value is empty.
|
// pod labels will be ignored. The default value is empty.
|
||||||
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||||
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||||
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
|
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||||
|
//
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
// +optional
|
// +optional
|
||||||
MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
|
MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
|
||||||
@ -3667,9 +3813,11 @@ type PodSpec struct {
|
|||||||
// +optional
|
// +optional
|
||||||
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
|
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
|
||||||
|
|
||||||
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
|
// NodeName indicates in which node this pod is scheduled.
|
||||||
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
|
// If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
|
||||||
// requirements.
|
// Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
|
||||||
|
// This field should not be used to express a desire for the pod to be scheduled on a specific node.
|
||||||
|
// https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
|
||||||
// +optional
|
// +optional
|
||||||
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
|
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
|
||||||
// Host networking requested for this pod. Use the host's network namespace.
|
// Host networking requested for this pod. Use the host's network namespace.
|
||||||
@ -3826,6 +3974,7 @@ type PodSpec struct {
|
|||||||
// - spec.securityContext.runAsUser
|
// - spec.securityContext.runAsUser
|
||||||
// - spec.securityContext.runAsGroup
|
// - spec.securityContext.runAsGroup
|
||||||
// - spec.securityContext.supplementalGroups
|
// - spec.securityContext.supplementalGroups
|
||||||
|
// - spec.securityContext.supplementalGroupsPolicy
|
||||||
// - spec.containers[*].securityContext.appArmorProfile
|
// - spec.containers[*].securityContext.appArmorProfile
|
||||||
// - spec.containers[*].securityContext.seLinuxOptions
|
// - spec.containers[*].securityContext.seLinuxOptions
|
||||||
// - spec.containers[*].securityContext.seccompProfile
|
// - spec.containers[*].securityContext.seccompProfile
|
||||||
@ -3883,7 +4032,10 @@ type PodSpec struct {
|
|||||||
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
|
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
|
// PodResourceClaim references exactly one ResourceClaim, either directly
|
||||||
|
// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
|
||||||
|
// for the pod.
|
||||||
|
//
|
||||||
// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
|
// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
|
||||||
// Containers that need access to the ResourceClaim reference it with this name.
|
// Containers that need access to the ResourceClaim reference it with this name.
|
||||||
type PodResourceClaim struct {
|
type PodResourceClaim struct {
|
||||||
@ -3891,18 +4043,17 @@ type PodResourceClaim struct {
|
|||||||
// This must be a DNS_LABEL.
|
// This must be a DNS_LABEL.
|
||||||
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
||||||
|
|
||||||
// Source describes where to find the ResourceClaim.
|
// Source is tombstoned since Kubernetes 1.31 where it got replaced with
|
||||||
Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
|
// the inlined fields below.
|
||||||
}
|
//
|
||||||
|
// Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
|
||||||
|
|
||||||
// ClaimSource describes a reference to a ResourceClaim.
|
|
||||||
//
|
|
||||||
// Exactly one of these fields should be set. Consumers of this type must
|
|
||||||
// treat an empty object as if it has an unknown value.
|
|
||||||
type ClaimSource struct {
|
|
||||||
// ResourceClaimName is the name of a ResourceClaim object in the same
|
// ResourceClaimName is the name of a ResourceClaim object in the same
|
||||||
// namespace as this pod.
|
// namespace as this pod.
|
||||||
ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"`
|
//
|
||||||
|
// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
|
||||||
|
// be set.
|
||||||
|
ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"`
|
||||||
|
|
||||||
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
|
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
|
||||||
// object in the same namespace as this pod.
|
// object in the same namespace as this pod.
|
||||||
@ -3916,7 +4067,10 @@ type ClaimSource struct {
|
|||||||
// This field is immutable and no changes will be made to the
|
// This field is immutable and no changes will be made to the
|
||||||
// corresponding ResourceClaim by the control plane after creating the
|
// corresponding ResourceClaim by the control plane after creating the
|
||||||
// ResourceClaim.
|
// ResourceClaim.
|
||||||
ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"`
|
//
|
||||||
|
// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
|
||||||
|
// be set.
|
||||||
|
ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
|
// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
|
||||||
@ -3929,7 +4083,7 @@ type PodResourceClaimStatus struct {
|
|||||||
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
Name string `json:"name" protobuf:"bytes,1,name=name"`
|
||||||
|
|
||||||
// ResourceClaimName is the name of the ResourceClaim that was
|
// ResourceClaimName is the name of the ResourceClaim that was
|
||||||
// generated for the Pod in the namespace of the Pod. It this is
|
// generated for the Pod in the namespace of the Pod. If this is
|
||||||
// unset, then generating a ResourceClaim was not necessary. The
|
// unset, then generating a ResourceClaim was not necessary. The
|
||||||
// pod.spec.resourceClaims entry can be ignored in this case.
|
// pod.spec.resourceClaims entry can be ignored in this case.
|
||||||
//
|
//
|
||||||
@ -4137,6 +4291,23 @@ const (
|
|||||||
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
|
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// SupplementalGroupsPolicy defines how supplemental groups
|
||||||
|
// of the first container processes are calculated.
|
||||||
|
// +enum
|
||||||
|
type SupplementalGroupsPolicy string
|
||||||
|
|
||||||
|
const (
|
||||||
|
// SupplementalGroupsPolicyMerge means that the container's provided
|
||||||
|
// SupplementalGroups and FsGroup (specified in SecurityContext) will be
|
||||||
|
// merged with the primary user's groups as defined in the container image
|
||||||
|
// (in /etc/group).
|
||||||
|
SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge"
|
||||||
|
// SupplementalGroupsPolicyStrict means that the container's provided
|
||||||
|
// SupplementalGroups and FsGroup (specified in SecurityContext) will be
|
||||||
|
// used instead of any groups defined in the container image.
|
||||||
|
SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
|
||||||
|
)
|
||||||
|
|
||||||
// PodSecurityContext holds pod-level security attributes and common container settings.
|
// PodSecurityContext holds pod-level security attributes and common container settings.
|
||||||
// Some fields are also present in container.securityContext. Field values of
|
// Some fields are also present in container.securityContext. Field values of
|
||||||
// container.securityContext take precedence over field values of PodSecurityContext.
|
// container.securityContext take precedence over field values of PodSecurityContext.
|
||||||
@ -4179,16 +4350,27 @@ type PodSecurityContext struct {
|
|||||||
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
// PodSecurityContext, the value specified in SecurityContext takes precedence.
|
||||||
// +optional
|
// +optional
|
||||||
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
|
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
|
||||||
// A list of groups applied to the first process run in each container, in addition
|
// A list of groups applied to the first process run in each container, in
|
||||||
// to the container's primary GID, the fsGroup (if specified), and group memberships
|
// addition to the container's primary GID and fsGroup (if specified). If
|
||||||
// defined in the container image for the uid of the container process. If unspecified,
|
// the SupplementalGroupsPolicy feature is enabled, the
|
||||||
// no additional groups are added to any container. Note that group memberships
|
// supplementalGroupsPolicy field determines whether these are in addition
|
||||||
// defined in the container image for the uid of the container process are still effective,
|
// to or instead of any group memberships defined in the container image.
|
||||||
// even if they are not included in this list.
|
// If unspecified, no additional groups are added, though group memberships
|
||||||
|
// defined in the container image may still be used, depending on the
|
||||||
|
// supplementalGroupsPolicy field.
|
||||||
// Note that this field cannot be set when spec.os.name is windows.
|
// Note that this field cannot be set when spec.os.name is windows.
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
|
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
|
||||||
|
// Defines how supplemental groups of the first container processes are calculated.
|
||||||
|
// Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
|
||||||
|
// (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
|
||||||
|
// and the container runtime must implement support for this feature.
|
||||||
|
// Note that this field cannot be set when spec.os.name is windows.
|
||||||
|
// TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
|
||||||
|
// +featureGate=SupplementalGroupsPolicy
|
||||||
|
// +optional
|
||||||
|
SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"`
|
||||||
// A special supplemental group that applies to all containers in a pod.
|
// A special supplemental group that applies to all containers in a pod.
|
||||||
// Some volume types allow the Kubelet to change the ownership of that volume
|
// Some volume types allow the Kubelet to change the ownership of that volume
|
||||||
// to be owned by the pod:
|
// to be owned by the pod:
|
||||||
@ -4340,13 +4522,15 @@ type PodDNSConfigOption struct {
|
|||||||
// PodIP represents a single IP address allocated to the pod.
|
// PodIP represents a single IP address allocated to the pod.
|
||||||
type PodIP struct {
|
type PodIP struct {
|
||||||
// IP is the IP address assigned to the pod
|
// IP is the IP address assigned to the pod
|
||||||
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
|
// +required
|
||||||
|
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// HostIP represents a single IP address allocated to the host.
|
// HostIP represents a single IP address allocated to the host.
|
||||||
type HostIP struct {
|
type HostIP struct {
|
||||||
// IP is the IP address assigned to the host
|
// IP is the IP address assigned to the host
|
||||||
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
|
// +required
|
||||||
|
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
|
// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
|
||||||
@ -4663,6 +4847,7 @@ type PodStatus struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
|
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
|
||||||
type PodStatusResult struct {
|
type PodStatusResult struct {
|
||||||
@ -4683,6 +4868,7 @@ type PodStatusResult struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
|
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Pod is a collection of containers that can run on a host. This resource is created
|
// Pod is a collection of containers that can run on a host. This resource is created
|
||||||
// by clients and scheduled onto hosts.
|
// by clients and scheduled onto hosts.
|
||||||
@ -4708,6 +4894,7 @@ type Pod struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodList is a list of Pods.
|
// PodList is a list of Pods.
|
||||||
type PodList struct {
|
type PodList struct {
|
||||||
@ -4737,6 +4924,7 @@ type PodTemplateSpec struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodTemplate describes a template for creating copies of a predefined pod.
|
// PodTemplate describes a template for creating copies of a predefined pod.
|
||||||
type PodTemplate struct {
|
type PodTemplate struct {
|
||||||
@ -4753,6 +4941,7 @@ type PodTemplate struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodTemplateList is a list of PodTemplates.
|
// PodTemplateList is a list of PodTemplates.
|
||||||
type PodTemplateList struct {
|
type PodTemplateList struct {
|
||||||
@ -4867,6 +5056,7 @@ type ReplicationControllerCondition struct {
|
|||||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ReplicationController represents the configuration of a replication controller.
|
// ReplicationController represents the configuration of a replication controller.
|
||||||
type ReplicationController struct {
|
type ReplicationController struct {
|
||||||
@ -4893,6 +5083,7 @@ type ReplicationController struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ReplicationControllerList is a collection of replication controllers.
|
// ReplicationControllerList is a collection of replication controllers.
|
||||||
type ReplicationControllerList struct {
|
type ReplicationControllerList struct {
|
||||||
@ -5437,6 +5628,7 @@ type ServicePort struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:skipVerbs=deleteCollection
|
// +genclient:skipVerbs=deleteCollection
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Service is a named abstraction of software service (for example, mysql) consisting of local port
|
// Service is a named abstraction of software service (for example, mysql) consisting of local port
|
||||||
// (for example 3306) that the proxy listens on, and the selector that determines which pods
|
// (for example 3306) that the proxy listens on, and the selector that determines which pods
|
||||||
@ -5468,6 +5660,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ServiceList holds a list of services.
|
// ServiceList holds a list of services.
|
||||||
type ServiceList struct {
|
type ServiceList struct {
|
||||||
@ -5484,6 +5677,7 @@ type ServiceList struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
|
// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ServiceAccount binds together:
|
// ServiceAccount binds together:
|
||||||
// * a name, understood by users, and perhaps by peripheral systems, for an identity
|
// * a name, understood by users, and perhaps by peripheral systems, for an identity
|
||||||
@ -5523,6 +5717,7 @@ type ServiceAccount struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ServiceAccountList is a list of ServiceAccount objects
|
// ServiceAccountList is a list of ServiceAccount objects
|
||||||
type ServiceAccountList struct {
|
type ServiceAccountList struct {
|
||||||
@ -5539,6 +5734,7 @@ type ServiceAccountList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Endpoints is a collection of endpoints that implement the actual service. Example:
|
// Endpoints is a collection of endpoints that implement the actual service. Example:
|
||||||
//
|
//
|
||||||
@ -5660,6 +5856,7 @@ type EndpointPort struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// EndpointsList is a list of endpoints.
|
// EndpointsList is a list of endpoints.
|
||||||
type EndpointsList struct {
|
type EndpointsList struct {
|
||||||
@ -5772,13 +5969,16 @@ type NodeDaemonEndpoints struct {
|
|||||||
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
|
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeRuntimeHandlerFeatures is a set of runtime features.
|
// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
|
||||||
type NodeRuntimeHandlerFeatures struct {
|
type NodeRuntimeHandlerFeatures struct {
|
||||||
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
|
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
|
||||||
// +featureGate=RecursiveReadOnlyMounts
|
// +featureGate=RecursiveReadOnlyMounts
|
||||||
// +optional
|
// +optional
|
||||||
RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
|
RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
|
||||||
// Reserved: UserNamespaces *bool (varint 2, for consistency with CRI API)
|
// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
|
||||||
|
// +featureGate=UserNamespacesSupport
|
||||||
|
// +optional
|
||||||
|
UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NodeRuntimeHandler is a set of runtime handler information.
|
// NodeRuntimeHandler is a set of runtime handler information.
|
||||||
@ -5792,6 +5992,15 @@ type NodeRuntimeHandler struct {
|
|||||||
Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"`
|
Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NodeFeatures describes the set of features implemented by the CRI implementation.
|
||||||
|
// The features contained in the NodeFeatures should depend only on the cri implementation
|
||||||
|
// independent of runtime handlers.
|
||||||
|
type NodeFeatures struct {
|
||||||
|
// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
|
||||||
|
// +optional
|
||||||
|
SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"`
|
||||||
|
}
|
||||||
|
|
||||||
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
|
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
|
||||||
type NodeSystemInfo struct {
|
type NodeSystemInfo struct {
|
||||||
// MachineID reported by the node. For unique machine identification
|
// MachineID reported by the node. For unique machine identification
|
||||||
@ -5812,7 +6021,7 @@ type NodeSystemInfo struct {
|
|||||||
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
|
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
|
||||||
// Kubelet Version reported by the node.
|
// Kubelet Version reported by the node.
|
||||||
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
|
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
|
||||||
// KubeProxy Version reported by the node.
|
// Deprecated: KubeProxy Version reported by the node.
|
||||||
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
|
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
|
||||||
// The Operating System reported by the node
|
// The Operating System reported by the node
|
||||||
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
|
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
|
||||||
@ -5870,7 +6079,7 @@ type NodeConfigStatus struct {
|
|||||||
// NodeStatus is information about the current status of a node.
|
// NodeStatus is information about the current status of a node.
|
||||||
type NodeStatus struct {
|
type NodeStatus struct {
|
||||||
// Capacity represents the total resources of a node.
|
// Capacity represents the total resources of a node.
|
||||||
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
|
// More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
|
||||||
// +optional
|
// +optional
|
||||||
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
|
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
|
||||||
// Allocatable represents the resources of a node that are available for scheduling.
|
// Allocatable represents the resources of a node that are available for scheduling.
|
||||||
@ -5930,9 +6139,14 @@ type NodeStatus struct {
|
|||||||
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
|
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
|
||||||
// The available runtime handlers.
|
// The available runtime handlers.
|
||||||
// +featureGate=RecursiveReadOnlyMounts
|
// +featureGate=RecursiveReadOnlyMounts
|
||||||
|
// +featureGate=UserNamespacesSupport
|
||||||
// +optional
|
// +optional
|
||||||
// +listType=atomic
|
// +listType=atomic
|
||||||
RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"`
|
RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"`
|
||||||
|
// Features describes the set of features implemented by the CRI implementation.
|
||||||
|
// +featureGate=SupplementalGroupsPolicy
|
||||||
|
// +optional
|
||||||
|
Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type UniqueVolumeName string
|
type UniqueVolumeName string
|
||||||
@ -6128,6 +6342,7 @@ type ResourceList map[ResourceName]resource.Quantity
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Node is a worker node in Kubernetes.
|
// Node is a worker node in Kubernetes.
|
||||||
// Each node will have a unique identifier in the cache (i.e. in etcd).
|
// Each node will have a unique identifier in the cache (i.e. in etcd).
|
||||||
@ -6152,6 +6367,7 @@ type Node struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// NodeList is the whole list of all Nodes which have been registered with master.
|
// NodeList is the whole list of all Nodes which have been registered with master.
|
||||||
type NodeList struct {
|
type NodeList struct {
|
||||||
@ -6250,6 +6466,7 @@ type NamespaceCondition struct {
|
|||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +genclient:skipVerbs=deleteCollection
|
// +genclient:skipVerbs=deleteCollection
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Namespace provides a scope for Names.
|
// Namespace provides a scope for Names.
|
||||||
// Use of multiple namespaces is optional.
|
// Use of multiple namespaces is optional.
|
||||||
@ -6272,6 +6489,7 @@ type Namespace struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// NamespaceList is a list of Namespaces.
|
// NamespaceList is a list of Namespaces.
|
||||||
type NamespaceList struct {
|
type NamespaceList struct {
|
||||||
@ -6287,6 +6505,7 @@ type NamespaceList struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
|
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
|
||||||
// Deprecated in 1.7, please use the bindings subresource of pods instead.
|
// Deprecated in 1.7, please use the bindings subresource of pods instead.
|
||||||
@ -6311,6 +6530,7 @@ type Preconditions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodLogOptions is the query options for a Pod's logs REST call.
|
// PodLogOptions is the query options for a Pod's logs REST call.
|
||||||
type PodLogOptions struct {
|
type PodLogOptions struct {
|
||||||
@ -6363,6 +6583,7 @@ type PodLogOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||||
|
|
||||||
// PodAttachOptions is the query options to a Pod's remote attach call.
|
// PodAttachOptions is the query options to a Pod's remote attach call.
|
||||||
// ---
|
// ---
|
||||||
@ -6401,6 +6622,7 @@ type PodAttachOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodExecOptions is the query options to a Pod's remote exec call.
|
// PodExecOptions is the query options to a Pod's remote exec call.
|
||||||
// ---
|
// ---
|
||||||
@ -6439,6 +6661,7 @@ type PodExecOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||||
|
|
||||||
// PodPortForwardOptions is the query options to a Pod's port forward call
|
// PodPortForwardOptions is the query options to a Pod's port forward call
|
||||||
// when using WebSockets.
|
// when using WebSockets.
|
||||||
@ -6458,6 +6681,7 @@ type PodPortForwardOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// PodProxyOptions is the query options to a Pod's proxy call.
|
// PodProxyOptions is the query options to a Pod's proxy call.
|
||||||
type PodProxyOptions struct {
|
type PodProxyOptions struct {
|
||||||
@ -6470,6 +6694,7 @@ type PodProxyOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// NodeProxyOptions is the query options to a Node's proxy call.
|
// NodeProxyOptions is the query options to a Node's proxy call.
|
||||||
type NodeProxyOptions struct {
|
type NodeProxyOptions struct {
|
||||||
@ -6482,6 +6707,7 @@ type NodeProxyOptions struct {
|
|||||||
|
|
||||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// ServiceProxyOptions is the query options to a Service's proxy call.
|
// ServiceProxyOptions is the query options to a Service's proxy call.
|
||||||
type ServiceProxyOptions struct {
|
type ServiceProxyOptions struct {
|
||||||
@ -6584,6 +6810,7 @@ type TypedLocalObjectReference struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// SerializedReference is a reference to serialized object.
|
// SerializedReference is a reference to serialized object.
|
||||||
type SerializedReference struct {
|
type SerializedReference struct {
|
||||||
@ -6613,6 +6840,7 @@ const (
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Event is a report of an event somewhere in the cluster. Events
|
// Event is a report of an event somewhere in the cluster. Events
|
||||||
// have a limited retention time and triggers and messages may evolve
|
// have a limited retention time and triggers and messages may evolve
|
||||||
@ -6697,6 +6925,7 @@ type EventSeries struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// EventList is a list of events.
|
// EventList is a list of events.
|
||||||
type EventList struct {
|
type EventList struct {
|
||||||
@ -6711,6 +6940,7 @@ type EventList struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// List holds a list of objects, which may not be known by the server.
|
// List holds a list of objects, which may not be known by the server.
|
||||||
type List metav1.List
|
type List metav1.List
|
||||||
@ -6758,6 +6988,7 @@ type LimitRangeSpec struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
|
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
|
||||||
type LimitRange struct {
|
type LimitRange struct {
|
||||||
@ -6774,6 +7005,7 @@ type LimitRange struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// LimitRangeList is a list of LimitRange items.
|
// LimitRangeList is a list of LimitRange items.
|
||||||
type LimitRangeList struct {
|
type LimitRangeList struct {
|
||||||
@ -6822,6 +7054,8 @@ const (
|
|||||||
ResourceLimitsMemory ResourceName = "limits.memory"
|
ResourceLimitsMemory ResourceName = "limits.memory"
|
||||||
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
||||||
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
||||||
|
// resource.k8s.io devices requested with a certain DeviceClass, number
|
||||||
|
ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
|
||||||
)
|
)
|
||||||
|
|
||||||
// The following identify resource prefix for Kubernetes object types
|
// The following identify resource prefix for Kubernetes object types
|
||||||
@ -6922,6 +7156,7 @@ type ResourceQuotaStatus struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ResourceQuota sets aggregate quota restrictions enforced per namespace
|
// ResourceQuota sets aggregate quota restrictions enforced per namespace
|
||||||
type ResourceQuota struct {
|
type ResourceQuota struct {
|
||||||
@ -6943,6 +7178,7 @@ type ResourceQuota struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ResourceQuotaList is a list of ResourceQuota items.
|
// ResourceQuotaList is a list of ResourceQuota items.
|
||||||
type ResourceQuotaList struct {
|
type ResourceQuotaList struct {
|
||||||
@ -6959,6 +7195,7 @@ type ResourceQuotaList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Secret holds secret data of a certain type. The total bytes of the values in
|
// Secret holds secret data of a certain type. The total bytes of the values in
|
||||||
// the Data field must be less than MaxSecretSize bytes.
|
// the Data field must be less than MaxSecretSize bytes.
|
||||||
@ -7085,6 +7322,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// SecretList is a list of Secret.
|
// SecretList is a list of Secret.
|
||||||
type SecretList struct {
|
type SecretList struct {
|
||||||
@ -7101,6 +7339,7 @@ type SecretList struct {
|
|||||||
|
|
||||||
// +genclient
|
// +genclient
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// ConfigMap holds configuration data for pods to consume.
|
// ConfigMap holds configuration data for pods to consume.
|
||||||
type ConfigMap struct {
|
type ConfigMap struct {
|
||||||
@ -7137,6 +7376,7 @@ type ConfigMap struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||||
|
|
||||||
// ConfigMapList is a resource containing a list of ConfigMap objects.
|
// ConfigMapList is a resource containing a list of ConfigMap objects.
|
||||||
type ConfigMapList struct {
|
type ConfigMapList struct {
|
||||||
@ -7179,6 +7419,7 @@ type ComponentCondition struct {
|
|||||||
// +genclient
|
// +genclient
|
||||||
// +genclient:nonNamespaced
|
// +genclient:nonNamespaced
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
|
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
|
||||||
// Deprecated: This API is deprecated in v1.19+
|
// Deprecated: This API is deprecated in v1.19+
|
||||||
@ -7199,6 +7440,7 @@ type ComponentStatus struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// Status of all the conditions for the component as a list of ComponentStatus objects.
|
// Status of all the conditions for the component as a list of ComponentStatus objects.
|
||||||
// Deprecated: This API is deprecated in v1.19+
|
// Deprecated: This API is deprecated in v1.19+
|
||||||
@ -7332,7 +7574,7 @@ type SecurityContext struct {
|
|||||||
// +optional
|
// +optional
|
||||||
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
|
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
|
||||||
// procMount denotes the type of proc mount to use for the containers.
|
// procMount denotes the type of proc mount to use for the containers.
|
||||||
// The default is DefaultProcMount which uses the container runtime defaults for
|
// The default value is Default which uses the container runtime defaults for
|
||||||
// readonly paths and masked paths.
|
// readonly paths and masked paths.
|
||||||
// This requires the ProcMountType feature flag to be enabled.
|
// This requires the ProcMountType feature flag to be enabled.
|
||||||
// Note that this field cannot be set when spec.os.name is windows.
|
// Note that this field cannot be set when spec.os.name is windows.
|
||||||
@ -7410,6 +7652,7 @@ type WindowsSecurityContextOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||||
|
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||||
|
|
||||||
// RangeAllocation is not a public type.
|
// RangeAllocation is not a public type.
|
||||||
type RangeAllocation struct {
|
type RangeAllocation struct {
|
||||||
@ -7519,3 +7762,23 @@ const (
|
|||||||
// the destination set to the node's IP and port or the pod's IP and port.
|
// the destination set to the node's IP and port or the pod's IP and port.
|
||||||
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
|
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ImageVolumeSource represents a image volume resource.
|
||||||
|
type ImageVolumeSource struct {
|
||||||
|
// Required: Image or artifact reference to be used.
|
||||||
|
// Behaves in the same way as pod.spec.containers[*].image.
|
||||||
|
// Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/containers/images
|
||||||
|
// This field is optional to allow higher level config management to default or override
|
||||||
|
// container images in workload controllers like Deployments and StatefulSets.
|
||||||
|
// +optional
|
||||||
|
Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
|
||||||
|
|
||||||
|
// Policy for pulling OCI objects. Possible values are:
|
||||||
|
// Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails.
|
||||||
|
// Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
|
||||||
|
// IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
|
||||||
|
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
|
||||||
|
// +optional
|
||||||
|
PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"`
|
||||||
|
}
|
||||||
|
168
vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
168
vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
@ -219,16 +219,6 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string {
|
|||||||
return map_CinderVolumeSource
|
return map_CinderVolumeSource
|
||||||
}
|
}
|
||||||
|
|
||||||
var map_ClaimSource = map[string]string{
|
|
||||||
"": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.",
|
|
||||||
"resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
|
|
||||||
"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ClaimSource) SwaggerDoc() map[string]string {
|
|
||||||
return map_ClaimSource
|
|
||||||
}
|
|
||||||
|
|
||||||
var map_ClientIPConfig = map[string]string{
|
var map_ClientIPConfig = map[string]string{
|
||||||
"": "ClientIPConfig represents the configurations of Client IP based session affinity.",
|
"": "ClientIPConfig represents the configurations of Client IP based session affinity.",
|
||||||
"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
|
"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
|
||||||
@ -469,25 +459,36 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_ContainerStatus = map[string]string{
|
var map_ContainerStatus = map[string]string{
|
||||||
"": "ContainerStatus contains details for the current status of this container.",
|
"": "ContainerStatus contains details for the current status of this container.",
|
||||||
"name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.",
|
"name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.",
|
||||||
"state": "State holds details about the container's current condition.",
|
"state": "State holds details about the container's current condition.",
|
||||||
"lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.",
|
"lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.",
|
||||||
"ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.",
|
"ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.",
|
||||||
"restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.",
|
"restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.",
|
||||||
"image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.",
|
"image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.",
|
||||||
"imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.",
|
"imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.",
|
||||||
"containerID": "ContainerID is the ID of the container in the format '<type>://<container_id>'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").",
|
"containerID": "ContainerID is the ID of the container in the format '<type>://<container_id>'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").",
|
||||||
"started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.",
|
"started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.",
|
||||||
"allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
|
"allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
|
||||||
"resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
|
"resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
|
||||||
"volumeMounts": "Status of volume mounts.",
|
"volumeMounts": "Status of volume mounts.",
|
||||||
|
"user": "User represents user identity information initially attached to the first process of the container",
|
||||||
|
"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ContainerStatus) SwaggerDoc() map[string]string {
|
func (ContainerStatus) SwaggerDoc() map[string]string {
|
||||||
return map_ContainerStatus
|
return map_ContainerStatus
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ContainerUser = map[string]string{
|
||||||
|
"": "ContainerUser represents user identity information",
|
||||||
|
"linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ContainerUser) SwaggerDoc() map[string]string {
|
||||||
|
return map_ContainerUser
|
||||||
|
}
|
||||||
|
|
||||||
var map_DaemonEndpoint = map[string]string{
|
var map_DaemonEndpoint = map[string]string{
|
||||||
"": "DaemonEndpoint contains information about a single Daemon endpoint.",
|
"": "DaemonEndpoint contains information about a single Daemon endpoint.",
|
||||||
"Port": "Port number of the given endpoint.",
|
"Port": "Port number of the given endpoint.",
|
||||||
@ -933,6 +934,16 @@ func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
|
|||||||
return map_ISCSIVolumeSource
|
return map_ISCSIVolumeSource
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ImageVolumeSource = map[string]string{
|
||||||
|
"": "ImageVolumeSource represents a image volume resource.",
|
||||||
|
"reference": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
|
||||||
|
"pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ImageVolumeSource) SwaggerDoc() map[string]string {
|
||||||
|
return map_ImageVolumeSource
|
||||||
|
}
|
||||||
|
|
||||||
var map_KeyToPath = map[string]string{
|
var map_KeyToPath = map[string]string{
|
||||||
"": "Maps a string key to a path within a volume.",
|
"": "Maps a string key to a path within a volume.",
|
||||||
"key": "key is the key to project.",
|
"key": "key is the key to project.",
|
||||||
@ -1009,6 +1020,17 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string {
|
|||||||
return map_LimitRangeSpec
|
return map_LimitRangeSpec
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_LinuxContainerUser = map[string]string{
|
||||||
|
"": "LinuxContainerUser represents user identity information in Linux containers",
|
||||||
|
"uid": "UID is the primary uid initially attached to the first process in the container",
|
||||||
|
"gid": "GID is the primary gid initially attached to the first process in the container",
|
||||||
|
"supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (LinuxContainerUser) SwaggerDoc() map[string]string {
|
||||||
|
return map_LinuxContainerUser
|
||||||
|
}
|
||||||
|
|
||||||
var map_LoadBalancerIngress = map[string]string{
|
var map_LoadBalancerIngress = map[string]string{
|
||||||
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
|
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
|
||||||
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
|
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
|
||||||
@ -1195,6 +1217,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
|
|||||||
return map_NodeDaemonEndpoints
|
return map_NodeDaemonEndpoints
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_NodeFeatures = map[string]string{
|
||||||
|
"": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.",
|
||||||
|
"supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (NodeFeatures) SwaggerDoc() map[string]string {
|
||||||
|
return map_NodeFeatures
|
||||||
|
}
|
||||||
|
|
||||||
var map_NodeList = map[string]string{
|
var map_NodeList = map[string]string{
|
||||||
"": "NodeList is the whole list of all Nodes which have been registered with master.",
|
"": "NodeList is the whole list of all Nodes which have been registered with master.",
|
||||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||||
@ -1225,8 +1256,9 @@ func (NodeRuntimeHandler) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_NodeRuntimeHandlerFeatures = map[string]string{
|
var map_NodeRuntimeHandlerFeatures = map[string]string{
|
||||||
"": "NodeRuntimeHandlerFeatures is a set of runtime features.",
|
"": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.",
|
||||||
"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.",
|
"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.",
|
||||||
|
"userNamespaces": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string {
|
func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string {
|
||||||
@ -1280,7 +1312,7 @@ func (NodeSpec) SwaggerDoc() map[string]string {
|
|||||||
|
|
||||||
var map_NodeStatus = map[string]string{
|
var map_NodeStatus = map[string]string{
|
||||||
"": "NodeStatus is information about the current status of a node.",
|
"": "NodeStatus is information about the current status of a node.",
|
||||||
"capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
|
"capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
|
||||||
"allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
|
"allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
|
||||||
"phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
|
"phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
|
||||||
"conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
|
"conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
|
||||||
@ -1292,6 +1324,7 @@ var map_NodeStatus = map[string]string{
|
|||||||
"volumesAttached": "List of volumes that are attached to the node.",
|
"volumesAttached": "List of volumes that are attached to the node.",
|
||||||
"config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
|
"config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
|
||||||
"runtimeHandlers": "The available runtime handlers.",
|
"runtimeHandlers": "The available runtime handlers.",
|
||||||
|
"features": "Features describes the set of features implemented by the CRI implementation.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (NodeStatus) SwaggerDoc() map[string]string {
|
func (NodeStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1307,7 +1340,7 @@ var map_NodeSystemInfo = map[string]string{
|
|||||||
"osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
|
"osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
|
||||||
"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).",
|
"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).",
|
||||||
"kubeletVersion": "Kubelet Version reported by the node.",
|
"kubeletVersion": "Kubelet Version reported by the node.",
|
||||||
"kubeProxyVersion": "KubeProxy Version reported by the node.",
|
"kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.",
|
||||||
"operatingSystem": "The Operating System reported by the node",
|
"operatingSystem": "The Operating System reported by the node",
|
||||||
"architecture": "The Architecture reported by the node",
|
"architecture": "The Architecture reported by the node",
|
||||||
}
|
}
|
||||||
@ -1395,7 +1428,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
|
|||||||
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
|
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
|
||||||
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
|
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
|
||||||
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
|
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
|
||||||
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.",
|
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
||||||
@ -1410,8 +1443,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
|
|||||||
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
|
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
|
||||||
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||||
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. 
For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||||
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
|
||||||
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
|
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1488,7 +1521,7 @@ var map_PersistentVolumeSpec = map[string]string{
|
|||||||
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
|
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
|
||||||
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
|
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
|
||||||
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
|
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
|
||||||
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
|
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
|
||||||
@ -1500,7 +1533,7 @@ var map_PersistentVolumeStatus = map[string]string{
|
|||||||
"phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
|
"phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
|
||||||
"message": "message is a human-readable message indicating details about why the volume is in this state.",
|
"message": "message is a human-readable message indicating details about why the volume is in this state.",
|
||||||
"reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
|
"reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
|
||||||
"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).",
|
"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
|
func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1544,8 +1577,8 @@ var map_PodAffinityTerm = map[string]string{
|
|||||||
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
|
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
|
||||||
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
|
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
|
||||||
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
|
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
|
||||||
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
|
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
|
||||||
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
|
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodAffinityTerm) SwaggerDoc() map[string]string {
|
func (PodAffinityTerm) SwaggerDoc() map[string]string {
|
||||||
@ -1696,9 +1729,10 @@ func (PodReadinessGate) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_PodResourceClaim = map[string]string{
|
var map_PodResourceClaim = map[string]string{
|
||||||
"": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
|
"": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
|
||||||
"name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
|
"name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
|
||||||
"source": "Source describes where to find the ResourceClaim.",
|
"resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
|
||||||
|
"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodResourceClaim) SwaggerDoc() map[string]string {
|
func (PodResourceClaim) SwaggerDoc() map[string]string {
|
||||||
@ -1708,7 +1742,7 @@ func (PodResourceClaim) SwaggerDoc() map[string]string {
|
|||||||
var map_PodResourceClaimStatus = map[string]string{
|
var map_PodResourceClaimStatus = map[string]string{
|
||||||
"": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
|
"": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
|
||||||
"name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
|
"name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
|
||||||
"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
|
"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
|
func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
|
||||||
@ -1725,18 +1759,19 @@ func (PodSchedulingGate) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_PodSecurityContext = map[string]string{
|
var map_PodSecurityContext = map[string]string{
|
||||||
"": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
|
"": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.",
|
||||||
"seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
"seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.",
|
"windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.",
|
||||||
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
"runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
"runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
||||||
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.",
|
"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
|
"supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
|
"fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
|
||||||
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
|
"sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
|
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
|
"seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
|
"appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PodSecurityContext) SwaggerDoc() map[string]string {
|
func (PodSecurityContext) SwaggerDoc() map[string]string {
|
||||||
@ -1766,7 +1801,7 @@ var map_PodSpec = map[string]string{
|
|||||||
"serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
|
"serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
|
||||||
"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
|
"serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
|
||||||
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
|
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
|
||||||
"nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
|
"nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
|
||||||
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
"hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
|
||||||
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
"hostPID": "Use the host's pid namespace. Optional: Default to false.",
|
||||||
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
|
"hostIPC": "Use the host's ipc namespace. Optional: Default to false.",
|
||||||
@ -1789,7 +1824,7 @@ var map_PodSpec = map[string]string{
|
|||||||
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
|
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
|
||||||
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
|
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
|
||||||
"setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
|
"setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
|
||||||
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
|
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
|
||||||
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
|
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
|
||||||
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
|
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
|
||||||
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
|
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
|
||||||
@ -1943,7 +1978,7 @@ func (ProbeHandler) SwaggerDoc() map[string]string {
|
|||||||
|
|
||||||
var map_ProjectedVolumeSource = map[string]string{
|
var map_ProjectedVolumeSource = map[string]string{
|
||||||
"": "Represents a projected volume source",
|
"": "Represents a projected volume source",
|
||||||
"sources": "sources is the list of volume projections",
|
"sources": "sources is the list of volume projections. Each entry in this list handles one source.",
|
||||||
"defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
|
"defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2069,8 +2104,9 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_ResourceClaim = map[string]string{
|
var map_ResourceClaim = map[string]string{
|
||||||
"": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
|
"": "ResourceClaim references one entry in PodSpec.ResourceClaims.",
|
||||||
"name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
|
"name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
|
||||||
|
"request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ResourceClaim) SwaggerDoc() map[string]string {
|
func (ResourceClaim) SwaggerDoc() map[string]string {
|
||||||
@ -2088,6 +2124,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string {
|
|||||||
return map_ResourceFieldSelector
|
return map_ResourceFieldSelector
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ResourceHealth = map[string]string{
|
||||||
|
"": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.",
|
||||||
|
"resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.",
|
||||||
|
"health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ResourceHealth) SwaggerDoc() map[string]string {
|
||||||
|
return map_ResourceHealth
|
||||||
|
}
|
||||||
|
|
||||||
var map_ResourceQuota = map[string]string{
|
var map_ResourceQuota = map[string]string{
|
||||||
"": "ResourceQuota sets aggregate quota restrictions enforced per namespace",
|
"": "ResourceQuota sets aggregate quota restrictions enforced per namespace",
|
||||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||||
@ -2141,6 +2187,15 @@ func (ResourceRequirements) SwaggerDoc() map[string]string {
|
|||||||
return map_ResourceRequirements
|
return map_ResourceRequirements
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var map_ResourceStatus = map[string]string{
|
||||||
|
"name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.",
|
||||||
|
"resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ResourceStatus) SwaggerDoc() map[string]string {
|
||||||
|
return map_ResourceStatus
|
||||||
|
}
|
||||||
|
|
||||||
var map_SELinuxOptions = map[string]string{
|
var map_SELinuxOptions = map[string]string{
|
||||||
"": "SELinuxOptions are the labels to be applied to the container",
|
"": "SELinuxOptions are the labels to be applied to the container",
|
||||||
"user": "User is a SELinux user label that applies to the container.",
|
"user": "User is a SELinux user label that applies to the container.",
|
||||||
@ -2304,7 +2359,7 @@ var map_SecurityContext = map[string]string{
|
|||||||
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
"runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
|
||||||
"readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.",
|
"readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.",
|
"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
|
"procMount": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.",
|
"seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
"appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.",
|
"appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.",
|
||||||
}
|
}
|
||||||
@ -2639,7 +2694,7 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var map_VolumeProjection = map[string]string{
|
var map_VolumeProjection = map[string]string{
|
||||||
"": "Projection that may be projected along with other supported volume types",
|
"": "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.",
|
||||||
"secret": "secret information about the secret data to project",
|
"secret": "secret information about the secret data to project",
|
||||||
"downwardAPI": "downwardAPI information about the downwardAPI data to project",
|
"downwardAPI": "downwardAPI information about the downwardAPI data to project",
|
||||||
"configMap": "configMap information about the configMap data to project",
|
"configMap": "configMap information about the configMap data to project",
|
||||||
@ -2692,6 +2747,7 @@ var map_VolumeSource = map[string]string{
|
|||||||
"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
|
"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
|
||||||
"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
|
"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
|
||||||
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
|
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
|
||||||
|
"image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
|
||||||
}
|
}
|
||||||
|
|
||||||
func (VolumeSource) SwaggerDoc() map[string]string {
|
func (VolumeSource) SwaggerDoc() map[string]string {
|
||||||
|
185
vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
generated
vendored
185
vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
generated
vendored
@ -440,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
|
||||||
func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
|
|
||||||
*out = *in
|
|
||||||
if in.ResourceClaimName != nil {
|
|
||||||
in, out := &in.ResourceClaimName, &out.ResourceClaimName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.ResourceClaimTemplateName != nil {
|
|
||||||
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
|
|
||||||
*out = new(string)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
|
|
||||||
func (in *ClaimSource) DeepCopy() *ClaimSource {
|
|
||||||
if in == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := new(ClaimSource)
|
|
||||||
in.DeepCopyInto(out)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
|
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -1069,6 +1043,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.User != nil {
|
||||||
|
in, out := &in.User, &out.User
|
||||||
|
*out = new(ContainerUser)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.AllocatedResourcesStatus != nil {
|
||||||
|
in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus
|
||||||
|
*out = make([]ResourceStatus, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1082,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ContainerUser) DeepCopyInto(out *ContainerUser) {
|
||||||
|
*out = *in
|
||||||
|
if in.Linux != nil {
|
||||||
|
in, out := &in.Linux, &out.Linux
|
||||||
|
*out = new(LinuxContainerUser)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser.
|
||||||
|
func (in *ContainerUser) DeepCopy() *ContainerUser {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ContainerUser)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
|
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2044,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource.
|
||||||
|
func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ImageVolumeSource)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
|
func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2261,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) {
|
||||||
|
*out = *in
|
||||||
|
if in.SupplementalGroups != nil {
|
||||||
|
in, out := &in.SupplementalGroups, &out.SupplementalGroups
|
||||||
|
*out = make([]int64, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser.
|
||||||
|
func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(LinuxContainerUser)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *List) DeepCopyInto(out *List) {
|
func (in *List) DeepCopyInto(out *List) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2695,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) {
|
||||||
|
*out = *in
|
||||||
|
if in.SupplementalGroupsPolicy != nil {
|
||||||
|
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures.
|
||||||
|
func (in *NodeFeatures) DeepCopy() *NodeFeatures {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(NodeFeatures)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *NodeList) DeepCopyInto(out *NodeList) {
|
func (in *NodeList) DeepCopyInto(out *NodeList) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -2782,6 +2847,11 @@ func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatur
|
|||||||
*out = new(bool)
|
*out = new(bool)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
|
if in.UserNamespaces != nil {
|
||||||
|
in, out := &in.UserNamespaces, &out.UserNamespaces
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2962,6 +3032,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.Features != nil {
|
||||||
|
in, out := &in.Features, &out.Features
|
||||||
|
*out = new(NodeFeatures)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3971,7 +4046,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
|
|||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
|
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
|
||||||
*out = *in
|
*out = *in
|
||||||
in.Source.DeepCopyInto(&out.Source)
|
if in.ResourceClaimName != nil {
|
||||||
|
in, out := &in.ResourceClaimName, &out.ResourceClaimName
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.ResourceClaimTemplateName != nil {
|
||||||
|
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4055,6 +4139,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
|
|||||||
*out = make([]int64, len(*in))
|
*out = make([]int64, len(*in))
|
||||||
copy(*out, *in)
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
|
if in.SupplementalGroupsPolicy != nil {
|
||||||
|
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
|
||||||
|
*out = new(SupplementalGroupsPolicy)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
if in.FSGroup != nil {
|
if in.FSGroup != nil {
|
||||||
in, out := &in.FSGroup, &out.FSGroup
|
in, out := &in.FSGroup, &out.FSGroup
|
||||||
*out = new(int64)
|
*out = new(int64)
|
||||||
@ -4900,6 +4989,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) {
|
||||||
|
*out = *in
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth.
|
||||||
|
func (in *ResourceHealth) DeepCopy() *ResourceHealth {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ResourceHealth)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in ResourceList) DeepCopyInto(out *ResourceList) {
|
func (in ResourceList) DeepCopyInto(out *ResourceList) {
|
||||||
{
|
{
|
||||||
@ -5081,6 +5186,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.Resources != nil {
|
||||||
|
in, out := &in.Resources, &out.Resources
|
||||||
|
*out = make([]ResourceHealth, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
|
||||||
|
func (in *ResourceStatus) DeepCopy() *ResourceStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ResourceStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
|
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@ -6426,6 +6552,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
|
|||||||
*out = new(EphemeralVolumeSource)
|
*out = new(EphemeralVolumeSource)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
|
if in.Image != nil {
|
||||||
|
in, out := &in.Image, &out.Image
|
||||||
|
*out = new(ImageVolumeSource)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
274
vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
274
vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
generated
vendored
Normal file
@ -0,0 +1,274 @@
|
|||||||
|
//go:build !ignore_autogenerated
|
||||||
|
// +build !ignore_autogenerated
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Binding) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Endpoints) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Event) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *EventList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *LimitRange) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *List) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Namespace) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Node) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *NodeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Pod) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 6
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Secret) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *SecretList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *Service) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceList) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||||
|
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||||
|
func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) {
|
||||||
|
return 1, 2
|
||||||
|
}
|
14
vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
generated
vendored
Normal file
14
vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# See the OWNERS docs at https://go.k8s.io/owners
|
||||||
|
|
||||||
|
reviewers:
|
||||||
|
- thockin
|
||||||
|
- smarterclayton
|
||||||
|
- wojtek-t
|
||||||
|
- deads2k
|
||||||
|
- derekwaynecarr
|
||||||
|
- caesarxuchao
|
||||||
|
- mikedanese
|
||||||
|
- liggitt
|
||||||
|
- janetkuo
|
||||||
|
- ncdc
|
||||||
|
- dims
|
119
vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
generated
vendored
Normal file
119
vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true
|
||||||
|
// if the conditions are changed by this call.
|
||||||
|
// conditions must be non-nil.
|
||||||
|
// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to
|
||||||
|
// newCondition, LastTransitionTime is set to now if the new status differs from the old status)
|
||||||
|
// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended)
|
||||||
|
func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) {
|
||||||
|
if conditions == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
existingCondition := FindStatusCondition(*conditions, newCondition.Type)
|
||||||
|
if existingCondition == nil {
|
||||||
|
if newCondition.LastTransitionTime.IsZero() {
|
||||||
|
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
|
||||||
|
}
|
||||||
|
*conditions = append(*conditions, newCondition)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if existingCondition.Status != newCondition.Status {
|
||||||
|
existingCondition.Status = newCondition.Status
|
||||||
|
if !newCondition.LastTransitionTime.IsZero() {
|
||||||
|
existingCondition.LastTransitionTime = newCondition.LastTransitionTime
|
||||||
|
} else {
|
||||||
|
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
|
||||||
|
}
|
||||||
|
changed = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if existingCondition.Reason != newCondition.Reason {
|
||||||
|
existingCondition.Reason = newCondition.Reason
|
||||||
|
changed = true
|
||||||
|
}
|
||||||
|
if existingCondition.Message != newCondition.Message {
|
||||||
|
existingCondition.Message = newCondition.Message
|
||||||
|
changed = true
|
||||||
|
}
|
||||||
|
if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
|
||||||
|
existingCondition.ObservedGeneration = newCondition.ObservedGeneration
|
||||||
|
changed = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return changed
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns
|
||||||
|
// true if it was present and got removed.
|
||||||
|
// conditions must be non-nil.
|
||||||
|
func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) {
|
||||||
|
if conditions == nil || len(*conditions) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
newConditions := make([]metav1.Condition, 0, len(*conditions)-1)
|
||||||
|
for _, condition := range *conditions {
|
||||||
|
if condition.Type != conditionType {
|
||||||
|
newConditions = append(newConditions, condition)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
removed = len(*conditions) != len(newConditions)
|
||||||
|
*conditions = newConditions
|
||||||
|
|
||||||
|
return removed
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindStatusCondition finds the conditionType in conditions.
|
||||||
|
func FindStatusCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition {
|
||||||
|
for i := range conditions {
|
||||||
|
if conditions[i].Type == conditionType {
|
||||||
|
return &conditions[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsStatusConditionTrue returns true when the conditionType is present and set to `metav1.ConditionTrue`
|
||||||
|
func IsStatusConditionTrue(conditions []metav1.Condition, conditionType string) bool {
|
||||||
|
return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsStatusConditionFalse returns true when the conditionType is present and set to `metav1.ConditionFalse`
|
||||||
|
func IsStatusConditionFalse(conditions []metav1.Condition, conditionType string) bool {
|
||||||
|
return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionFalse)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
|
||||||
|
func IsStatusConditionPresentAndEqual(conditions []metav1.Condition, conditionType string, status metav1.ConditionStatus) bool {
|
||||||
|
for _, condition := range conditions {
|
||||||
|
if condition.Type == conditionType {
|
||||||
|
return condition.Status == status
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
19
vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package meta provides functions for retrieving API metadata from objects
|
||||||
|
// belonging to the Kubernetes API
|
||||||
|
package meta // import "k8s.io/apimachinery/pkg/api/meta"
|
132
vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
generated
vendored
Normal file
132
vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource
|
||||||
|
type AmbiguousResourceError struct {
|
||||||
|
PartialResource schema.GroupVersionResource
|
||||||
|
|
||||||
|
MatchingResources []schema.GroupVersionResource
|
||||||
|
MatchingKinds []schema.GroupVersionKind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *AmbiguousResourceError) Error() string {
|
||||||
|
switch {
|
||||||
|
case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
|
||||||
|
return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds)
|
||||||
|
case len(e.MatchingKinds) > 0:
|
||||||
|
return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds)
|
||||||
|
case len(e.MatchingResources) > 0:
|
||||||
|
return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*AmbiguousResourceError) Is(target error) bool {
|
||||||
|
_, ok := target.(*AmbiguousResourceError)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind
|
||||||
|
type AmbiguousKindError struct {
|
||||||
|
PartialKind schema.GroupVersionKind
|
||||||
|
|
||||||
|
MatchingResources []schema.GroupVersionResource
|
||||||
|
MatchingKinds []schema.GroupVersionKind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *AmbiguousKindError) Error() string {
|
||||||
|
switch {
|
||||||
|
case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
|
||||||
|
return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds)
|
||||||
|
case len(e.MatchingKinds) > 0:
|
||||||
|
return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds)
|
||||||
|
case len(e.MatchingResources) > 0:
|
||||||
|
return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*AmbiguousKindError) Is(target error) bool {
|
||||||
|
_, ok := target.(*AmbiguousKindError)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsAmbiguousError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return errors.Is(err, &AmbiguousResourceError{}) || errors.Is(err, &AmbiguousKindError{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource
|
||||||
|
type NoResourceMatchError struct {
|
||||||
|
PartialResource schema.GroupVersionResource
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *NoResourceMatchError) Error() string {
|
||||||
|
return fmt.Sprintf("no matches for %v", e.PartialResource)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*NoResourceMatchError) Is(target error) bool {
|
||||||
|
_, ok := target.(*NoResourceMatchError)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoKindMatchError is returned if the RESTMapper can't find any match for a kind
|
||||||
|
type NoKindMatchError struct {
|
||||||
|
// GroupKind is the API group and kind that was searched
|
||||||
|
GroupKind schema.GroupKind
|
||||||
|
// SearchedVersions is the optional list of versions the search was restricted to
|
||||||
|
SearchedVersions []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *NoKindMatchError) Error() string {
|
||||||
|
searchedVersions := sets.NewString()
|
||||||
|
for _, v := range e.SearchedVersions {
|
||||||
|
searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(searchedVersions) {
|
||||||
|
case 0:
|
||||||
|
return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group)
|
||||||
|
case 1:
|
||||||
|
return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0])
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*NoKindMatchError) Is(target error) bool {
|
||||||
|
_, ok := target.(*NoKindMatchError)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsNoMatchError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return errors.Is(err, &NoResourceMatchError{}) || errors.Is(err, &NoKindMatchError{})
|
||||||
|
}
|
105
vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
generated
vendored
Normal file
105
vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ ResettableRESTMapper = &FirstHitRESTMapper{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the
|
||||||
|
// first successful result for the singular requests
|
||||||
|
type FirstHitRESTMapper struct {
|
||||||
|
MultiRESTMapper
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m FirstHitRESTMapper) String() string {
|
||||||
|
return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
|
||||||
|
errors := []error{}
|
||||||
|
for _, t := range m.MultiRESTMapper {
|
||||||
|
ret, err := t.ResourceFor(resource)
|
||||||
|
if err == nil {
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionResource{}, collapseAggregateErrors(errors)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
|
||||||
|
errors := []error{}
|
||||||
|
for _, t := range m.MultiRESTMapper {
|
||||||
|
ret, err := t.KindFor(resource)
|
||||||
|
if err == nil {
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionKind{}, collapseAggregateErrors(errors)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RESTMapping provides the REST mapping for the resource based on the
|
||||||
|
// kind and version. This implementation supports multiple REST schemas and
|
||||||
|
// return the first match.
|
||||||
|
func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
|
||||||
|
errors := []error{}
|
||||||
|
for _, t := range m.MultiRESTMapper {
|
||||||
|
ret, err := t.RESTMapping(gk, versions...)
|
||||||
|
if err == nil {
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
errors = append(errors, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, collapseAggregateErrors(errors)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m FirstHitRESTMapper) Reset() {
|
||||||
|
m.MultiRESTMapper.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list
|
||||||
|
// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same)
|
||||||
|
func collapseAggregateErrors(errors []error) error {
|
||||||
|
if len(errors) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(errors) == 1 {
|
||||||
|
return errors[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
allNoMatchErrors := true
|
||||||
|
for _, err := range errors {
|
||||||
|
allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err)
|
||||||
|
}
|
||||||
|
if allNoMatchErrors {
|
||||||
|
return errors[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
return utilerrors.NewAggregate(errors)
|
||||||
|
}
|
331
vendor/k8s.io/apimachinery/pkg/api/meta/help.go
generated
vendored
Normal file
331
vendor/k8s.io/apimachinery/pkg/api/meta/help.go
generated
vendored
Normal file
@ -0,0 +1,331 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2015 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/conversion"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// isListCache maintains a cache of types that are checked for lists
|
||||||
|
// which is used by IsListType.
|
||||||
|
// TODO: remove and replace with an interface check
|
||||||
|
isListCache = struct {
|
||||||
|
lock sync.RWMutex
|
||||||
|
byType map[reflect.Type]bool
|
||||||
|
}{
|
||||||
|
byType: make(map[reflect.Type]bool, 1024),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsListType returns true if the provided Object has a slice called Items.
|
||||||
|
// TODO: Replace the code in this check with an interface comparison by
|
||||||
|
// creating and enforcing that lists implement a list accessor.
|
||||||
|
func IsListType(obj runtime.Object) bool {
|
||||||
|
switch t := obj.(type) {
|
||||||
|
case runtime.Unstructured:
|
||||||
|
return t.IsList()
|
||||||
|
}
|
||||||
|
t := reflect.TypeOf(obj)
|
||||||
|
|
||||||
|
isListCache.lock.RLock()
|
||||||
|
ok, exists := isListCache.byType[t]
|
||||||
|
isListCache.lock.RUnlock()
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
_, err := getItemsPtr(obj)
|
||||||
|
ok = err == nil
|
||||||
|
|
||||||
|
// cache only the first 1024 types
|
||||||
|
isListCache.lock.Lock()
|
||||||
|
if len(isListCache.byType) < 1024 {
|
||||||
|
isListCache.byType[t] = ok
|
||||||
|
}
|
||||||
|
isListCache.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
errExpectFieldItems = errors.New("no Items field in this object")
|
||||||
|
errExpectSliceItems = errors.New("Items field must be a slice of objects")
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetItemsPtr returns a pointer to the list object's Items member.
|
||||||
|
// If 'list' doesn't have an Items member, it's not really a list type
|
||||||
|
// and an error will be returned.
|
||||||
|
// This function will either return a pointer to a slice, or an error, but not both.
|
||||||
|
// TODO: this will be replaced with an interface in the future
|
||||||
|
func GetItemsPtr(list runtime.Object) (interface{}, error) {
|
||||||
|
obj, err := getItemsPtr(list)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("%T is not a list: %v", list, err)
|
||||||
|
}
|
||||||
|
return obj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getItemsPtr returns a pointer to the list object's Items member or an error.
|
||||||
|
func getItemsPtr(list runtime.Object) (interface{}, error) {
|
||||||
|
v, err := conversion.EnforcePtr(list)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
items := v.FieldByName("Items")
|
||||||
|
if !items.IsValid() {
|
||||||
|
return nil, errExpectFieldItems
|
||||||
|
}
|
||||||
|
switch items.Kind() {
|
||||||
|
case reflect.Interface, reflect.Pointer:
|
||||||
|
target := reflect.TypeOf(items.Interface()).Elem()
|
||||||
|
if target.Kind() != reflect.Slice {
|
||||||
|
return nil, errExpectSliceItems
|
||||||
|
}
|
||||||
|
return items.Interface(), nil
|
||||||
|
case reflect.Slice:
|
||||||
|
return items.Addr().Interface(), nil
|
||||||
|
default:
|
||||||
|
return nil, errExpectSliceItems
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates
// the loop.
//
// If items passed to fn are retained for different durations, and you want to avoid
// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead.
func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
	// allocNew=false: fn receives pointers into obj's Items backing array.
	return eachListItem(obj, fn, false)
}
|
||||||
|
|
||||||
|
// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj.
// It does this by making a shallow copy of non-pointer items in obj.
//
// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error {
	// allocNew=true: struct items are shallow-copied before being passed to fn.
	return eachListItem(obj, fn, true)
}
|
||||||
|
|
||||||
|
// allocNew: Whether shallow copy is required when the elements in Object.Items are struct
|
||||||
|
func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error {
|
||||||
|
if unstructured, ok := obj.(runtime.Unstructured); ok {
|
||||||
|
if allocNew {
|
||||||
|
return unstructured.EachListItemWithAlloc(fn)
|
||||||
|
}
|
||||||
|
return unstructured.EachListItem(fn)
|
||||||
|
}
|
||||||
|
// TODO: Change to an interface call?
|
||||||
|
itemsPtr, err := GetItemsPtr(obj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
items, err := conversion.EnforcePtr(itemsPtr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
len := items.Len()
|
||||||
|
if len == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
takeAddr := false
|
||||||
|
if elemType := items.Type().Elem(); elemType.Kind() != reflect.Pointer && elemType.Kind() != reflect.Interface {
|
||||||
|
if !items.Index(0).CanAddr() {
|
||||||
|
return fmt.Errorf("unable to take address of items in %T for EachListItem", obj)
|
||||||
|
}
|
||||||
|
takeAddr = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len; i++ {
|
||||||
|
raw := items.Index(i)
|
||||||
|
if takeAddr {
|
||||||
|
if allocNew {
|
||||||
|
// shallow copy to avoid retaining a reference to the original list item
|
||||||
|
itemCopy := reflect.New(raw.Type())
|
||||||
|
// assign to itemCopy and type-assert
|
||||||
|
itemCopy.Elem().Set(raw)
|
||||||
|
// reflect.New will guarantee that itemCopy must be a pointer.
|
||||||
|
raw = itemCopy
|
||||||
|
} else {
|
||||||
|
raw = raw.Addr()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// raw must be a pointer or an interface
|
||||||
|
// allocate a pointer is cheap
|
||||||
|
switch item := raw.Interface().(type) {
|
||||||
|
case *runtime.RawExtension:
|
||||||
|
if err := fn(item.Object); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case runtime.Object:
|
||||||
|
if err := fn(item); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
obj, ok := item.(runtime.Object)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
|
||||||
|
}
|
||||||
|
if err := fn(obj); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtractList returns obj's Items element as an array of runtime.Objects.
// Returns an error if obj is not a List type (does not have an Items member).
//
// If items in the returned list are retained for different durations, and you want to avoid
// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead.
func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
	// allocNew=false: returned objects may alias obj's Items backing array.
	return extractList(obj, false)
}
|
||||||
|
|
||||||
|
// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj.
// It does this by making a shallow copy of non-pointer items in obj.
//
// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency.
func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) {
	// allocNew=true: struct items are shallow-copied so obj can be released.
	return extractList(obj, true)
}
|
||||||
|
|
||||||
|
// allocNew: Whether shallow copy is required when the elements in Object.Items are struct
func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) {
	itemsPtr, err := GetItemsPtr(obj)
	if err != nil {
		return nil, err
	}
	items, err := conversion.EnforcePtr(itemsPtr)
	if err != nil {
		return nil, err
	}
	list := make([]runtime.Object, items.Len())
	if len(list) == 0 {
		return list, nil
	}
	// Classify the element type once, outside the per-item loop.
	elemType := items.Type().Elem()
	isRawExtension := elemType == rawExtensionObjectType
	implementsObject := elemType.Implements(objectType)
	for i := range list {
		raw := items.Index(i)
		switch {
		case isRawExtension:
			// RawExtension wraps either a decoded Object or raw bytes.
			item := raw.Interface().(runtime.RawExtension)
			switch {
			case item.Object != nil:
				list[i] = item.Object
			case item.Raw != nil:
				// TODO: Set ContentEncoding and ContentType correctly.
				list[i] = &runtime.Unknown{Raw: item.Raw}
			default:
				list[i] = nil
			}
		case implementsObject:
			// Element values already satisfy runtime.Object directly.
			list[i] = raw.Interface().(runtime.Object)
		case allocNew:
			// shallow copy to avoid retaining a reference to the original list item
			itemCopy := reflect.New(raw.Type())
			// assign to itemCopy and type-assert
			itemCopy.Elem().Set(raw)
			var ok bool
			// reflect.New will guarantee that itemCopy must be a pointer.
			if list[i], ok = itemCopy.Interface().(runtime.Object); !ok {
				return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
			}
		default:
			// Struct elements: take the address so the pointer type can
			// satisfy runtime.Object (aliases obj's backing array).
			var found bool
			if list[i], found = raw.Addr().Interface().(runtime.Object); !found {
				return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
			}
		}
	}
	return list, nil
}
|
||||||
|
|
||||||
|
var (
	// objectSliceType is the type of a slice of Objects
	objectSliceType = reflect.TypeOf([]runtime.Object{})
	// objectType is the runtime.Object interface type, used for
	// Implements checks on list element types.
	objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem()
	// rawExtensionObjectType identifies RawExtension elements, which need
	// special unwrapping in extractList/SetList.
	rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{})
)
|
||||||
|
|
||||||
|
// LenList returns the length of this list or 0 if it is not a list.
|
||||||
|
func LenList(list runtime.Object) int {
|
||||||
|
itemsPtr, err := GetItemsPtr(list)
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
items, err := conversion.EnforcePtr(itemsPtr)
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return items.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetList sets the given list object's Items member have the elements given in
// objects.
// Returns an error if list is not a List type (does not have an Items member),
// or if any of the objects are not of the right type.
func SetList(list runtime.Object, objects []runtime.Object) error {
	itemsPtr, err := GetItemsPtr(list)
	if err != nil {
		return err
	}
	items, err := conversion.EnforcePtr(itemsPtr)
	if err != nil {
		return err
	}
	// Fast path: Items is already []runtime.Object, assign directly.
	if items.Type() == objectSliceType {
		items.Set(reflect.ValueOf(objects))
		return nil
	}
	// Otherwise build a slice of the concrete element type and fill it.
	slice := reflect.MakeSlice(items.Type(), len(objects), len(objects))
	for i := range objects {
		dest := slice.Index(i)
		// RawExtension elements carry the object in their Object field.
		if dest.Type() == rawExtensionObjectType {
			dest = dest.FieldByName("Object")
		}

		// check to see if you're directly assignable
		if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) {
			dest.Set(reflect.ValueOf(objects[i]))
			continue
		}

		// Fall back to dereferencing the pointer and assigning/converting
		// the pointed-to value.
		src, err := conversion.EnforcePtr(objects[i])
		if err != nil {
			return err
		}
		if src.Type().AssignableTo(dest.Type()) {
			dest.Set(src)
		} else if src.Type().ConvertibleTo(dest.Type()) {
			dest.Set(src.Convert(dest.Type()))
		} else {
			return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type())
		}
	}
	items.Set(slice)
	return nil
}
|
143
vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
generated
vendored
Normal file
143
vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ListMetaAccessor retrieves the list interface from an object.
type ListMetaAccessor interface {
	GetListMeta() List
}

// List lets you work with list metadata from any of the versioned or
// internal API objects. Attempting to set or retrieve a field on an object that does
// not support that field will be a no-op and return a default value.
type List metav1.ListInterface

// Type exposes the type and APIVersion of versioned or internal API objects.
type Type metav1.Type

// MetadataAccessor lets you work with object and list metadata from any of the versioned or
// internal API objects. Attempting to set or retrieve a field on an object that does
// not support that field (Name, UID, Namespace on lists) will be a no-op and return
// a default value.
//
// MetadataAccessor exposes Interface in a way that can be used with multiple objects.
type MetadataAccessor interface {
	APIVersion(obj runtime.Object) (string, error)
	SetAPIVersion(obj runtime.Object, version string) error

	Kind(obj runtime.Object) (string, error)
	SetKind(obj runtime.Object, kind string) error

	Namespace(obj runtime.Object) (string, error)
	SetNamespace(obj runtime.Object, namespace string) error

	Name(obj runtime.Object) (string, error)
	SetName(obj runtime.Object, name string) error

	GenerateName(obj runtime.Object) (string, error)
	SetGenerateName(obj runtime.Object, name string) error

	UID(obj runtime.Object) (types.UID, error)
	SetUID(obj runtime.Object, uid types.UID) error

	SelfLink(obj runtime.Object) (string, error)
	SetSelfLink(obj runtime.Object, selfLink string) error

	Labels(obj runtime.Object) (map[string]string, error)
	SetLabels(obj runtime.Object, labels map[string]string) error

	Annotations(obj runtime.Object) (map[string]string, error)
	SetAnnotations(obj runtime.Object, annotations map[string]string) error

	Continue(obj runtime.Object) (string, error)
	SetContinue(obj runtime.Object, c string) error

	runtime.ResourceVersioner
}

// RESTScopeName names a REST scope (namespaced or cluster/root scoped).
type RESTScopeName string

const (
	RESTScopeNameNamespace RESTScopeName = "namespace"
	RESTScopeNameRoot      RESTScopeName = "root"
)

// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy
type RESTScope interface {
	// Name of the scope
	Name() RESTScopeName
}

// RESTMapping contains the information needed to deal with objects of a specific
// resource and kind in a RESTful manner.
type RESTMapping struct {
	// Resource is the GroupVersionResource (location) for this endpoint
	Resource schema.GroupVersionResource

	// GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint
	GroupVersionKind schema.GroupVersionKind

	// Scope contains the information needed to deal with REST Resources that are in a resource hierarchy
	Scope RESTScope
}

// RESTMapper allows clients to map resources to kind, and map kind and version
// to interfaces for manipulating those objects. It is primarily intended for
// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md.
//
// The Kubernetes API provides versioned resources and object kinds which are scoped
// to API groups. In other words, kinds and resources should not be assumed to be
// unique across groups.
//
// TODO: split into sub-interfaces
type RESTMapper interface {
	// KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches
	KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error)

	// KindsFor takes a partial resource and returns the list of potential kinds in priority order
	KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error)

	// ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches
	ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error)

	// ResourcesFor takes a partial resource and returns the list of potential resource in priority order
	ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error)

	// RESTMapping identifies a preferred resource mapping for the provided group kind.
	RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error)
	// RESTMappings returns all resource mappings for the provided group kind if no
	// version search is provided. Otherwise identifies a preferred resource mapping for
	// the provided version(s).
	RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error)

	ResourceSingularizer(resource string) (singular string, err error)
}

// ResettableRESTMapper is a RESTMapper which is capable of resetting itself
// from discovery.
// All rest mappers that delegate to other rest mappers must implement this interface and dynamically
// check if the delegate mapper supports the Reset() operation.
type ResettableRESTMapper interface {
	RESTMapper
	Reset()
}
|
112
vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
generated
vendored
Normal file
112
vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// lazyObject defers loading the mapper and typer until necessary.
type lazyObject struct {
	// loader produces the delegate mapper on first use.
	loader func() (RESTMapper, error)

	// lock guards loaded, err, and mapper; the struct must not be copied.
	lock   sync.Mutex
	loaded bool
	err    error
	mapper RESTMapper
}
|
||||||
|
|
||||||
|
// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by
// returning those initialization errors when the interface methods are invoked. This defers the
// initialization and any server calls until a client actually needs to perform the action.
func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper {
	obj := &lazyObject{loader: fn}
	return obj
}
|
||||||
|
|
||||||
|
// init lazily loads the mapper and typer, returning an error if initialization has failed.
|
||||||
|
func (o *lazyObject) init() error {
|
||||||
|
o.lock.Lock()
|
||||||
|
defer o.lock.Unlock()
|
||||||
|
if o.loaded {
|
||||||
|
return o.err
|
||||||
|
}
|
||||||
|
o.mapper, o.err = o.loader()
|
||||||
|
o.loaded = true
|
||||||
|
return o.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile-time check that lazyObject satisfies ResettableRESTMapper.
var _ ResettableRESTMapper = &lazyObject{}

// Each method below first forces initialization, surfacing any loader error,
// then delegates to the cached mapper.

func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
	if err := o.init(); err != nil {
		return schema.GroupVersionKind{}, err
	}
	return o.mapper.KindFor(resource)
}

func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
	if err := o.init(); err != nil {
		return []schema.GroupVersionKind{}, err
	}
	return o.mapper.KindsFor(resource)
}

func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
	if err := o.init(); err != nil {
		return schema.GroupVersionResource{}, err
	}
	return o.mapper.ResourceFor(input)
}

func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
	if err := o.init(); err != nil {
		return []schema.GroupVersionResource{}, err
	}
	return o.mapper.ResourcesFor(input)
}

func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
	if err := o.init(); err != nil {
		return nil, err
	}
	return o.mapper.RESTMapping(gk, versions...)
}

func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
	if err := o.init(); err != nil {
		return nil, err
	}
	return o.mapper.RESTMappings(gk, versions...)
}

func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) {
	if err := o.init(); err != nil {
		return "", err
	}
	return o.mapper.ResourceSingularizer(resource)
}

// Reset forwards the reset to the delegate only if it was loaded successfully;
// a failed or never-run load has nothing to reset.
func (o *lazyObject) Reset() {
	o.lock.Lock()
	defer o.lock.Unlock()
	if o.loaded && o.err == nil {
		MaybeResetRESTMapper(o.mapper)
	}
}
|
643
vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
generated
vendored
Normal file
643
vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
generated
vendored
Normal file
@ -0,0 +1,643 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/conversion"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errNotList is returned when an object implements the Object style interfaces but not the List style
// interfaces.
var errNotList = fmt.Errorf("object does not implement the List interfaces")

var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink")

// CommonAccessor returns a Common interface for the provided object or an error if the object does
// not provide List.
func CommonAccessor(obj interface{}) (metav1.Common, error) {
	// List-style accessors are tried before object-style ones.
	switch t := obj.(type) {
	case List:
		return t, nil
	case ListMetaAccessor:
		if m := t.GetListMeta(); m != nil {
			return m, nil
		}
		return nil, errNotCommon
	case metav1.ListMetaAccessor:
		if m := t.GetListMeta(); m != nil {
			return m, nil
		}
		return nil, errNotCommon
	case metav1.Object:
		return t, nil
	case metav1.ObjectMetaAccessor:
		if m := t.GetObjectMeta(); m != nil {
			return m, nil
		}
		return nil, errNotCommon
	default:
		return nil, errNotCommon
	}
}
|
||||||
|
|
||||||
|
// ListAccessor returns a List interface for the provided object or an error if the object does
// not provide List.
// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an
// object *is* a List.
func ListAccessor(obj interface{}) (List, error) {
	switch t := obj.(type) {
	case List:
		return t, nil
	case ListMetaAccessor:
		if m := t.GetListMeta(); m != nil {
			return m, nil
		}
		return nil, errNotList
	case metav1.ListMetaAccessor:
		if m := t.GetListMeta(); m != nil {
			return m, nil
		}
		return nil, errNotList
	default:
		return nil, errNotList
	}
}
|
||||||
|
|
||||||
|
// errNotObject is returned when an object implements the List style interfaces but not the Object style
// interfaces.
var errNotObject = fmt.Errorf("object does not implement the Object interfaces")

// Accessor takes an arbitrary object pointer and returns meta.Interface.
// obj must be a pointer to an API type. An error is returned if the minimum
// required fields are missing. Fields that are not required return the default
// value and are a no-op if set.
func Accessor(obj interface{}) (metav1.Object, error) {
	switch t := obj.(type) {
	case metav1.Object:
		return t, nil
	case metav1.ObjectMetaAccessor:
		if m := t.GetObjectMeta(); m != nil {
			return m, nil
		}
		return nil, errNotObject
	default:
		return nil, errNotObject
	}
}
|
||||||
|
|
||||||
|
// AsPartialObjectMetadata takes the metav1 interface and returns a partial object.
// TODO: consider making this solely a conversion action.
func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata {
	switch t := m.(type) {
	case *metav1.ObjectMeta:
		// Concrete ObjectMeta: copy the whole struct in one assignment.
		return &metav1.PartialObjectMetadata{ObjectMeta: *t}
	default:
		// Arbitrary implementation: pull each field through the interface.
		return &metav1.PartialObjectMetadata{
			ObjectMeta: metav1.ObjectMeta{
				Name:                       m.GetName(),
				GenerateName:               m.GetGenerateName(),
				Namespace:                  m.GetNamespace(),
				SelfLink:                   m.GetSelfLink(),
				UID:                        m.GetUID(),
				ResourceVersion:            m.GetResourceVersion(),
				Generation:                 m.GetGeneration(),
				CreationTimestamp:          m.GetCreationTimestamp(),
				DeletionTimestamp:          m.GetDeletionTimestamp(),
				DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(),
				Labels:                     m.GetLabels(),
				Annotations:                m.GetAnnotations(),
				OwnerReferences:            m.GetOwnerReferences(),
				Finalizers:                 m.GetFinalizers(),
				ManagedFields:              m.GetManagedFields(),
			},
		}
	}
}
|
||||||
|
|
||||||
|
// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion
// and Kind of an in-memory internal object.
// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta
// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube
// api conventions).
func TypeAccessor(obj interface{}) (Type, error) {
	// runtime.Objects get the cheap wrapper; everything else goes through reflection.
	if typed, ok := obj.(runtime.Object); ok {
		return objectAccessor{typed}, nil
	}
	v, err := conversion.EnforcePtr(obj)
	if err != nil {
		return nil, err
	}
	t := v.Type()
	if v.Kind() != reflect.Struct {
		return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface())
	}

	// The struct must embed TypeMeta for apiVersion/kind access.
	typeMeta := v.FieldByName("TypeMeta")
	if !typeMeta.IsValid() {
		return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t)
	}
	a := &genericAccessor{}
	if err := extractFromTypeMeta(typeMeta, a); err != nil {
		return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err)
	}
	return a, nil
}
|
||||||
|
|
||||||
|
// objectAccessor adapts a runtime.Object's ObjectKind to the Type interface.
type objectAccessor struct {
	runtime.Object
}

// GetKind returns the object's Kind from its GroupVersionKind.
func (obj objectAccessor) GetKind() string {
	return obj.GetObjectKind().GroupVersionKind().Kind
}

// SetKind replaces only the Kind portion of the object's GroupVersionKind.
func (obj objectAccessor) SetKind(kind string) {
	gvk := obj.GetObjectKind().GroupVersionKind()
	gvk.Kind = kind
	obj.GetObjectKind().SetGroupVersionKind(gvk)
}

// GetAPIVersion returns the object's "group/version" string.
func (obj objectAccessor) GetAPIVersion() string {
	return obj.GetObjectKind().GroupVersionKind().GroupVersion().String()
}

// SetAPIVersion parses version as "group/version" and stores it; on a parse
// failure the entire input is kept as the bare Version.
func (obj objectAccessor) SetAPIVersion(version string) {
	gvk := obj.GetObjectKind().GroupVersionKind()
	gv, err := schema.ParseGroupVersion(version)
	if err != nil {
		gv = schema.GroupVersion{Version: version}
	}
	gvk.Group, gvk.Version = gv.Group, gv.Version
	obj.GetObjectKind().SetGroupVersionKind(gvk)
}
|
||||||
|
|
||||||
|
// NewAccessor returns a MetadataAccessor that can retrieve
// or manipulate resource version on objects derived from core API
// metadata concepts.
func NewAccessor() MetadataAccessor {
	return resourceAccessor{}
}

// resourceAccessor implements ResourceVersioner and SelfLinker.
type resourceAccessor struct{}
|
||||||
|
|
||||||
|
// Kind/APIVersion delegate to objectAccessor, which reads the ObjectKind
// directly; these cannot fail.

func (resourceAccessor) Kind(obj runtime.Object) (string, error) {
	return objectAccessor{obj}.GetKind(), nil
}

func (resourceAccessor) SetKind(obj runtime.Object, kind string) error {
	objectAccessor{obj}.SetKind(kind)
	return nil
}

func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) {
	return objectAccessor{obj}.GetAPIVersion(), nil
}

func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error {
	objectAccessor{obj}.SetAPIVersion(version)
	return nil
}

// The metadata accessors below resolve the object's metav1.Object (or
// metav1.Common for SelfLink/ResourceVersion) via Accessor/CommonAccessor and
// fail when the object does not expose the required interface.

func (resourceAccessor) Namespace(obj runtime.Object) (string, error) {
	accessor, err := Accessor(obj)
	if err != nil {
		return "", err
	}
	return accessor.GetNamespace(), nil
}

func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error {
	accessor, err := Accessor(obj)
	if err != nil {
		return err
	}
	accessor.SetNamespace(namespace)
	return nil
}

func (resourceAccessor) Name(obj runtime.Object) (string, error) {
	accessor, err := Accessor(obj)
	if err != nil {
		return "", err
	}
	return accessor.GetName(), nil
}

func (resourceAccessor) SetName(obj runtime.Object, name string) error {
	accessor, err := Accessor(obj)
	if err != nil {
		return err
	}
	accessor.SetName(name)
	return nil
}

func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) {
	accessor, err := Accessor(obj)
	if err != nil {
		return "", err
	}
	return accessor.GetGenerateName(), nil
}

func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error {
	accessor, err := Accessor(obj)
	if err != nil {
		return err
	}
	accessor.SetGenerateName(name)
	return nil
}

func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) {
	accessor, err := Accessor(obj)
	if err != nil {
		return "", err
	}
	return accessor.GetUID(), nil
}

func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error {
	accessor, err := Accessor(obj)
	if err != nil {
		return err
	}
	accessor.SetUID(uid)
	return nil
}

func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) {
	accessor, err := CommonAccessor(obj)
	if err != nil {
		return "", err
	}
	return accessor.GetSelfLink(), nil
}

func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error {
	accessor, err := CommonAccessor(obj)
	if err != nil {
		return err
	}
	accessor.SetSelfLink(selfLink)
	return nil
}

func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) {
	accessor, err := Accessor(obj)
	if err != nil {
		return nil, err
	}
	return accessor.GetLabels(), nil
}

func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error {
	accessor, err := Accessor(obj)
	if err != nil {
		return err
	}
	accessor.SetLabels(labels)
	return nil
}

func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) {
	accessor, err := Accessor(obj)
	if err != nil {
		return nil, err
	}
	return accessor.GetAnnotations(), nil
}

func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error {
	accessor, err := Accessor(obj)
	if err != nil {
		return err
	}
	accessor.SetAnnotations(annotations)
	return nil
}

func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) {
	accessor, err := CommonAccessor(obj)
	if err != nil {
		return "", err
	}
	return accessor.GetResourceVersion(), nil
}
|
||||||
|
|
||||||
|
func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error {
|
||||||
|
accessor, err := CommonAccessor(obj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
accessor.SetResourceVersion(version)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (resourceAccessor) Continue(obj runtime.Object) (string, error) {
|
||||||
|
accessor, err := ListAccessor(obj)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return accessor.GetContinue(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (resourceAccessor) SetContinue(obj runtime.Object, version string) error {
|
||||||
|
accessor, err := ListAccessor(obj)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
accessor.SetContinue(version)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object.
|
||||||
|
func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
|
||||||
|
if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.Field(v, "Kind", &o.Kind); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.Field(v, "Name", &o.Name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.Field(v, "UID", &o.UID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var controllerPtr *bool
|
||||||
|
if err := runtime.Field(v, "Controller", &controllerPtr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if controllerPtr != nil {
|
||||||
|
controller := *controllerPtr
|
||||||
|
o.Controller = &controller
|
||||||
|
}
|
||||||
|
var blockOwnerDeletionPtr *bool
|
||||||
|
if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if blockOwnerDeletionPtr != nil {
|
||||||
|
block := *blockOwnerDeletionPtr
|
||||||
|
o.BlockOwnerDeletion = &block
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setOwnerReference sets v to o. v is the OwnerReferences field of an object.
|
||||||
|
func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
|
||||||
|
if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.SetField(o.Kind, v, "Kind"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.SetField(o.Name, v, "Name"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.SetField(o.UID, v, "UID"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if o.Controller != nil {
|
||||||
|
controller := *(o.Controller)
|
||||||
|
if err := runtime.SetField(&controller, v, "Controller"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if o.BlockOwnerDeletion != nil {
|
||||||
|
block := *(o.BlockOwnerDeletion)
|
||||||
|
if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// genericAccessor contains pointers to strings that can modify an arbitrary
|
||||||
|
// struct and implements the Accessor interface.
|
||||||
|
type genericAccessor struct {
|
||||||
|
namespace *string
|
||||||
|
name *string
|
||||||
|
generateName *string
|
||||||
|
uid *types.UID
|
||||||
|
apiVersion *string
|
||||||
|
kind *string
|
||||||
|
resourceVersion *string
|
||||||
|
selfLink *string
|
||||||
|
creationTimestamp *metav1.Time
|
||||||
|
deletionTimestamp **metav1.Time
|
||||||
|
labels *map[string]string
|
||||||
|
annotations *map[string]string
|
||||||
|
ownerReferences reflect.Value
|
||||||
|
finalizers *[]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetNamespace() string {
|
||||||
|
if a.namespace == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return *a.namespace
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetNamespace(namespace string) {
|
||||||
|
if a.namespace == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*a.namespace = namespace
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetName() string {
|
||||||
|
if a.name == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return *a.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetName(name string) {
|
||||||
|
if a.name == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*a.name = name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetGenerateName() string {
|
||||||
|
if a.generateName == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return *a.generateName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetGenerateName(generateName string) {
|
||||||
|
if a.generateName == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*a.generateName = generateName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetUID() types.UID {
|
||||||
|
if a.uid == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return *a.uid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetUID(uid types.UID) {
|
||||||
|
if a.uid == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
*a.uid = uid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetAPIVersion() string {
|
||||||
|
return *a.apiVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetAPIVersion(version string) {
|
||||||
|
*a.apiVersion = version
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetKind() string {
|
||||||
|
return *a.kind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetKind(kind string) {
|
||||||
|
*a.kind = kind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetResourceVersion() string {
|
||||||
|
return *a.resourceVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetResourceVersion(version string) {
|
||||||
|
*a.resourceVersion = version
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetSelfLink() string {
|
||||||
|
return *a.selfLink
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetSelfLink(selfLink string) {
|
||||||
|
*a.selfLink = selfLink
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetCreationTimestamp() metav1.Time {
|
||||||
|
return *a.creationTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) {
|
||||||
|
*a.creationTimestamp = timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetDeletionTimestamp() *metav1.Time {
|
||||||
|
return *a.deletionTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) {
|
||||||
|
*a.deletionTimestamp = timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetLabels() map[string]string {
|
||||||
|
if a.labels == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return *a.labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetLabels(labels map[string]string) {
|
||||||
|
*a.labels = labels
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetAnnotations() map[string]string {
|
||||||
|
if a.annotations == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return *a.annotations
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetAnnotations(annotations map[string]string) {
|
||||||
|
if a.annotations == nil {
|
||||||
|
emptyAnnotations := make(map[string]string)
|
||||||
|
a.annotations = &emptyAnnotations
|
||||||
|
}
|
||||||
|
*a.annotations = annotations
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetFinalizers() []string {
|
||||||
|
if a.finalizers == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return *a.finalizers
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetFinalizers(finalizers []string) {
|
||||||
|
*a.finalizers = finalizers
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
|
||||||
|
var ret []metav1.OwnerReference
|
||||||
|
s := a.ownerReferences
|
||||||
|
if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice {
|
||||||
|
klog.Errorf("expect %v to be a pointer to slice", s)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
s = s.Elem()
|
||||||
|
// Set the capacity to one element greater to avoid copy if the caller later append an element.
|
||||||
|
ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1)
|
||||||
|
for i := 0; i < s.Len(); i++ {
|
||||||
|
if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
|
||||||
|
klog.Errorf("extractFromOwnerReference failed: %v", err)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) {
|
||||||
|
s := a.ownerReferences
|
||||||
|
if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice {
|
||||||
|
klog.Errorf("expect %v to be a pointer to slice", s)
|
||||||
|
}
|
||||||
|
s = s.Elem()
|
||||||
|
newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
|
||||||
|
for i := 0; i < len(references); i++ {
|
||||||
|
if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
|
||||||
|
klog.Errorf("setOwnerReference failed: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.Set(newReferences)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractFromTypeMeta extracts pointers to version and kind fields from an object
|
||||||
|
func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
|
||||||
|
if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
220
vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
generated
vendored
Normal file
220
vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ ResettableRESTMapper = MultiRESTMapper{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MultiRESTMapper is a wrapper for multiple RESTMappers.
|
||||||
|
type MultiRESTMapper []RESTMapper
|
||||||
|
|
||||||
|
func (m MultiRESTMapper) String() string {
|
||||||
|
nested := make([]string, 0, len(m))
|
||||||
|
for _, t := range m {
|
||||||
|
currString := fmt.Sprintf("%v", t)
|
||||||
|
splitStrings := strings.Split(currString, "\n")
|
||||||
|
nested = append(nested, strings.Join(splitStrings, "\n\t"))
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod)
|
||||||
|
// This implementation supports multiple REST schemas and return the first match.
|
||||||
|
func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
|
||||||
|
for _, t := range m {
|
||||||
|
singular, err = t.ResourceSingularizer(resource)
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
|
||||||
|
allGVRs := []schema.GroupVersionResource{}
|
||||||
|
for _, t := range m {
|
||||||
|
gvrs, err := t.ResourcesFor(resource)
|
||||||
|
// ignore "no match" errors, but any other error percolates back up
|
||||||
|
if IsNoMatchError(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// walk the existing values to de-dup
|
||||||
|
for _, curr := range gvrs {
|
||||||
|
found := false
|
||||||
|
for _, existing := range allGVRs {
|
||||||
|
if curr == existing {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
allGVRs = append(allGVRs, curr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(allGVRs) == 0 {
|
||||||
|
return nil, &NoResourceMatchError{PartialResource: resource}
|
||||||
|
}
|
||||||
|
|
||||||
|
return allGVRs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
|
||||||
|
allGVKs := []schema.GroupVersionKind{}
|
||||||
|
for _, t := range m {
|
||||||
|
gvks, err := t.KindsFor(resource)
|
||||||
|
// ignore "no match" errors, but any other error percolates back up
|
||||||
|
if IsNoMatchError(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// walk the existing values to de-dup
|
||||||
|
for _, curr := range gvks {
|
||||||
|
found := false
|
||||||
|
for _, existing := range allGVKs {
|
||||||
|
if curr == existing {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
allGVKs = append(allGVKs, curr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(allGVKs) == 0 {
|
||||||
|
return nil, &NoResourceMatchError{PartialResource: resource}
|
||||||
|
}
|
||||||
|
|
||||||
|
return allGVKs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
|
||||||
|
resources, err := m.ResourcesFor(resource)
|
||||||
|
if err != nil {
|
||||||
|
return schema.GroupVersionResource{}, err
|
||||||
|
}
|
||||||
|
if len(resources) == 1 {
|
||||||
|
return resources[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
|
||||||
|
kinds, err := m.KindsFor(resource)
|
||||||
|
if err != nil {
|
||||||
|
return schema.GroupVersionKind{}, err
|
||||||
|
}
|
||||||
|
if len(kinds) == 1 {
|
||||||
|
return kinds[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RESTMapping provides the REST mapping for the resource based on the
|
||||||
|
// kind and version. This implementation supports multiple REST schemas and
|
||||||
|
// return the first match.
|
||||||
|
func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
|
||||||
|
allMappings := []*RESTMapping{}
|
||||||
|
errors := []error{}
|
||||||
|
|
||||||
|
for _, t := range m {
|
||||||
|
currMapping, err := t.RESTMapping(gk, versions...)
|
||||||
|
// ignore "no match" errors, but any other error percolates back up
|
||||||
|
if IsNoMatchError(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
allMappings = append(allMappings, currMapping)
|
||||||
|
}
|
||||||
|
|
||||||
|
// if we got exactly one mapping, then use it even if other requested failed
|
||||||
|
if len(allMappings) == 1 {
|
||||||
|
return allMappings[0], nil
|
||||||
|
}
|
||||||
|
if len(allMappings) > 1 {
|
||||||
|
var kinds []schema.GroupVersionKind
|
||||||
|
for _, m := range allMappings {
|
||||||
|
kinds = append(kinds, m.GroupVersionKind)
|
||||||
|
}
|
||||||
|
return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
|
||||||
|
}
|
||||||
|
if len(errors) > 0 {
|
||||||
|
return nil, utilerrors.NewAggregate(errors)
|
||||||
|
}
|
||||||
|
return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RESTMappings returns all possible RESTMappings for the provided group kind, or an error
|
||||||
|
// if the type is not recognized.
|
||||||
|
func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
|
||||||
|
var allMappings []*RESTMapping
|
||||||
|
var errors []error
|
||||||
|
|
||||||
|
for _, t := range m {
|
||||||
|
currMappings, err := t.RESTMappings(gk, versions...)
|
||||||
|
// ignore "no match" errors, but any other error percolates back up
|
||||||
|
if IsNoMatchError(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
errors = append(errors, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
allMappings = append(allMappings, currMappings...)
|
||||||
|
}
|
||||||
|
if len(errors) > 0 {
|
||||||
|
return nil, utilerrors.NewAggregate(errors)
|
||||||
|
}
|
||||||
|
if len(allMappings) == 0 {
|
||||||
|
return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
|
||||||
|
}
|
||||||
|
return allMappings, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m MultiRESTMapper) Reset() {
|
||||||
|
for _, t := range m {
|
||||||
|
MaybeResetRESTMapper(t)
|
||||||
|
}
|
||||||
|
}
|
230
vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
generated
vendored
Normal file
230
vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
generated
vendored
Normal file
@ -0,0 +1,230 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AnyGroup = "*"
|
||||||
|
AnyVersion = "*"
|
||||||
|
AnyResource = "*"
|
||||||
|
AnyKind = "*"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ ResettableRESTMapper = PriorityRESTMapper{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind
|
||||||
|
// when multiple matches are possible
|
||||||
|
type PriorityRESTMapper struct {
|
||||||
|
// Delegate is the RESTMapper to use to locate all the Kind and Resource matches
|
||||||
|
Delegate RESTMapper
|
||||||
|
|
||||||
|
// ResourcePriority is a list of priority patterns to apply to matching resources.
|
||||||
|
// The list of all matching resources is narrowed based on the patterns until only one remains.
|
||||||
|
// A pattern with no matches is skipped. A pattern with more than one match uses its
|
||||||
|
// matches as the list to continue matching against.
|
||||||
|
ResourcePriority []schema.GroupVersionResource
|
||||||
|
|
||||||
|
// KindPriority is a list of priority patterns to apply to matching kinds.
|
||||||
|
// The list of all matching kinds is narrowed based on the patterns until only one remains.
|
||||||
|
// A pattern with no matches is skipped. A pattern with more than one match uses its
|
||||||
|
// matches as the list to continue matching against.
|
||||||
|
KindPriority []schema.GroupVersionKind
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) String() string {
|
||||||
|
return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit.
|
||||||
|
func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
|
||||||
|
originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource)
|
||||||
|
if originalErr != nil && len(originalGVRs) == 0 {
|
||||||
|
return schema.GroupVersionResource{}, originalErr
|
||||||
|
}
|
||||||
|
if len(originalGVRs) == 1 {
|
||||||
|
return originalGVRs[0], originalErr
|
||||||
|
}
|
||||||
|
|
||||||
|
remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...)
|
||||||
|
for _, pattern := range m.ResourcePriority {
|
||||||
|
matchedGVRs := []schema.GroupVersionResource{}
|
||||||
|
for _, gvr := range remainingGVRs {
|
||||||
|
if resourceMatches(pattern, gvr) {
|
||||||
|
matchedGVRs = append(matchedGVRs, gvr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(matchedGVRs) {
|
||||||
|
case 0:
|
||||||
|
// if you have no matches, then nothing matched this pattern just move to the next
|
||||||
|
continue
|
||||||
|
case 1:
|
||||||
|
// one match, return
|
||||||
|
return matchedGVRs[0], originalErr
|
||||||
|
default:
|
||||||
|
// more than one match, use the matched hits as the list moving to the next pattern.
|
||||||
|
// this way you can have a series of selection criteria
|
||||||
|
remainingGVRs = matchedGVRs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs}
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit.
|
||||||
|
func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
|
||||||
|
originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource)
|
||||||
|
if originalErr != nil && len(originalGVKs) == 0 {
|
||||||
|
return schema.GroupVersionKind{}, originalErr
|
||||||
|
}
|
||||||
|
if len(originalGVKs) == 1 {
|
||||||
|
return originalGVKs[0], originalErr
|
||||||
|
}
|
||||||
|
|
||||||
|
remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...)
|
||||||
|
for _, pattern := range m.KindPriority {
|
||||||
|
matchedGVKs := []schema.GroupVersionKind{}
|
||||||
|
for _, gvr := range remainingGVKs {
|
||||||
|
if kindMatches(pattern, gvr) {
|
||||||
|
matchedGVKs = append(matchedGVKs, gvr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(matchedGVKs) {
|
||||||
|
case 0:
|
||||||
|
// if you have no matches, then nothing matched this pattern just move to the next
|
||||||
|
continue
|
||||||
|
case 1:
|
||||||
|
// one match, return
|
||||||
|
return matchedGVKs[0], originalErr
|
||||||
|
default:
|
||||||
|
// more than one match, use the matched hits as the list moving to the next pattern.
|
||||||
|
// this way you can have a series of selection criteria
|
||||||
|
remainingGVKs = matchedGVKs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool {
|
||||||
|
if pattern.Group != AnyGroup && pattern.Group != resource.Group {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if pattern.Version != AnyVersion && pattern.Version != resource.Version {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if pattern.Resource != AnyResource && pattern.Resource != resource.Resource {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool {
|
||||||
|
if pattern.Group != AnyGroup && pattern.Group != kind.Group {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if pattern.Version != AnyVersion && pattern.Version != kind.Version {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if pattern.Kind != AnyKind && pattern.Kind != kind.Kind {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) {
|
||||||
|
mappings, originalErr := m.Delegate.RESTMappings(gk, versions...)
|
||||||
|
if originalErr != nil && len(mappings) == 0 {
|
||||||
|
return nil, originalErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// any versions the user provides take priority
|
||||||
|
priorities := m.KindPriority
|
||||||
|
if len(versions) > 0 {
|
||||||
|
priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions))
|
||||||
|
for _, version := range versions {
|
||||||
|
gv := schema.GroupVersion{
|
||||||
|
Version: version,
|
||||||
|
Group: gk.Group,
|
||||||
|
}
|
||||||
|
priorities = append(priorities, gv.WithKind(AnyKind))
|
||||||
|
}
|
||||||
|
priorities = append(priorities, m.KindPriority...)
|
||||||
|
}
|
||||||
|
|
||||||
|
remaining := append([]*RESTMapping{}, mappings...)
|
||||||
|
for _, pattern := range priorities {
|
||||||
|
var matching []*RESTMapping
|
||||||
|
for _, m := range remaining {
|
||||||
|
if kindMatches(pattern, m.GroupVersionKind) {
|
||||||
|
matching = append(matching, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch len(matching) {
|
||||||
|
case 0:
|
||||||
|
// if you have no matches, then nothing matched this pattern just move to the next
|
||||||
|
continue
|
||||||
|
case 1:
|
||||||
|
// one match, return
|
||||||
|
return matching[0], originalErr
|
||||||
|
default:
|
||||||
|
// more than one match, use the matched hits as the list moving to the next pattern.
|
||||||
|
// this way you can have a series of selection criteria
|
||||||
|
remaining = matching
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(remaining) == 1 {
|
||||||
|
return remaining[0], originalErr
|
||||||
|
}
|
||||||
|
|
||||||
|
var kinds []schema.GroupVersionKind
|
||||||
|
for _, m := range mappings {
|
||||||
|
kinds = append(kinds, m.GroupVersionKind)
|
||||||
|
}
|
||||||
|
return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
|
||||||
|
return m.Delegate.RESTMappings(gk, versions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
|
||||||
|
return m.Delegate.ResourceSingularizer(resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
|
||||||
|
return m.Delegate.ResourcesFor(partiallySpecifiedResource)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
|
||||||
|
return m.Delegate.KindsFor(partiallySpecifiedResource)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m PriorityRESTMapper) Reset() {
|
||||||
|
MaybeResetRESTMapper(m.Delegate)
|
||||||
|
}
|
529
vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
generated
vendored
Normal file
529
vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
generated
vendored
Normal file
@ -0,0 +1,529 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2014 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// TODO: move everything in this file to pkg/api/rest
|
||||||
|
package meta
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Implements RESTScope interface
|
||||||
|
type restScope struct {
|
||||||
|
name RESTScopeName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *restScope) Name() RESTScopeName {
|
||||||
|
return r.name
|
||||||
|
}
|
||||||
|
|
||||||
|
var RESTScopeNamespace = &restScope{
|
||||||
|
name: RESTScopeNameNamespace,
|
||||||
|
}
|
||||||
|
|
||||||
|
var RESTScopeRoot = &restScope{
|
||||||
|
name: RESTScopeNameRoot,
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultRESTMapper exposes mappings between the types defined in a
|
||||||
|
// runtime.Scheme. It assumes that all types defined the provided scheme
|
||||||
|
// can be mapped with the provided MetadataAccessor and Codec interfaces.
|
||||||
|
//
|
||||||
|
// The resource name of a Kind is defined as the lowercase,
|
||||||
|
// English-plural version of the Kind string.
|
||||||
|
// When converting from resource to Kind, the singular version of the
|
||||||
|
// resource name is also accepted for convenience.
|
||||||
|
//
|
||||||
|
// TODO: Only accept plural for some operations for increased control?
|
||||||
|
// (`get pod bar` vs `get pods bar`)
|
||||||
|
type DefaultRESTMapper struct {
|
||||||
|
defaultGroupVersions []schema.GroupVersion
|
||||||
|
|
||||||
|
resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind
|
||||||
|
kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource
|
||||||
|
kindToScope map[schema.GroupVersionKind]RESTScope
|
||||||
|
singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource
|
||||||
|
pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) String() string {
|
||||||
|
if m == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ RESTMapper = &DefaultRESTMapper{}
|
||||||
|
|
||||||
|
// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion
|
||||||
|
// to a resource name and back based on the objects in a runtime.Scheme
|
||||||
|
// and the Kubernetes API conventions. Takes a group name, a priority list of the versions
|
||||||
|
// to search when an object has no default version (set empty to return an error),
|
||||||
|
// and a function that retrieves the correct metadata for a given version.
|
||||||
|
func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper {
|
||||||
|
resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind)
|
||||||
|
kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource)
|
||||||
|
kindToScope := make(map[schema.GroupVersionKind]RESTScope)
|
||||||
|
singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
|
||||||
|
pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
|
||||||
|
// TODO: verify name mappings work correctly when versions differ
|
||||||
|
|
||||||
|
return &DefaultRESTMapper{
|
||||||
|
resourceToKind: resourceToKind,
|
||||||
|
kindToPluralResource: kindToPluralResource,
|
||||||
|
kindToScope: kindToScope,
|
||||||
|
defaultGroupVersions: defaultGroupVersions,
|
||||||
|
singularToPlural: singularToPlural,
|
||||||
|
pluralToSingular: pluralToSingular,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) {
|
||||||
|
plural, singular := UnsafeGuessKindToResource(kind)
|
||||||
|
m.AddSpecific(kind, plural, singular, scope)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) {
|
||||||
|
m.singularToPlural[singular] = plural
|
||||||
|
m.pluralToSingular[plural] = singular
|
||||||
|
|
||||||
|
m.resourceToKind[singular] = kind
|
||||||
|
m.resourceToKind[plural] = kind
|
||||||
|
|
||||||
|
m.kindToPluralResource[kind] = plural
|
||||||
|
m.kindToScope[kind] = scope
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular
|
||||||
|
// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should.
|
||||||
|
// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all
|
||||||
|
// callers to use the RESTMapper they mean.
|
||||||
|
var unpluralizedSuffixes = []string{
|
||||||
|
"endpoints",
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnsafeGuessKindToResource converts Kind to a resource name.
|
||||||
|
// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match
|
||||||
|
// and they aren't guaranteed to do so.
|
||||||
|
func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) {
|
||||||
|
kindName := kind.Kind
|
||||||
|
if len(kindName) == 0 {
|
||||||
|
return schema.GroupVersionResource{}, schema.GroupVersionResource{}
|
||||||
|
}
|
||||||
|
singularName := strings.ToLower(kindName)
|
||||||
|
singular := kind.GroupVersion().WithResource(singularName)
|
||||||
|
|
||||||
|
for _, skip := range unpluralizedSuffixes {
|
||||||
|
if strings.HasSuffix(singularName, skip) {
|
||||||
|
return singular, singular
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch string(singularName[len(singularName)-1]) {
|
||||||
|
case "s":
|
||||||
|
return kind.GroupVersion().WithResource(singularName + "es"), singular
|
||||||
|
case "y":
|
||||||
|
return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular
|
||||||
|
}
|
||||||
|
|
||||||
|
return kind.GroupVersion().WithResource(singularName + "s"), singular
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceSingularizer implements RESTMapper
|
||||||
|
// It converts a resource name from plural to singular (e.g., from pods to pod)
|
||||||
|
func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) {
|
||||||
|
partialResource := schema.GroupVersionResource{Resource: resourceType}
|
||||||
|
resources, err := m.ResourcesFor(partialResource)
|
||||||
|
if err != nil {
|
||||||
|
return resourceType, err
|
||||||
|
}
|
||||||
|
|
||||||
|
singular := schema.GroupVersionResource{}
|
||||||
|
for _, curr := range resources {
|
||||||
|
currSingular, ok := m.pluralToSingular[curr]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if singular.Empty() {
|
||||||
|
singular = currSingular
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if currSingular.Resource != singular.Resource {
|
||||||
|
return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if singular.Empty() {
|
||||||
|
return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType)
|
||||||
|
}
|
||||||
|
|
||||||
|
return singular.Resource, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior)
|
||||||
|
func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource {
|
||||||
|
resource.Resource = strings.ToLower(resource.Resource)
|
||||||
|
if resource.Version == runtime.APIVersionInternal {
|
||||||
|
resource.Version = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return resource
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
|
||||||
|
resource := coerceResourceForMatching(input)
|
||||||
|
|
||||||
|
hasResource := len(resource.Resource) > 0
|
||||||
|
hasGroup := len(resource.Group) > 0
|
||||||
|
hasVersion := len(resource.Version) > 0
|
||||||
|
|
||||||
|
if !hasResource {
|
||||||
|
return nil, fmt.Errorf("a resource must be present, got: %v", resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := []schema.GroupVersionResource{}
|
||||||
|
switch {
|
||||||
|
case hasGroup && hasVersion:
|
||||||
|
// fully qualified. Find the exact match
|
||||||
|
for plural, singular := range m.pluralToSingular {
|
||||||
|
if singular == resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if plural == resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case hasGroup:
|
||||||
|
// given a group, prefer an exact match. If you don't find one, resort to a prefix match on group
|
||||||
|
foundExactMatch := false
|
||||||
|
requestedGroupResource := resource.GroupResource()
|
||||||
|
for plural, singular := range m.pluralToSingular {
|
||||||
|
if singular.GroupResource() == requestedGroupResource {
|
||||||
|
foundExactMatch = true
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
if plural.GroupResource() == requestedGroupResource {
|
||||||
|
foundExactMatch = true
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
|
||||||
|
// storageclass.storage.k8s.io
|
||||||
|
if !foundExactMatch {
|
||||||
|
for plural, singular := range m.pluralToSingular {
|
||||||
|
if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if singular.Resource == requestedGroupResource.Resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
if plural.Resource == requestedGroupResource.Resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
case hasVersion:
|
||||||
|
for plural, singular := range m.pluralToSingular {
|
||||||
|
if singular.Version == resource.Version && singular.Resource == resource.Resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
if plural.Version == resource.Version && plural.Resource == resource.Resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
for plural, singular := range m.pluralToSingular {
|
||||||
|
if singular.Resource == resource.Resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
if plural.Resource == resource.Resource {
|
||||||
|
ret = append(ret, plural)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ret) == 0 {
|
||||||
|
return nil, &NoResourceMatchError{PartialResource: resource}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions})
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
|
||||||
|
resources, err := m.ResourcesFor(resource)
|
||||||
|
if err != nil {
|
||||||
|
return schema.GroupVersionResource{}, err
|
||||||
|
}
|
||||||
|
if len(resources) == 1 {
|
||||||
|
return resources[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
|
||||||
|
resource := coerceResourceForMatching(input)
|
||||||
|
|
||||||
|
hasResource := len(resource.Resource) > 0
|
||||||
|
hasGroup := len(resource.Group) > 0
|
||||||
|
hasVersion := len(resource.Version) > 0
|
||||||
|
|
||||||
|
if !hasResource {
|
||||||
|
return nil, fmt.Errorf("a resource must be present, got: %v", resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := []schema.GroupVersionKind{}
|
||||||
|
switch {
|
||||||
|
// fully qualified. Find the exact match
|
||||||
|
case hasGroup && hasVersion:
|
||||||
|
kind, exists := m.resourceToKind[resource]
|
||||||
|
if exists {
|
||||||
|
ret = append(ret, kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
case hasGroup:
|
||||||
|
foundExactMatch := false
|
||||||
|
requestedGroupResource := resource.GroupResource()
|
||||||
|
for currResource, currKind := range m.resourceToKind {
|
||||||
|
if currResource.GroupResource() == requestedGroupResource {
|
||||||
|
foundExactMatch = true
|
||||||
|
ret = append(ret, currKind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
|
||||||
|
// storageclass.storage.k8s.io
|
||||||
|
if !foundExactMatch {
|
||||||
|
for currResource, currKind := range m.resourceToKind {
|
||||||
|
if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if currResource.Resource == requestedGroupResource.Resource {
|
||||||
|
ret = append(ret, currKind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
case hasVersion:
|
||||||
|
for currResource, currKind := range m.resourceToKind {
|
||||||
|
if currResource.Version == resource.Version && currResource.Resource == resource.Resource {
|
||||||
|
ret = append(ret, currKind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
for currResource, currKind := range m.resourceToKind {
|
||||||
|
if currResource.Resource == resource.Resource {
|
||||||
|
ret = append(ret, currKind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ret) == 0 {
|
||||||
|
return nil, &NoResourceMatchError{PartialResource: input}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions})
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
|
||||||
|
kinds, err := m.KindsFor(resource)
|
||||||
|
if err != nil {
|
||||||
|
return schema.GroupVersionKind{}, err
|
||||||
|
}
|
||||||
|
if len(kinds) == 1 {
|
||||||
|
return kinds[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
|
||||||
|
}
|
||||||
|
|
||||||
|
type kindByPreferredGroupVersion struct {
|
||||||
|
list []schema.GroupVersionKind
|
||||||
|
sortOrder []schema.GroupVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o kindByPreferredGroupVersion) Len() int { return len(o.list) }
|
||||||
|
func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
|
||||||
|
func (o kindByPreferredGroupVersion) Less(i, j int) bool {
|
||||||
|
lhs := o.list[i]
|
||||||
|
rhs := o.list[j]
|
||||||
|
if lhs == rhs {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if lhs.GroupVersion() == rhs.GroupVersion() {
|
||||||
|
return lhs.Kind < rhs.Kind
|
||||||
|
}
|
||||||
|
|
||||||
|
// otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
|
||||||
|
lhsIndex := -1
|
||||||
|
rhsIndex := -1
|
||||||
|
|
||||||
|
for i := range o.sortOrder {
|
||||||
|
if o.sortOrder[i] == lhs.GroupVersion() {
|
||||||
|
lhsIndex = i
|
||||||
|
}
|
||||||
|
if o.sortOrder[i] == rhs.GroupVersion() {
|
||||||
|
rhsIndex = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rhsIndex == -1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return lhsIndex < rhsIndex
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceByPreferredGroupVersion struct {
|
||||||
|
list []schema.GroupVersionResource
|
||||||
|
sortOrder []schema.GroupVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) }
|
||||||
|
func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
|
||||||
|
func (o resourceByPreferredGroupVersion) Less(i, j int) bool {
|
||||||
|
lhs := o.list[i]
|
||||||
|
rhs := o.list[j]
|
||||||
|
if lhs == rhs {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if lhs.GroupVersion() == rhs.GroupVersion() {
|
||||||
|
return lhs.Resource < rhs.Resource
|
||||||
|
}
|
||||||
|
|
||||||
|
// otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
|
||||||
|
lhsIndex := -1
|
||||||
|
rhsIndex := -1
|
||||||
|
|
||||||
|
for i := range o.sortOrder {
|
||||||
|
if o.sortOrder[i] == lhs.GroupVersion() {
|
||||||
|
lhsIndex = i
|
||||||
|
}
|
||||||
|
if o.sortOrder[i] == rhs.GroupVersion() {
|
||||||
|
rhsIndex = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if rhsIndex == -1 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return lhsIndex < rhsIndex
|
||||||
|
}
|
||||||
|
|
||||||
|
// RESTMapping returns a struct representing the resource path and conversion interfaces a
|
||||||
|
// RESTClient should use to operate on the provided group/kind in order of versions. If a version search
|
||||||
|
// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which
|
||||||
|
// version should be used to access the named group/kind.
|
||||||
|
func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
|
||||||
|
mappings, err := m.RESTMappings(gk, versions...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(mappings) == 0 {
|
||||||
|
return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
|
||||||
|
}
|
||||||
|
// since we rely on RESTMappings method
|
||||||
|
// take the first match and return to the caller
|
||||||
|
// as this was the existing behavior.
|
||||||
|
return mappings[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RESTMappings returns the RESTMappings for the provided group kind. If a version search order
|
||||||
|
// is not provided, the search order provided to DefaultRESTMapper will be used.
|
||||||
|
func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
|
||||||
|
mappings := make([]*RESTMapping, 0)
|
||||||
|
potentialGVK := make([]schema.GroupVersionKind, 0)
|
||||||
|
hadVersion := false
|
||||||
|
|
||||||
|
// Pick an appropriate version
|
||||||
|
for _, version := range versions {
|
||||||
|
if len(version) == 0 || version == runtime.APIVersionInternal {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
currGVK := gk.WithVersion(version)
|
||||||
|
hadVersion = true
|
||||||
|
if _, ok := m.kindToPluralResource[currGVK]; ok {
|
||||||
|
potentialGVK = append(potentialGVK, currGVK)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Use the default preferred versions
|
||||||
|
if !hadVersion && len(potentialGVK) == 0 {
|
||||||
|
for _, gv := range m.defaultGroupVersions {
|
||||||
|
if gv.Group != gk.Group {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(potentialGVK) == 0 {
|
||||||
|
return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, gvk := range potentialGVK {
|
||||||
|
//Ensure we have a REST mapping
|
||||||
|
res, ok := m.kindToPluralResource[gvk]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure we have a REST scope
|
||||||
|
scope, ok := m.kindToScope[gvk]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
mappings = append(mappings, &RESTMapping{
|
||||||
|
Resource: res,
|
||||||
|
GroupVersionKind: gvk,
|
||||||
|
Scope: scope,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(mappings) == 0 {
|
||||||
|
return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}}
|
||||||
|
}
|
||||||
|
return mappings, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaybeResetRESTMapper calls Reset() on the mapper if it is a ResettableRESTMapper.
|
||||||
|
func MaybeResetRESTMapper(mapper RESTMapper) {
|
||||||
|
m, ok := mapper.(ResettableRESTMapper)
|
||||||
|
if ok {
|
||||||
|
m.Reset()
|
||||||
|
}
|
||||||
|
}
|
29
vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
generated
vendored
29
vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
generated
vendored
@ -25,6 +25,8 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
|
|
||||||
inf "gopkg.in/inf.v0"
|
inf "gopkg.in/inf.v0"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -683,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (q Quantity) MarshalCBOR() ([]byte, error) {
|
||||||
|
// The call to String() should never return the string "<nil>" because the receiver's
|
||||||
|
// address will never be nil.
|
||||||
|
return cbor.Marshal(q.String())
|
||||||
|
}
|
||||||
|
|
||||||
// ToUnstructured implements the value.UnstructuredConverter interface.
|
// ToUnstructured implements the value.UnstructuredConverter interface.
|
||||||
func (q Quantity) ToUnstructured() interface{} {
|
func (q Quantity) ToUnstructured() interface{} {
|
||||||
return q.String()
|
return q.String()
|
||||||
@ -711,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (q *Quantity) UnmarshalCBOR(value []byte) error {
|
||||||
|
var s *string
|
||||||
|
if err := cbor.Unmarshal(value, &s); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if s == nil {
|
||||||
|
q.d.Dec = nil
|
||||||
|
q.i = int64Amount{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed, err := ParseQuantity(strings.TrimSpace(*s))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*q = parsed
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// NewDecimalQuantity returns a new Quantity representing the given
|
// NewDecimalQuantity returns a new Quantity representing the given
|
||||||
// value in the given format.
|
// value in the given format.
|
||||||
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
|
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
|
||||||
|
13
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
generated
vendored
13
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
generated
vendored
@ -18,6 +18,7 @@ package v1
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/utils/ptr"
|
||||||
)
|
)
|
||||||
|
|
||||||
// IsControlledBy checks if the object has a controllerRef set to the given owner
|
// IsControlledBy checks if the object has a controllerRef set to the given owner
|
||||||
@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cp := *ref
|
cp := *ref
|
||||||
|
cp.Controller = ptr.To(*ref.Controller)
|
||||||
|
if ref.BlockOwnerDeletion != nil {
|
||||||
|
cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion)
|
||||||
|
}
|
||||||
return &cp
|
return &cp
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetControllerOf returns a pointer to the controllerRef if controllee has a controller
|
// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
|
||||||
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
|
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
|
||||||
refs := controllee.GetOwnerReferences()
|
refs := controllee.GetOwnerReferences()
|
||||||
for i := range refs {
|
for i := range refs {
|
||||||
@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference {
|
|||||||
|
|
||||||
// NewControllerRef creates an OwnerReference pointing to the given owner.
|
// NewControllerRef creates an OwnerReference pointing to the given owner.
|
||||||
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
|
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
|
||||||
blockOwnerDeletion := true
|
|
||||||
isController := true
|
|
||||||
return &OwnerReference{
|
return &OwnerReference{
|
||||||
APIVersion: gvk.GroupVersion().String(),
|
APIVersion: gvk.GroupVersion().String(),
|
||||||
Kind: gvk.Kind,
|
Kind: gvk.Kind,
|
||||||
Name: owner.GetName(),
|
Name: owner.GetName(),
|
||||||
UID: owner.GetUID(),
|
UID: owner.GetUID(),
|
||||||
BlockOwnerDeletion: &blockOwnerDeletion,
|
BlockOwnerDeletion: ptr.To(true),
|
||||||
Controller: &isController,
|
Controller: ptr.To(true),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
677
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
generated
vendored
677
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
generated
vendored
@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() {
|
|||||||
|
|
||||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} }
|
||||||
|
func (*FieldSelectorRequirement) ProtoMessage() {}
|
||||||
|
func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) {
|
||||||
|
return fileDescriptor_a8431b6e0aeeb761, []int{10}
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error {
|
||||||
|
return m.Unmarshal(b)
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
n, err := m.MarshalToSizedBuffer(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return b[:n], nil
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) {
|
||||||
|
xxx_messageInfo_FieldSelectorRequirement.Merge(m, src)
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_Size() int {
|
||||||
|
return m.Size()
|
||||||
|
}
|
||||||
|
func (m *FieldSelectorRequirement) XXX_DiscardUnknown() {
|
||||||
|
xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo
|
||||||
|
|
||||||
func (m *FieldsV1) Reset() { *m = FieldsV1{} }
|
func (m *FieldsV1) Reset() { *m = FieldsV1{} }
|
||||||
func (*FieldsV1) ProtoMessage() {}
|
func (*FieldsV1) ProtoMessage() {}
|
||||||
func (*FieldsV1) Descriptor() ([]byte, []int) {
|
func (*FieldsV1) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{10}
|
return fileDescriptor_a8431b6e0aeeb761, []int{11}
|
||||||
}
|
}
|
||||||
func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
|
func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
|
|||||||
func (m *GetOptions) Reset() { *m = GetOptions{} }
|
func (m *GetOptions) Reset() { *m = GetOptions{} }
|
||||||
func (*GetOptions) ProtoMessage() {}
|
func (*GetOptions) ProtoMessage() {}
|
||||||
func (*GetOptions) Descriptor() ([]byte, []int) {
|
func (*GetOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{11}
|
return fileDescriptor_a8431b6e0aeeb761, []int{12}
|
||||||
}
|
}
|
||||||
func (m *GetOptions) XXX_Unmarshal(b []byte) error {
|
func (m *GetOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo
|
|||||||
func (m *GroupKind) Reset() { *m = GroupKind{} }
|
func (m *GroupKind) Reset() { *m = GroupKind{} }
|
||||||
func (*GroupKind) ProtoMessage() {}
|
func (*GroupKind) ProtoMessage() {}
|
||||||
func (*GroupKind) Descriptor() ([]byte, []int) {
|
func (*GroupKind) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{12}
|
return fileDescriptor_a8431b6e0aeeb761, []int{13}
|
||||||
}
|
}
|
||||||
func (m *GroupKind) XXX_Unmarshal(b []byte) error {
|
func (m *GroupKind) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo
|
|||||||
func (m *GroupResource) Reset() { *m = GroupResource{} }
|
func (m *GroupResource) Reset() { *m = GroupResource{} }
|
||||||
func (*GroupResource) ProtoMessage() {}
|
func (*GroupResource) ProtoMessage() {}
|
||||||
func (*GroupResource) Descriptor() ([]byte, []int) {
|
func (*GroupResource) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{13}
|
return fileDescriptor_a8431b6e0aeeb761, []int{14}
|
||||||
}
|
}
|
||||||
func (m *GroupResource) XXX_Unmarshal(b []byte) error {
|
func (m *GroupResource) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo
|
|||||||
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
|
func (m *GroupVersion) Reset() { *m = GroupVersion{} }
|
||||||
func (*GroupVersion) ProtoMessage() {}
|
func (*GroupVersion) ProtoMessage() {}
|
||||||
func (*GroupVersion) Descriptor() ([]byte, []int) {
|
func (*GroupVersion) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{14}
|
return fileDescriptor_a8431b6e0aeeb761, []int{15}
|
||||||
}
|
}
|
||||||
func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
|
|||||||
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
|
func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
|
||||||
func (*GroupVersionForDiscovery) ProtoMessage() {}
|
func (*GroupVersionForDiscovery) ProtoMessage() {}
|
||||||
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
|
func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{15}
|
return fileDescriptor_a8431b6e0aeeb761, []int{16}
|
||||||
}
|
}
|
||||||
func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
|
|||||||
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
|
func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
|
||||||
func (*GroupVersionKind) ProtoMessage() {}
|
func (*GroupVersionKind) ProtoMessage() {}
|
||||||
func (*GroupVersionKind) Descriptor() ([]byte, []int) {
|
func (*GroupVersionKind) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{16}
|
return fileDescriptor_a8431b6e0aeeb761, []int{17}
|
||||||
}
|
}
|
||||||
func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
|
|||||||
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
|
func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
|
||||||
func (*GroupVersionResource) ProtoMessage() {}
|
func (*GroupVersionResource) ProtoMessage() {}
|
||||||
func (*GroupVersionResource) Descriptor() ([]byte, []int) {
|
func (*GroupVersionResource) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{17}
|
return fileDescriptor_a8431b6e0aeeb761, []int{18}
|
||||||
}
|
}
|
||||||
func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
|
func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
|
|||||||
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
|
func (m *LabelSelector) Reset() { *m = LabelSelector{} }
|
||||||
func (*LabelSelector) ProtoMessage() {}
|
func (*LabelSelector) ProtoMessage() {}
|
||||||
func (*LabelSelector) Descriptor() ([]byte, []int) {
|
func (*LabelSelector) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{18}
|
return fileDescriptor_a8431b6e0aeeb761, []int{19}
|
||||||
}
|
}
|
||||||
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
|
func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
|
|||||||
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
|
func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
|
||||||
func (*LabelSelectorRequirement) ProtoMessage() {}
|
func (*LabelSelectorRequirement) ProtoMessage() {}
|
||||||
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
|
func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{19}
|
return fileDescriptor_a8431b6e0aeeb761, []int{20}
|
||||||
}
|
}
|
||||||
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
|
func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
|
|||||||
func (m *List) Reset() { *m = List{} }
|
func (m *List) Reset() { *m = List{} }
|
||||||
func (*List) ProtoMessage() {}
|
func (*List) ProtoMessage() {}
|
||||||
func (*List) Descriptor() ([]byte, []int) {
|
func (*List) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{20}
|
return fileDescriptor_a8431b6e0aeeb761, []int{21}
|
||||||
}
|
}
|
||||||
func (m *List) XXX_Unmarshal(b []byte) error {
|
func (m *List) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
|
|||||||
func (m *ListMeta) Reset() { *m = ListMeta{} }
|
func (m *ListMeta) Reset() { *m = ListMeta{} }
|
||||||
func (*ListMeta) ProtoMessage() {}
|
func (*ListMeta) ProtoMessage() {}
|
||||||
func (*ListMeta) Descriptor() ([]byte, []int) {
|
func (*ListMeta) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{21}
|
return fileDescriptor_a8431b6e0aeeb761, []int{22}
|
||||||
}
|
}
|
||||||
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
|
func (m *ListMeta) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo
|
|||||||
func (m *ListOptions) Reset() { *m = ListOptions{} }
|
func (m *ListOptions) Reset() { *m = ListOptions{} }
|
||||||
func (*ListOptions) ProtoMessage() {}
|
func (*ListOptions) ProtoMessage() {}
|
||||||
func (*ListOptions) Descriptor() ([]byte, []int) {
|
func (*ListOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{22}
|
return fileDescriptor_a8431b6e0aeeb761, []int{23}
|
||||||
}
|
}
|
||||||
func (m *ListOptions) XXX_Unmarshal(b []byte) error {
|
func (m *ListOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo
|
|||||||
func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
|
func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
|
||||||
func (*ManagedFieldsEntry) ProtoMessage() {}
|
func (*ManagedFieldsEntry) ProtoMessage() {}
|
||||||
func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
|
func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{23}
|
return fileDescriptor_a8431b6e0aeeb761, []int{24}
|
||||||
}
|
}
|
||||||
func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
|
func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
|
|||||||
func (m *MicroTime) Reset() { *m = MicroTime{} }
|
func (m *MicroTime) Reset() { *m = MicroTime{} }
|
||||||
func (*MicroTime) ProtoMessage() {}
|
func (*MicroTime) ProtoMessage() {}
|
||||||
func (*MicroTime) Descriptor() ([]byte, []int) {
|
func (*MicroTime) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{24}
|
return fileDescriptor_a8431b6e0aeeb761, []int{25}
|
||||||
}
|
}
|
||||||
func (m *MicroTime) XXX_Unmarshal(b []byte) error {
|
func (m *MicroTime) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_MicroTime.Unmarshal(m, b)
|
return xxx_messageInfo_MicroTime.Unmarshal(m, b)
|
||||||
@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo
|
|||||||
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
|
func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
|
||||||
func (*ObjectMeta) ProtoMessage() {}
|
func (*ObjectMeta) ProtoMessage() {}
|
||||||
func (*ObjectMeta) Descriptor() ([]byte, []int) {
|
func (*ObjectMeta) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{25}
|
return fileDescriptor_a8431b6e0aeeb761, []int{26}
|
||||||
}
|
}
|
||||||
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
|
func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
|
|||||||
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
|
func (m *OwnerReference) Reset() { *m = OwnerReference{} }
|
||||||
func (*OwnerReference) ProtoMessage() {}
|
func (*OwnerReference) ProtoMessage() {}
|
||||||
func (*OwnerReference) Descriptor() ([]byte, []int) {
|
func (*OwnerReference) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{26}
|
return fileDescriptor_a8431b6e0aeeb761, []int{27}
|
||||||
}
|
}
|
||||||
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
|
func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
|
|||||||
func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
|
func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
|
||||||
func (*PartialObjectMetadata) ProtoMessage() {}
|
func (*PartialObjectMetadata) ProtoMessage() {}
|
||||||
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
|
func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{27}
|
return fileDescriptor_a8431b6e0aeeb761, []int{28}
|
||||||
}
|
}
|
||||||
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
|
func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
|
|||||||
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
|
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
|
||||||
func (*PartialObjectMetadataList) ProtoMessage() {}
|
func (*PartialObjectMetadataList) ProtoMessage() {}
|
||||||
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
|
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{28}
|
return fileDescriptor_a8431b6e0aeeb761, []int{29}
|
||||||
}
|
}
|
||||||
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
|
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
|
|||||||
func (m *Patch) Reset() { *m = Patch{} }
|
func (m *Patch) Reset() { *m = Patch{} }
|
||||||
func (*Patch) ProtoMessage() {}
|
func (*Patch) ProtoMessage() {}
|
||||||
func (*Patch) Descriptor() ([]byte, []int) {
|
func (*Patch) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{29}
|
return fileDescriptor_a8431b6e0aeeb761, []int{30}
|
||||||
}
|
}
|
||||||
func (m *Patch) XXX_Unmarshal(b []byte) error {
|
func (m *Patch) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo
|
|||||||
func (m *PatchOptions) Reset() { *m = PatchOptions{} }
|
func (m *PatchOptions) Reset() { *m = PatchOptions{} }
|
||||||
func (*PatchOptions) ProtoMessage() {}
|
func (*PatchOptions) ProtoMessage() {}
|
||||||
func (*PatchOptions) Descriptor() ([]byte, []int) {
|
func (*PatchOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{30}
|
return fileDescriptor_a8431b6e0aeeb761, []int{31}
|
||||||
}
|
}
|
||||||
func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
|
func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
|
|||||||
func (m *Preconditions) Reset() { *m = Preconditions{} }
|
func (m *Preconditions) Reset() { *m = Preconditions{} }
|
||||||
func (*Preconditions) ProtoMessage() {}
|
func (*Preconditions) ProtoMessage() {}
|
||||||
func (*Preconditions) Descriptor() ([]byte, []int) {
|
func (*Preconditions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{31}
|
return fileDescriptor_a8431b6e0aeeb761, []int{32}
|
||||||
}
|
}
|
||||||
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
|
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
|
|||||||
func (m *RootPaths) Reset() { *m = RootPaths{} }
|
func (m *RootPaths) Reset() { *m = RootPaths{} }
|
||||||
func (*RootPaths) ProtoMessage() {}
|
func (*RootPaths) ProtoMessage() {}
|
||||||
func (*RootPaths) Descriptor() ([]byte, []int) {
|
func (*RootPaths) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{32}
|
return fileDescriptor_a8431b6e0aeeb761, []int{33}
|
||||||
}
|
}
|
||||||
func (m *RootPaths) XXX_Unmarshal(b []byte) error {
|
func (m *RootPaths) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo
|
|||||||
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
|
func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
|
||||||
func (*ServerAddressByClientCIDR) ProtoMessage() {}
|
func (*ServerAddressByClientCIDR) ProtoMessage() {}
|
||||||
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
|
func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{33}
|
return fileDescriptor_a8431b6e0aeeb761, []int{34}
|
||||||
}
|
}
|
||||||
func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
|
func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
|
|||||||
func (m *Status) Reset() { *m = Status{} }
|
func (m *Status) Reset() { *m = Status{} }
|
||||||
func (*Status) ProtoMessage() {}
|
func (*Status) ProtoMessage() {}
|
||||||
func (*Status) Descriptor() ([]byte, []int) {
|
func (*Status) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{34}
|
return fileDescriptor_a8431b6e0aeeb761, []int{35}
|
||||||
}
|
}
|
||||||
func (m *Status) XXX_Unmarshal(b []byte) error {
|
func (m *Status) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo
|
|||||||
func (m *StatusCause) Reset() { *m = StatusCause{} }
|
func (m *StatusCause) Reset() { *m = StatusCause{} }
|
||||||
func (*StatusCause) ProtoMessage() {}
|
func (*StatusCause) ProtoMessage() {}
|
||||||
func (*StatusCause) Descriptor() ([]byte, []int) {
|
func (*StatusCause) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{35}
|
return fileDescriptor_a8431b6e0aeeb761, []int{36}
|
||||||
}
|
}
|
||||||
func (m *StatusCause) XXX_Unmarshal(b []byte) error {
|
func (m *StatusCause) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo
|
|||||||
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
|
func (m *StatusDetails) Reset() { *m = StatusDetails{} }
|
||||||
func (*StatusDetails) ProtoMessage() {}
|
func (*StatusDetails) ProtoMessage() {}
|
||||||
func (*StatusDetails) Descriptor() ([]byte, []int) {
|
func (*StatusDetails) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{36}
|
return fileDescriptor_a8431b6e0aeeb761, []int{37}
|
||||||
}
|
}
|
||||||
func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
|
func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
|
|||||||
func (m *TableOptions) Reset() { *m = TableOptions{} }
|
func (m *TableOptions) Reset() { *m = TableOptions{} }
|
||||||
func (*TableOptions) ProtoMessage() {}
|
func (*TableOptions) ProtoMessage() {}
|
||||||
func (*TableOptions) Descriptor() ([]byte, []int) {
|
func (*TableOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{37}
|
return fileDescriptor_a8431b6e0aeeb761, []int{38}
|
||||||
}
|
}
|
||||||
func (m *TableOptions) XXX_Unmarshal(b []byte) error {
|
func (m *TableOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo
|
|||||||
func (m *Time) Reset() { *m = Time{} }
|
func (m *Time) Reset() { *m = Time{} }
|
||||||
func (*Time) ProtoMessage() {}
|
func (*Time) ProtoMessage() {}
|
||||||
func (*Time) Descriptor() ([]byte, []int) {
|
func (*Time) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{38}
|
return fileDescriptor_a8431b6e0aeeb761, []int{39}
|
||||||
}
|
}
|
||||||
func (m *Time) XXX_Unmarshal(b []byte) error {
|
func (m *Time) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Time.Unmarshal(m, b)
|
return xxx_messageInfo_Time.Unmarshal(m, b)
|
||||||
@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo
|
|||||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||||
func (*Timestamp) ProtoMessage() {}
|
func (*Timestamp) ProtoMessage() {}
|
||||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{39}
|
return fileDescriptor_a8431b6e0aeeb761, []int{40}
|
||||||
}
|
}
|
||||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
|||||||
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
|
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
|
||||||
func (*TypeMeta) ProtoMessage() {}
|
func (*TypeMeta) ProtoMessage() {}
|
||||||
func (*TypeMeta) Descriptor() ([]byte, []int) {
|
func (*TypeMeta) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{40}
|
return fileDescriptor_a8431b6e0aeeb761, []int{41}
|
||||||
}
|
}
|
||||||
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
|
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
|
|||||||
func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
|
func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
|
||||||
func (*UpdateOptions) ProtoMessage() {}
|
func (*UpdateOptions) ProtoMessage() {}
|
||||||
func (*UpdateOptions) Descriptor() ([]byte, []int) {
|
func (*UpdateOptions) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{41}
|
return fileDescriptor_a8431b6e0aeeb761, []int{42}
|
||||||
}
|
}
|
||||||
func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
|
func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
|
|||||||
func (m *Verbs) Reset() { *m = Verbs{} }
|
func (m *Verbs) Reset() { *m = Verbs{} }
|
||||||
func (*Verbs) ProtoMessage() {}
|
func (*Verbs) ProtoMessage() {}
|
||||||
func (*Verbs) Descriptor() ([]byte, []int) {
|
func (*Verbs) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{42}
|
return fileDescriptor_a8431b6e0aeeb761, []int{43}
|
||||||
}
|
}
|
||||||
func (m *Verbs) XXX_Unmarshal(b []byte) error {
|
func (m *Verbs) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo
|
|||||||
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
|
func (m *WatchEvent) Reset() { *m = WatchEvent{} }
|
||||||
func (*WatchEvent) ProtoMessage() {}
|
func (*WatchEvent) ProtoMessage() {}
|
||||||
func (*WatchEvent) Descriptor() ([]byte, []int) {
|
func (*WatchEvent) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_a8431b6e0aeeb761, []int{43}
|
return fileDescriptor_a8431b6e0aeeb761, []int{44}
|
||||||
}
|
}
|
||||||
func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
|
func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
|
||||||
return m.Unmarshal(b)
|
return m.Unmarshal(b)
|
||||||
@ -1282,6 +1310,7 @@ func init() {
|
|||||||
proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
|
proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
|
||||||
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
|
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
|
||||||
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
|
proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
|
||||||
|
proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement")
|
||||||
proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
|
proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
|
||||||
proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
|
proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
|
||||||
proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
|
proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
|
||||||
@ -1326,186 +1355,187 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var fileDescriptor_a8431b6e0aeeb761 = []byte{
|
var fileDescriptor_a8431b6e0aeeb761 = []byte{
|
||||||
// 2853 bytes of a gzipped FileDescriptorProto
|
// 2873 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57,
|
||||||
0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44,
|
0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa,
|
||||||
0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d,
|
0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44,
|
||||||
0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a,
|
0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49,
|
||||||
0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88,
|
0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f,
|
||||||
0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85,
|
0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84,
|
||||||
0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa,
|
0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7,
|
||||||
0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d,
|
0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e,
|
||||||
0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58,
|
0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26,
|
||||||
0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1,
|
0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03,
|
||||||
0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9,
|
0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5,
|
||||||
0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f,
|
0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf,
|
||||||
0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6,
|
0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b,
|
||||||
0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9,
|
0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e,
|
||||||
0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad,
|
0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76,
|
||||||
0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc,
|
0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f,
|
||||||
0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1,
|
0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28,
|
||||||
0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70,
|
0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26,
|
||||||
0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14,
|
0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28,
|
||||||
0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56,
|
0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29,
|
||||||
0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2,
|
0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f,
|
||||||
0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe,
|
0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58,
|
||||||
0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83,
|
0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d,
|
||||||
0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10,
|
0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86,
|
||||||
0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c,
|
0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5,
|
||||||
0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3,
|
0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a,
|
||||||
0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d,
|
0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e,
|
||||||
0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc,
|
0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a,
|
||||||
0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59,
|
0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb,
|
||||||
0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b,
|
0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71,
|
||||||
0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7,
|
0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53,
|
||||||
0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec,
|
0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7,
|
||||||
0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09,
|
0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82,
|
||||||
0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37,
|
0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5,
|
||||||
0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a,
|
0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95,
|
||||||
0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40,
|
0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b,
|
||||||
0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40,
|
0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8,
|
||||||
0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a,
|
0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75,
|
||||||
0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef,
|
0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b,
|
||||||
0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11,
|
0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09,
|
||||||
0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20,
|
0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26,
|
||||||
0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4,
|
0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3,
|
||||||
0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2,
|
0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3,
|
||||||
0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb,
|
0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74,
|
||||||
0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf,
|
0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81,
|
||||||
0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9,
|
0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2,
|
||||||
0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55,
|
0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22,
|
||||||
0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d,
|
0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1,
|
||||||
0xdb, 0x09, 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0,
|
0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60,
|
||||||
0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05,
|
0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6,
|
||||||
0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51,
|
0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1,
|
||||||
0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b,
|
0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82,
|
||||||
0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b,
|
0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8,
|
||||||
0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3,
|
0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8,
|
||||||
0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0,
|
0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56,
|
||||||
0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2,
|
0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e,
|
||||||
0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d,
|
0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47,
|
||||||
0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17,
|
0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc,
|
||||||
0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16,
|
0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79,
|
||||||
0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc,
|
0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13,
|
||||||
0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52,
|
0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77,
|
||||||
0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1,
|
0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a,
|
||||||
0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59,
|
0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e,
|
||||||
0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec,
|
0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45,
|
||||||
0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b,
|
0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98,
|
||||||
0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83,
|
0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3,
|
||||||
0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0,
|
0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e,
|
||||||
0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5,
|
0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18,
|
||||||
0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50,
|
0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d,
|
||||||
0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f,
|
0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69,
|
||||||
0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b,
|
0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91,
|
||||||
0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0,
|
0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54,
|
||||||
0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42,
|
0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63,
|
||||||
0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 0xe3,
|
0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b,
|
||||||
0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc,
|
0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92,
|
||||||
0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29,
|
0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23,
|
||||||
0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e,
|
0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57,
|
||||||
0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85,
|
0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b,
|
||||||
0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1,
|
0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a,
|
||||||
0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48,
|
0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f,
|
||||||
0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb,
|
0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c,
|
||||||
0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04,
|
0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f,
|
||||||
0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2,
|
0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e,
|
||||||
0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f,
|
0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41,
|
||||||
0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc,
|
0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37,
|
||||||
0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2,
|
0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf,
|
||||||
0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72,
|
0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73,
|
||||||
0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b,
|
0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15,
|
||||||
0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90,
|
0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88,
|
||||||
0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21,
|
0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69,
|
||||||
0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4,
|
0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7,
|
||||||
0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46,
|
0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d,
|
||||||
0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11,
|
0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7,
|
||||||
0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b,
|
0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7,
|
||||||
0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d,
|
0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14,
|
||||||
0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d,
|
0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1,
|
||||||
0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73,
|
0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17,
|
||||||
0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99,
|
0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc,
|
||||||
0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a,
|
0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f,
|
||||||
0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07,
|
0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98,
|
||||||
0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63,
|
0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b,
|
||||||
0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7,
|
0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51,
|
||||||
0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed,
|
0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c,
|
||||||
0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e,
|
0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a,
|
||||||
0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13,
|
0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3,
|
||||||
0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde,
|
0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e,
|
||||||
0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f,
|
0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd,
|
||||||
0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1,
|
0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf,
|
||||||
0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19,
|
0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b,
|
||||||
0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50,
|
0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65,
|
||||||
0x94, 0xf1, 0x29, 0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84,
|
0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6,
|
||||||
0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c,
|
0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5,
|
||||||
0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44,
|
0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59,
|
||||||
0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69,
|
0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc,
|
||||||
0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e,
|
0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17,
|
||||||
0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e,
|
0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6,
|
||||||
0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71,
|
0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75,
|
||||||
0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae,
|
0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43,
|
||||||
0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88,
|
0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86,
|
||||||
0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef,
|
0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c,
|
||||||
0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 0xc6, 0xee, 0x54, 0xbd,
|
0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53,
|
||||||
0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87,
|
0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54,
|
||||||
0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21,
|
0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a,
|
||||||
0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77,
|
0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2,
|
||||||
0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76,
|
0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91,
|
||||||
0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64,
|
0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77,
|
||||||
0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87,
|
0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4,
|
||||||
0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30,
|
0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1,
|
||||||
0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58,
|
0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0,
|
||||||
0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd,
|
0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8,
|
||||||
0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75,
|
0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8,
|
||||||
0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47,
|
0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30,
|
||||||
0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e,
|
0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a,
|
||||||
0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55,
|
0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39,
|
||||||
0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15,
|
0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb,
|
||||||
0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75,
|
0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95,
|
||||||
0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a,
|
0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0,
|
||||||
0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c,
|
0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04,
|
||||||
0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08,
|
0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66,
|
||||||
0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf,
|
0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d,
|
||||||
0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3,
|
0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0,
|
||||||
0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15,
|
0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1,
|
||||||
0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1,
|
0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09,
|
||||||
0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98,
|
0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd,
|
||||||
0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44,
|
0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70,
|
||||||
0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63,
|
0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a,
|
||||||
0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e,
|
0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2,
|
||||||
0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d,
|
0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5,
|
||||||
0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64,
|
0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9,
|
||||||
0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4,
|
0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80,
|
||||||
0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb,
|
0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a,
|
||||||
0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89,
|
0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef,
|
||||||
0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78,
|
0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2,
|
||||||
0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a,
|
0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8,
|
||||||
0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2,
|
0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06,
|
||||||
0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4,
|
0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f,
|
||||||
0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae,
|
0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a,
|
||||||
0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87,
|
0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81,
|
||||||
0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05,
|
0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52,
|
||||||
0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3,
|
0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f,
|
||||||
0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6,
|
0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad,
|
||||||
0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce,
|
0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78,
|
||||||
0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f,
|
0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03,
|
||||||
0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1,
|
0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52,
|
||||||
0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8,
|
0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a,
|
||||||
0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37,
|
0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19,
|
||||||
0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50,
|
0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90,
|
||||||
0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c,
|
0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8,
|
||||||
0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25,
|
0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda,
|
||||||
0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d,
|
0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98,
|
||||||
0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f,
|
0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73,
|
||||||
0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8,
|
0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0,
|
||||||
0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4,
|
0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82,
|
||||||
0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca,
|
0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc,
|
||||||
0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c,
|
0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f,
|
||||||
0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a,
|
0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f,
|
||||||
0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5,
|
0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31,
|
||||||
0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff,
|
0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31,
|
||||||
0xd4, 0x07, 0x28, 0x00, 0x00,
|
0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
|
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
|
||||||
@ -2025,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
|||||||
return len(dAtA) - i, nil
|
return len(dAtA) - i, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
size := m.Size()
|
||||||
|
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||||
|
i := len(dAtA)
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Values) > 0 {
|
||||||
|
for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
|
||||||
|
i -= len(m.Values[iNdEx])
|
||||||
|
copy(dAtA[i:], m.Values[iNdEx])
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x1a
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i -= len(m.Operator)
|
||||||
|
copy(dAtA[i:], m.Operator)
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i -= len(m.Key)
|
||||||
|
copy(dAtA[i:], m.Key)
|
||||||
|
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
|
||||||
|
i--
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
return len(dAtA) - i, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
|
func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
|
||||||
size := m.Size()
|
size := m.Size()
|
||||||
dAtA = make([]byte, size)
|
dAtA = make([]byte, size)
|
||||||
@ -3714,6 +3786,25 @@ func (m *Duration) Size() (n int) {
|
|||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *FieldSelectorRequirement) Size() (n int) {
|
||||||
|
if m == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.Key)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
l = len(m.Operator)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
if len(m.Values) > 0 {
|
||||||
|
for _, s := range m.Values {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovGenerated(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
func (m *FieldsV1) Size() (n int) {
|
func (m *FieldsV1) Size() (n int) {
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return 0
|
return 0
|
||||||
@ -4429,6 +4520,18 @@ func (this *Duration) String() string {
|
|||||||
}, "")
|
}, "")
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
func (this *FieldSelectorRequirement) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&FieldSelectorRequirement{`,
|
||||||
|
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
|
||||||
|
`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
|
||||||
|
`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
func (this *GetOptions) String() string {
|
func (this *GetOptions) String() string {
|
||||||
if this == nil {
|
if this == nil {
|
||||||
return "nil"
|
return "nil"
|
||||||
@ -6443,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Key = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 3:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowGenerated
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= uint64(b&0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||||
|
return ErrInvalidLengthGenerated
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func (m *FieldsV1) Unmarshal(dAtA []byte) error {
|
func (m *FieldsV1) Unmarshal(dAtA []byte) error {
|
||||||
l := len(dAtA)
|
l := len(dAtA)
|
||||||
iNdEx := 0
|
iNdEx := 0
|
||||||
|
23
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
generated
vendored
23
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
generated
vendored
@ -324,6 +324,25 @@ message Duration {
|
|||||||
optional int64 duration = 1;
|
optional int64 duration = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FieldSelectorRequirement is a selector that contains values, a key, and an operator that
|
||||||
|
// relates the key and values.
|
||||||
|
message FieldSelectorRequirement {
|
||||||
|
// key is the field selector key that the requirement applies to.
|
||||||
|
optional string key = 1;
|
||||||
|
|
||||||
|
// operator represents a key's relationship to a set of values.
|
||||||
|
// Valid operators are In, NotIn, Exists, DoesNotExist.
|
||||||
|
// The list of operators may grow in the future.
|
||||||
|
optional string operator = 2;
|
||||||
|
|
||||||
|
// values is an array of string values.
|
||||||
|
// If the operator is In or NotIn, the values array must be non-empty.
|
||||||
|
// If the operator is Exists or DoesNotExist, the values array must be empty.
|
||||||
|
// +optional
|
||||||
|
// +listType=atomic
|
||||||
|
repeated string values = 3;
|
||||||
|
}
|
||||||
|
|
||||||
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
|
// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
|
||||||
//
|
//
|
||||||
// Each key is either a '.' representing the field itself, and will always map to an empty set,
|
// Each key is either a '.' representing the field itself, and will always map to an empty set,
|
||||||
@ -460,7 +479,7 @@ message List {
|
|||||||
optional ListMeta metadata = 1;
|
optional ListMeta metadata = 1;
|
||||||
|
|
||||||
// List of objects
|
// List of objects
|
||||||
repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
|
repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListMeta describes metadata that synthetic resources must have, including lists and
|
// ListMeta describes metadata that synthetic resources must have, including lists and
|
||||||
@ -1209,6 +1228,6 @@ message WatchEvent {
|
|||||||
// * If Type is Deleted: the state of the object immediately before deletion.
|
// * If Type is Deleted: the state of the object immediately before deletion.
|
||||||
// * If Type is Error: *Status is recommended; other types may make sense
|
// * If Type is Error: *Status is recommended; other types may make sense
|
||||||
// depending on context.
|
// depending on context.
|
||||||
optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
|
optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
83
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
generated
vendored
83
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
generated
vendored
@ -24,8 +24,10 @@ import (
|
|||||||
|
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
"k8s.io/apimachinery/pkg/fields"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||||
"k8s.io/apimachinery/pkg/selection"
|
"k8s.io/apimachinery/pkg/selection"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
utiljson "k8s.io/apimachinery/pkg/util/json"
|
||||||
)
|
)
|
||||||
|
|
||||||
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
|
// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
|
||||||
@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) {
|
|||||||
if f.Raw == nil {
|
if f.Raw == nil {
|
||||||
return []byte("null"), nil
|
return []byte("null"), nil
|
||||||
}
|
}
|
||||||
|
if f.getContentType() == fieldsV1InvalidOrValidCBORObject {
|
||||||
|
var u map[string]interface{}
|
||||||
|
if err := cbor.Unmarshal(f.Raw, &u); err != nil {
|
||||||
|
return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err)
|
||||||
|
}
|
||||||
|
return utiljson.Marshal(u)
|
||||||
|
}
|
||||||
return f.Raw, nil
|
return f.Raw, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalJSON implements json.Unmarshaler
|
// UnmarshalJSON implements json.Unmarshaler
|
||||||
func (f *FieldsV1) UnmarshalJSON(b []byte) error {
|
func (f *FieldsV1) UnmarshalJSON(b []byte) error {
|
||||||
if f == nil {
|
if f == nil {
|
||||||
return errors.New("metav1.Fields: UnmarshalJSON on nil pointer")
|
return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer")
|
||||||
}
|
}
|
||||||
if !bytes.Equal(b, []byte("null")) {
|
if !bytes.Equal(b, []byte("null")) {
|
||||||
f.Raw = append(f.Raw[0:0], b...)
|
f.Raw = append(f.Raw[0:0], b...)
|
||||||
@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error {
|
|||||||
|
|
||||||
var _ json.Marshaler = FieldsV1{}
|
var _ json.Marshaler = FieldsV1{}
|
||||||
var _ json.Unmarshaler = &FieldsV1{}
|
var _ json.Unmarshaler = &FieldsV1{}
|
||||||
|
|
||||||
|
func (f FieldsV1) MarshalCBOR() ([]byte, error) {
|
||||||
|
if f.Raw == nil {
|
||||||
|
return cbor.Marshal(nil)
|
||||||
|
}
|
||||||
|
if f.getContentType() == fieldsV1InvalidOrValidJSONObject {
|
||||||
|
var u map[string]interface{}
|
||||||
|
if err := utiljson.Unmarshal(f.Raw, &u); err != nil {
|
||||||
|
return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err)
|
||||||
|
}
|
||||||
|
return cbor.Marshal(u)
|
||||||
|
}
|
||||||
|
return f.Raw, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cborNull = []byte{0xf6}
|
||||||
|
|
||||||
|
func (f *FieldsV1) UnmarshalCBOR(b []byte) error {
|
||||||
|
if f == nil {
|
||||||
|
return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer")
|
||||||
|
}
|
||||||
|
if !bytes.Equal(b, cborNull) {
|
||||||
|
f.Raw = append(f.Raw[0:0], b...)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw
|
||||||
|
// bytes don't represent an allowable value in any supported encoding.
|
||||||
|
fieldsV1InvalidOrEmpty = iota
|
||||||
|
|
||||||
|
// fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that
|
||||||
|
// are a valid JSON encoding of an allowable value or don't represent an allowable value in
|
||||||
|
// any supported encoding.
|
||||||
|
fieldsV1InvalidOrValidJSONObject
|
||||||
|
|
||||||
|
// fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that
|
||||||
|
// are a valid CBOR encoding of an allowable value or don't represent an allowable value in
|
||||||
|
// any supported encoding.
|
||||||
|
fieldsV1InvalidOrValidCBORObject
|
||||||
|
)
|
||||||
|
|
||||||
|
// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject,
|
||||||
|
// fieldsV1InvalidOrValidCBORObject based on the value of Raw.
|
||||||
|
//
|
||||||
|
// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map)
|
||||||
|
// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty
|
||||||
|
// and represents an allowable value, then the initial byte unambiguously distinguishes a
|
||||||
|
// JSON-encoded value from a CBOR-encoded value.
|
||||||
|
//
|
||||||
|
// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first
|
||||||
|
// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid
|
||||||
|
// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map",
|
||||||
|
// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or
|
||||||
|
// 0xc6...0xdb). The two sets of valid initial bytes don't intersect.
|
||||||
|
func (f FieldsV1) getContentType() int {
|
||||||
|
if len(f.Raw) > 0 {
|
||||||
|
p := f.Raw[0]
|
||||||
|
switch p {
|
||||||
|
case 'n', '{', '\t', '\r', '\n', ' ':
|
||||||
|
return fieldsV1InvalidOrValidJSONObject
|
||||||
|
case 0xf6: // null
|
||||||
|
return fieldsV1InvalidOrValidCBORObject
|
||||||
|
default:
|
||||||
|
if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ {
|
||||||
|
return fieldsV1InvalidOrValidCBORObject
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fieldsV1InvalidOrEmpty
|
||||||
|
}
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user