Merge pull request #1514 from dims/update-kubernetes-to-1.19.0-beta.2

Update kubernetes to 1.19.0 beta.2
Mike Brown 2020-06-22 10:58:47 -05:00 committed by GitHub
commit 22b55461b3
174 changed files with 6168 additions and 15415 deletions

cri.go

@ -37,7 +37,7 @@ import (
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/klog"
"k8s.io/klog/v2"
criconfig "github.com/containerd/cri/pkg/config"
"github.com/containerd/cri/pkg/constants"


@ -36,7 +36,7 @@ import (
"google.golang.org/grpc"
"k8s.io/cri-api/pkg/apis"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/remote"
"k8s.io/kubernetes/pkg/kubelet/cri/remote"
kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
criconfig "github.com/containerd/cri/pkg/config"


@ -20,7 +20,7 @@ package config
import (
"github.com/containerd/containerd"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/kubernetes/pkg/kubelet/cri/streaming"
)
// DefaultConfig returns default configurations of cri plugin.


@ -23,7 +23,7 @@ import (
"path/filepath"
"github.com/containerd/containerd"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/kubernetes/pkg/kubelet/cri/streaming"
)
// DefaultConfig returns default configurations of cri plugin.


@ -33,7 +33,7 @@ import (
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/kubernetes/pkg/kubelet/cri/streaming"
"github.com/containerd/cri/pkg/store/label"


@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/remotecommand"
k8scert "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/kubernetes/pkg/kubelet/cri/streaming"
"k8s.io/utils/exec"
ctrdutil "github.com/containerd/cri/pkg/containerd/util"


@ -39,10 +39,10 @@ github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc10
github.com/opencontainers/runtime-spec v1.0.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.3.0
github.com/prometheus/client_model v0.1.0
github.com/prometheus/common v0.7.0
github.com/prometheus/procfs v0.0.8
github.com/prometheus/client_golang v1.6.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
github.com/prometheus/procfs v0.0.11
github.com/russross/blackfriday v1.5.2
github.com/sirupsen/logrus v1.6.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
@ -63,26 +63,28 @@ github.com/cilium/ebpf 4032b1d8aae306b7bb94a2a11002
github.com/davecgh/go-spew v1.1.1
github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528
github.com/emicklei/go-restful v2.9.5
github.com/go-logr/logr v0.2.0
github.com/google/gofuzz v1.1.0
github.com/json-iterator/go v1.1.8
github.com/json-iterator/go v1.1.9
github.com/modern-go/concurrent 1.0.3
github.com/modern-go/reflect2 v1.0.1
github.com/pmezard/go-difflib v1.0.0
github.com/seccomp/libseccomp-golang v0.9.1
github.com/stretchr/testify v1.4.0
golang.org/x/crypto bac4c82f69751a6dd76e702d54b3ceb88adab236
golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33
golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef
golang.org/x/oauth2 858c2ad4c8b6c5d10852cb89079f6ca1c7309787
golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v2 v2.2.8
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/apiserver v0.18.2
k8s.io/client-go v0.18.2
k8s.io/cri-api v0.18.2
k8s.io/klog v1.0.0
k8s.io/kubernetes v1.18.2
k8s.io/utils a9aa75ae1b89e1b992c33383f48e942d97e52dae
k8s.io/api v0.19.0-beta.2
k8s.io/apiserver v0.19.0-beta.2
k8s.io/apimachinery v0.19.0-beta.2
k8s.io/client-go v0.19.0-beta.2
k8s.io/component-base v0.19.0-beta.2
k8s.io/cri-api v0.19.0-beta.2
k8s.io/klog/v2 v2.2.0
k8s.io/kubernetes v1.19.0-beta.2
k8s.io/utils 2df71ebbae66f39338aed4cd0bb82d2212ee33cc
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
sigs.k8s.io/yaml v1.2.0
@ -90,7 +92,7 @@ sigs.k8s.io/yaml v1.2.0
github.com/containerd/go-cni v1.0.0
github.com/containernetworking/cni v0.7.1
github.com/containernetworking/plugins v0.7.6
github.com/fsnotify/fsnotify v1.4.8
github.com/fsnotify/fsnotify v1.4.9
# image decrypt depedencies
github.com/containerd/imgcrypt v1.0.1


@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Usage
```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	done := make(chan bool)
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Println("modified file:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	err = watcher.Add("/tmp/foo")
	if err != nil {
		log.Fatal(err)
	}
	<-done
}
```
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.

vendor/github.com/go-logr/logr/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/go-logr/logr/README.md generated vendored Normal file

@ -0,0 +1,181 @@
# A more minimal logging API for Go
Before you consider this package, please read [this blog post by the
inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what
he has to say, and it largely aligns with my own experiences. Too many
choices of levels means inconsistent logs.
This package offers a purely abstract interface, based on these ideas but with
a few twists. Code can depend on just this interface and have the actual
logging implementation be injected from callers. Ideally only `main()` knows
what logging implementation is being used.
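As a hedged sketch of that injection pattern (not part of this change; the `mylib` package, `Reconciler` type, and names are invented), library code can accept a `logr.Logger` and let the caller decide which implementation backs it:
```go
package mylib

import "github.com/go-logr/logr"

// Reconciler depends only on the abstract Logger interface.
type Reconciler struct {
	log logr.Logger
}

// NewReconciler receives whatever Logger the caller (ideally main()) built.
func NewReconciler(log logr.Logger) *Reconciler {
	return &Reconciler{log: log.WithName("reconciler")}
}

// Sync logs through the injected Logger without knowing its implementation.
func (r *Reconciler) Sync(name string) {
	r.log.Info("syncing object", "object", name)
}
```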
# Differences from Dave's ideas
The main differences are:
1) Dave basically proposes doing away with the notion of a logging API in favor
of `fmt.Printf()`. I disagree, especially when you consider things like output
locations, timestamps, file and line decorations, and structured logging. I
restrict the API to just 2 types of logs: info and error.
Info logs are things you want to tell the user which are not errors. Error
logs are, well, errors. If your code receives an `error` from a subordinate
function call and is logging that `error` *and not returning it*, use error
logs.
2) Verbosity-levels on info logs. This gives developers a chance to indicate
arbitrary grades of importance for info logs, without assigning names with
semantic meaning such as "warning", "trace", and "debug". Superficially this
may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.
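A short illustrative sketch of the two log types and of V-levels (not from this change; `doWork` and the numbers are invented):
```go
package example

import "github.com/go-logr/logr"

func doWork() error { return nil } // stand-in for real work

func run(logger logr.Logger) {
	// Info logs: things you want to tell the user that are not errors.
	logger.Info("starting sync", "workers", 4)

	// Verbosity: higher V means less important; whether it is emitted is
	// decided by the configured implementation.
	logger.V(4).Info("cache refreshed", "entries", 1289)

	// Error logs: used when an error is logged rather than returned.
	if err := doWork(); err != nil {
		logger.Error(err, "sync failed", "attempt", 3)
	}
}
```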
This is a BETA grade API.
There are implementations for the following logging libraries:
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger):
[stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
# FAQ
## Conceptual
## Why structured logging?
- **Structured logs are more easily queryable**: Since you've got
key-value pairs, it's much easier to query your structured logs for
particular values by filtering on the contents of a particular key --
think searching request logs for error codes, Kubernetes reconcilers for
the name and namespace of the reconciled object, etc.
- **Structured logging makes it easier to have cross-referencable logs**:
Similarly to searchability, if you maintain conventions around your
keys, it becomes easy to gather all log lines related to a particular
concept.
- **Structured logs allow better dimensions of filtering**: if you have
structure to your logs, you've got more precise control over how much
information is logged -- you might choose in a particular configuration
to log certain keys but not others, only log lines where a certain key
matches a certain value, etc, instead of just having v-levels and names
to key off of.
- **Structured logs better represent structured data**: sometimes, the
data that you want to log is inherently structured (think tuple-like
objects). Structured logs allow you to preserve that structure when
outputting.
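For the cross-referencing and filtering points above, a minimal sketch (the namespace/name values are hypothetical) using `WithValues`:
```go
package example

import "github.com/go-logr/logr"

// reconcile attaches the same keys once with WithValues; every subsequent
// line carries them and can be correlated or filtered by those keys.
func reconcile(logger logr.Logger, namespace, name string) {
	reqLog := logger.WithValues("namespace", namespace, "name", name)
	reqLog.Info("reconcile started")
	reqLog.Info("reconcile finished", "durationMs", 42)
}
```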
## Why V-levels?
**V-levels give operators an easy way to control the chattiness of log
operations**. V-levels provide a way for a given package to distinguish
the relative importance or verbosity of a given log message. Then, if
a particular logger or package is logging too many messages, the user
of the package can simply change the v-levels for that library.
## Why not more named levels, like Warning?
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
from Dave's ideas](#differences-from-daves-ideas).
## Why not allow format strings, too?
**Format strings negate many of the benefits of structured logs**:
- They're not easily searchable without resorting to fuzzy searching,
regular expressions, etc
- They don't store structured data well, since contents are flattened into
a string
- They're not cross-referencable
- They don't compress easily, since the message is not constant
(unless you turn positional parameters into key-value pairs with numerical
keys, at which point you've gotten key-value logging with meaningless
keys)
## Practical
## Why key-value pairs, and not a map?
Key-value pairs are *much* easier to optimize, especially around
allocations. Zap (a structured logger that inspired logr's interface) has
[performance measurements](https://github.com/uber-go/zap#performance)
that show this quite nicely.
While the interface ends up being a little less obvious, you get
potentially better performance, plus avoid making users type
`map[string]string{}` every time they want to log.
## What if my V-levels differ between libraries?
That's fine. Control your V-levels on a per-logger basis, and use the
`WithName` function to pass different loggers to different libraries.
Generally, you should take care to ensure that you have relatively
consistent V-levels within a given logger, however, as this makes deciding
on what verbosity of logs to request easier.
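A hedged sketch of that per-library wiring (the component names and values are invented):
```go
package example

import "github.com/go-logr/logr"

// wire gives each library its own named child logger, so the origin of a
// line is visible and verbosity can be reasoned about per component.
func wire(root logr.Logger) {
	storeLog := root.WithName("store")
	netLog := root.WithName("transport")

	storeLog.V(1).Info("compaction finished", "segments", 7)
	netLog.V(5).Info("dialing peer", "addr", "10.0.0.12:2379")
}
```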
## But I *really* want to use a format string!
That's not actually a question. Assuming your question is "how do
I convert my mental model of logging with format strings to logging with
constant messages":
1. figure out what the error actually is, as you'd write in a TL;DR style,
and use that as a message
2. For every place you'd write a format specifier, look to the word before
it, and add that as a key value pair
For instance, consider the following examples (all taken from spots in the
Kubernetes codebase):
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
responseCode, err)` becomes `logger.Error(err, "client returned an
error", "code", responseCode)`
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
response when requesting url", "attempt", retries, "after
seconds", seconds, "url", url)`
If you *really* must use a format string, place it as a key value, and
call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to
reflect over type %T", thing)` becomes `logger.Info("unable to reflect over
type", "type", fmt.Sprintf("%T", thing))`. In general though, the cases where
this is necessary should be few and far between.
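The same recipe as a before/after sketch (illustrative only; the node-update message and names are invented, and `klog.Infof` merely stands in for the format-string style):
```go
package example

import (
	"fmt"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

// before: format-string logging, shown only for contrast.
func before(nodeName string, retries int, err error) {
	klog.Infof("Failed to update node %q after %d attempts: %v", nodeName, retries, err)
}

// after: constant message plus key-value pairs, per the recipe above.
func after(logger logr.Logger, nodeName string, retries int, err error) {
	logger.Error(err, "failed to update node", "node", nodeName, "attempts", retries)
	// If a formatted value is truly needed, compute it yourself:
	logger.Info("unable to reflect over type", "type", fmt.Sprintf("%T", err))
}
```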
## How do I choose my V-levels?
This is basically the only hard constraint: increase V-levels to denote
more verbose or more debug-y logs.
Otherwise, you can start out with `0` as "you always want to see this",
`1` as "common logging that you might *possibly* want to turn off", and
`10` as "I would like to performance-test your log collection stack".
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs).
## How do I choose my keys?
- make your keys human-readable
- constant keys are generally a good idea
- be consistent across your codebase
- keys should naturally match parts of the message string
While key names are mostly unrestricted (and spaces are acceptable),
it's generally a good idea to stick to printable ASCII characters, or at
least match the general character set of your log lines.
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging

vendor/github.com/go-logr/logr/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module github.com/go-logr/logr
go 1.14

vendor/github.com/go-logr/logr/logr.go generated vendored Normal file

@ -0,0 +1,178 @@
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package logr defines abstract interfaces for logging. Packages can depend on
// these interfaces and callers can implement logging in whatever way is
// appropriate.
//
// This design derives from Dave Cheney's blog:
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
//
// This is a BETA grade API. Until there is a significant 2nd implementation,
// I don't really know how it will change.
//
// The logging specifically makes it non-trivial to use format strings, to encourage
// attaching structured information instead of unstructured format strings.
//
// Usage
//
// Logging is done using a Logger. Loggers can have name prefixes and named
// values attached, so that all log messages logged with that Logger have some
// base context associated.
//
// The term "key" is used to refer to the name associated with a particular
// value, to disambiguate it from the general Logger name.
//
// For instance, suppose we're trying to reconcile the state of an object, and
// we want to log that we've made some decision.
//
// With the traditional log package, we might write:
// log.Printf(
// "decided to set field foo to value %q for object %s/%s",
// targetValue, object.Namespace, object.Name)
//
// With logr's structured logging, we'd write:
// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
// // and the named value target-type=Foo, for extra context.
// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
//
// // later on...
// log.Info("setting field foo on object", "value", targetValue, "object", object)
//
// Depending on our logging implementation, we could then make logging decisions
// based on field values (like only logging such events for objects in a certain
// namespace), or copy the structured information into a structured log store.
//
// For logging errors, Logger has a method called Error. Suppose we wanted to
// log an error while reconciling. With the traditional log package, we might
// write:
// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err)
//
// With logr, we'd instead write:
// // assuming the above setup for log
// log.Error(err, "unable to reconcile object", "object", object)
//
// This functions similarly to:
// log.Info("unable to reconcile object", "error", err, "object", object)
//
// However, it ensures that a standard key for the error value ("error") is used
// across all error logging. Furthermore, certain implementations may choose to
// attach additional information (such as stack traces) on calls to Error, so
// it's preferred to use Error to log errors.
//
// Parts of a log line
//
// Each log message from a Logger has four types of context:
// logger name, log verbosity, log message, and the named values.
//
// The Logger name consists of a series of name "segments" added by successive
// calls to WithName. These name segments will be joined in some way by the
// underlying implementation. It is strongly recommended that name segments
// contain simple identifiers (letters, digits, and hyphen), and do not contain
// characters that could muddle the log output or confuse the joining operation
// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
//
// Log verbosity represents how little a log matters. Level zero, the default,
// matters most. Increasing levels matter less and less. Try to avoid lots of
// different verbosity levels, and instead provide useful keys, logger names,
// and log messages for users to filter on. It's illegal to pass a log level
// below zero.
//
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string.
//
// Variable information can then be attached using named values (key/value
// pairs). Keys are arbitrary strings, while values may be any Go value.
//
// Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
// * be human-readable and meaningful (not auto-generated or simple ordinals)
// * be constant (not dependent on input data)
// * contain only printable characters
// * not contain whitespace or punctuation
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
//
// - `"caller"`: the calling information (file/line) of a particular log line.
// - `"error"`: the underlying error value in the `Error` method.
// - `"level"`: the log level.
// - `"logger"`: the name of the associated logger.
// - `"msg"`: the log message.
// - `"stacktrace"`: the stack trace associated with a particular log line or
// error (often from the `Error` message).
// - `"ts"`: the timestamp for a log line.
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
package logr
// TODO: consider adding back in format strings if they're really needed
// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats
// Logger represents the ability to log messages, both errors and not.
type Logger interface {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info
// logs.
Enabled() bool
// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to
// the log line. The key/value pairs can then be used to add additional
// variable information. The key/value pairs should alternate string
// keys and arbitrary values.
Info(msg string, keysAndValues ...interface{})
// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to calling Info with the "error" named value, but may
// have unique behavior, and should be preferred for logging errors (see the
// package documentations for more information).
//
// The msg field should be used to add context to any underlying error,
// while the err field should be used to attach the actual error that
// triggered this log line, if present.
Error(err error, msg string, keysAndValues ...interface{})
// V returns a Logger value for a specific verbosity level, relative to
// this Logger. In other words, V values are additive. A higher verbosity
// level means a log message is less important. It's illegal to pass a log
// level less than zero.
V(level int) Logger
// WithValues adds some key-value pairs of context to a logger.
// See Info for documentation on how key/value pairs work.
WithValues(keysAndValues ...interface{}) Logger
// WithName adds a new element to the logger's name.
// Successive calls to WithName continue to append
// suffixes to the logger's name. It's strongly recommended
// that name segments contain only letters, digits, and hyphens
// (see the package documentation for more information).
WithName(name string) Logger
}
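To make the contract above concrete, here is a hedged, deliberately minimal sketch of a type satisfying this interface; it is not part of the vendored code, uses only the standard library `log` package for output, and drops `WithValues` data for brevity:
```go
package printlogger

import (
	"log"

	"github.com/go-logr/logr"
)

// printLogger is a toy Logger implementation meant only to show what each
// interface method is responsible for.
type printLogger struct {
	name  string
	level int
}

// New returns the toy logger as a logr.Logger.
func New() logr.Logger { return printLogger{} }

// Enabled applies an arbitrary verbosity cut-off of 1.
func (l printLogger) Enabled() bool { return l.level <= 1 }

func (l printLogger) Info(msg string, kv ...interface{}) {
	if l.Enabled() {
		log.Println(append([]interface{}{"INFO", l.name, msg}, kv...)...)
	}
}

func (l printLogger) Error(err error, msg string, kv ...interface{}) {
	log.Println(append([]interface{}{"ERROR", l.name, msg, "error", err}, kv...)...)
}

// V returns a copy with the verbosity raised; V values are additive.
func (l printLogger) V(level int) logr.Logger {
	l.level += level
	return l
}

// WithValues drops the values in this toy implementation.
func (l printLogger) WithValues(kv ...interface{}) logr.Logger { return l }

// WithName appends a name segment, joined with "/".
func (l printLogger) WithName(name string) logr.Logger {
	if l.name != "" {
		l.name += "/"
	}
	l.name += name
	return l
}
```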


@ -341,7 +341,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
if tag == "-" {
if tag == "-" || field.Name() == "_" {
continue
}
tagParts := strings.Split(tag, ",")


@ -290,16 +290,17 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteObjectStart()
mapIter := encoder.mapType.UnsafeIterate(ptr)
subStream := stream.cfg.BorrowStream(nil)
subStream.Attachment = stream.Attachment
subIter := stream.cfg.BorrowIterator(nil)
keyValues := encodedKeyValues{}
for mapIter.HasNext() {
subStream.buf = make([]byte, 0, 64)
key, elem := mapIter.UnsafeNext()
subStreamIndex := subStream.Buffered()
encoder.keyEncoder.Encode(key, subStream)
if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
stream.Error = subStream.Error
}
encodedKey := subStream.Buffer()
encodedKey := subStream.Buffer()[subStreamIndex:]
subIter.ResetBytes(encodedKey)
decodedKey := subIter.ReadString()
if stream.indention > 0 {
@ -310,7 +311,7 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.elemEncoder.Encode(elem, subStream)
keyValues = append(keyValues, encodedKV{
key: decodedKey,
keyValue: subStream.Buffer(),
keyValue: subStream.Buffer()[subStreamIndex:],
})
}
sort.Sort(keyValues)
@ -320,6 +321,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
}
stream.Write(keyValue.keyValue)
}
if subStream.Error != nil && stream.Error == nil {
stream.Error = subStream.Error
}
stream.WriteObjectEnd()
stream.cfg.ReturnStream(subStream)
stream.cfg.ReturnIterator(subIter)


@ -200,6 +200,7 @@ type stringModeStringEncoder struct {
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
tempStream.Attachment = stream.Attachment
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))


@ -3,12 +3,16 @@ module github.com/prometheus/client_golang
require (
github.com/beorn7/perks v1.0.1
github.com/cespare/xxhash/v2 v2.1.1
github.com/golang/protobuf v1.3.2
github.com/json-iterator/go v1.1.8
github.com/prometheus/client_model v0.1.0
github.com/prometheus/common v0.7.0
github.com/prometheus/procfs v0.0.8
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f
github.com/golang/protobuf v1.4.0
github.com/json-iterator/go v1.1.9
github.com/kr/pretty v0.1.0 // indirect
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.9.1
github.com/prometheus/procfs v0.0.11
github.com/stretchr/testify v1.4.0 // indirect
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v2 v2.2.5 // indirect
)
go 1.11


@ -17,6 +17,7 @@ import (
"errors"
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
@ -42,11 +43,27 @@ type Counter interface {
Add(float64)
}
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 64 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
// perform the corresponding type assertion.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
result.init(result) // Init self-collection.
return result
}
@ -78,6 +95,9 @@ type counter struct {
desc *Desc
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (c *counter) Desc() *Desc {
@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
ival := uint64(v)
if float64(ival) == v {
atomic.AddUint64(&c.valInt, ival)
@ -103,6 +124,11 @@ func (c *counter) Add(v float64) {
}
}
func (c *counter) AddWithExemplar(v float64, e Labels) {
c.Add(v)
c.updateExemplar(v, e)
}
func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error {
ival := atomic.LoadUint64(&c.valInt)
val := fval + float64(ival)
return populateMetric(CounterValue, val, c.labelPairs, out)
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
func (c *counter) updateExemplar(v float64, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, c.now(), l)
if err != nil {
panic(err)
}
c.exemplar.Store(e)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
@ -138,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
return result
}),
@ -267,6 +309,8 @@ type CounterFunc interface {
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),

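As a usage sketch for the `ExemplarAdder` interface introduced in the counter.go hunk above (not part of this diff; the metric name and `trace_id` value are invented), the counter returned by `NewCounter` can be type-asserted as its doc comment describes:
```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_requests_total",
		Help: "Requests handled, annotated with exemplars.",
	})

	// Per the doc comment above, the value returned by NewCounter also
	// implements ExemplarAdder, so the type assertion is safe.
	requests.(prometheus.ExemplarAdder).AddWithExemplar(
		1, prometheus.Labels{"trace_id": "abc123"},
	)
}
```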

@ -84,25 +84,21 @@
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// HistogramVec, and UntypedVec.
// and HistogramVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
// and HistogramVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
// UntypedOpts.
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
// Custom Collectors and constant Metrics
//
@ -118,13 +114,16 @@
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
// NewDesc comes in handy to create those Desc instances. Alternatively, you
// could return no Desc at all, which will mark the Collector “unchecked”. No
// checks are performed at registration time, but metric consistency will still
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
// for all metric types with just a float64 as their value: Counter, Gauge, and
// a special “type” called Untyped. Use the latter if you are not sure if the
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
// happens in the Collect method. The Describe method has to return separate
// Desc instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
// you could return no Desc at all, which will mark the Collector “unchecked”.
// No checks are performed at registration time, but metric consistency will
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is


@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, out)
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same


@ -73,7 +73,7 @@ func NewGoCollector() Collector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",


@ -20,6 +20,7 @@ import (
"sort"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
@ -151,6 +152,10 @@ type HistogramOpts struct {
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
// The returned implementation also implements ExemplarObserver. It is safe to
// perform the corresponding type assertion. Exemplars are tracked separately
// for each bucket.
func NewHistogram(opts HistogramOpts) Histogram {
return newHistogram(
NewDesc(
@ -188,6 +193,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}},
now: time.Now,
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@ -205,9 +211,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts:
// for both counts as well as exemplars:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
return h
@ -254,6 +261,9 @@ type histogram struct {
upperBounds []float64
labelPairs []*dto.LabelPair
exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (h *histogram) Desc() *Desc {
@ -261,36 +271,13 @@ func (h *histogram) Desc() *Desc {
}
func (h *histogram) Observe(v float64) {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
h.observe(v, h.findBucket(v))
}
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
h.updateExemplar(v, i, e)
}
func (h *histogram) Write(out *dto.Metric) error {
@ -329,6 +316,18 @@ func (h *histogram) Write(out *dto.Metric) error {
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
if e := h.exemplars[i].Load(); e != nil {
his.Bucket[i].Exemplar = e.(*dto.Exemplar)
}
}
// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
b := &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e.(*dto.Exemplar),
}
his.Bucket = append(his.Bucket, b)
}
out.Histogram = his
@ -352,6 +351,57 @@ func (h *histogram) Write(out *dto.Metric) error {
return nil
}
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
return sort.SearchFloat64s(h.upperBounds, v)
}
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if bucket < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[bucket], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
}
// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, h.now(), l)
if err != nil {
panic(err)
}
h.exemplars[bucket].Store(e)
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions


@ -50,3 +50,15 @@ type ObserverVec interface {
Collector
}
// ExemplarObserver is implemented by Observers that offer the option of
// observing a value together with an exemplar. Its ObserveWithExemplar method
// works like the Observe method of an Observer but also replaces the currently
// saved exemplar (if any) with a new one, created from the provided value, the
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
// invalid or if the provided labels contain more than 64 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}
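A corresponding hedged sketch for `ExemplarObserver` (again not part of this diff; the names and values are invented), using the histogram constructor whose doc comment above promises the interface is implemented:
```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "demo_request_seconds",
		Help:    "Request latency, annotated with exemplars.",
		Buckets: prometheus.DefBuckets,
	})

	// The Histogram returned by NewHistogram also implements
	// ExemplarObserver, so the type assertion is safe.
	latency.(prometheus.ExemplarObserver).ObserveWithExemplar(
		0.042, prometheus.Labels{"trace_id": "abc123"},
	)
}
```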


@ -33,18 +33,22 @@ var (
)
type processMemoryCounters struct {
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
// System interface description
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
// Refer to the Golang internal implementation
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
_ uint32
PageFaultCount uint32
PeakWorkingSetSize uint64
WorkingSetSize uint64
QuotaPeakPagedPoolUsage uint64
QuotaPagedPoolUsage uint64
QuotaPeakNonPagedPoolUsage uint64
QuotaNonPagedPoolUsage uint64
PagefileUsage uint64
PeakPagefileUsage uint64
PrivateUsage uint64
PeakWorkingSetSize uintptr
WorkingSetSize uintptr
QuotaPeakPagedPoolUsage uintptr
QuotaPagedPoolUsage uintptr
QuotaPeakNonPagedPoolUsage uintptr
QuotaNonPagedPoolUsage uintptr
PagefileUsage uintptr
PeakPagefileUsage uintptr
PrivateUsage uintptr
}
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {


@ -53,12 +53,16 @@ func (r *responseWriterDelegator) Written() int64 {
}
func (r *responseWriterDelegator) WriteHeader(code int) {
if r.observeWriteHeader != nil && !r.wroteHeader {
// Only call observeWriteHeader for the 1st time. It's a bug if
// WriteHeader is called more than once, but we want to protect
// against it here. Note that we still delegate the WriteHeader
// to the original ResponseWriter to not mask the bug from it.
r.observeWriteHeader(code)
}
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
if r.observeWriteHeader != nil {
r.observeWriteHeader(code)
}
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {


@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
}
}
contentType := expfmt.Negotiate(req.Header)
var contentType expfmt.Format
if opts.EnableOpenMetrics {
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
} else {
contentType = expfmt.Negotiate(req.Header)
}
header := rsp.Header()
header.Set(contentTypeHeader, string(contentType))
@ -162,28 +167,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
enc := expfmt.NewEncoder(w, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
httpError(rsp, err)
return
}
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
handleError := func(err error) bool {
if err == nil {
return false
}
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case HTTPErrorOnError:
// We cannot really send an HTTP error at this
// point because we most likely have written
// something to rsp already. But at least we can
// stop sending.
return true
}
// Do nothing in all other cases, including ContinueOnError.
return false
}
if lastErr != nil {
httpError(rsp, lastErr)
for _, mf := range mfs {
if handleError(enc.Encode(mf)) {
return
}
}
if closer, ok := enc.(expfmt.Closer); ok {
// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
if handleError(closer.Close()) {
return
}
}
})
@ -255,7 +272,12 @@ type HandlerErrorHandling int
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body.
// encountered. Report the error message in the body. Note that HTTP
// errors cannot be served anymore once the beginning of a regular
// payload has been sent. Thus, in the (unlikely) case that encoding the
// payload into the negotiated wire format fails, serving the response
// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
// those errors.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
@ -318,6 +340,16 @@ type HandlerOpts struct {
// away). Until the implementation is improved, it is recommended to
// implement a separate timeout in potentially slow Collectors.
Timeout time.Duration
// If true, the experimental OpenMetrics encoding is added to the
// possible options during content negotiation. Note that Prometheus
// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
// the only way to transmit exemplars. However, the move to OpenMetrics
// is not completely transparent. Most notably, the values of "quantile"
// labels of Summaries and "le" labels of Histograms are formatted with
// a trailing ".0" if they would otherwise look like integer numbers
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
}
// gzipAccepted returns whether the client will accept gzip-encoded content.
@ -334,11 +366,9 @@ func gzipAccepted(header http.Header) bool {
}
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerErrer. Error contents is
// supposed to be uncompressed plain text. However, same as with a plain
// http.Error, any header settings will be void if the header has already been
// sent. The error message will still be written to the writer, but it will
// probably be of limited use.
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
// must not be called if the header or any payload has already been sent.
func httpError(rsp http.ResponseWriter, err error) {
rsp.Header().Del(contentEncodingHeader)
http.Error(

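A small, hedged sketch of wiring the new `EnableOpenMetrics` option described above into a metrics endpoint (the listen address and path are arbitrary and not part of this diff):
```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	handler := promhttp.HandlerFor(
		prometheus.DefaultGatherer,
		promhttp.HandlerOpts{
			// Offer OpenMetrics during content negotiation; per the doc
			// comment above, this is the only way exemplars are exposed.
			EnableOpenMetrics: true,
		},
	)
	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```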

@ -16,8 +16,11 @@ package prometheus
import (
"fmt"
"sort"
"time"
"unicode/utf8"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
dto "github.com/prometheus/client_model/go"
)
@ -25,7 +28,8 @@ import (
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int
// Possible values for the ValueType enum.
// Possible values for the ValueType enum. Use UntypedValue to mark a metric
// with an unknown type.
const (
_ ValueType = iota
CounterValue
@ -69,7 +73,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, out)
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@ -116,19 +120,20 @@ func (m *constMetric) Desc() *Desc {
}
func (m *constMetric) Write(out *dto.Metric) error {
return populateMetric(m.valType, m.val, m.labelPairs, out)
return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
}
func populateMetric(
t ValueType,
v float64,
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v)}
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@ -160,3 +165,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
sort.Sort(labelPairSorter(labelPairs))
return labelPairs
}
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
const ExemplarMaxRunes = 64
// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
// number of runes in the label names and values exceeds ExemplarMaxRunes.
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
tsProto, err := ptypes.TimestampProto(ts)
if err != nil {
return nil, err
}
e.Timestamp = tsProto
labelPairs := make([]*dto.LabelPair, 0, len(l))
var runes int
for name, value := range l {
if !checkLabelName(name) {
return nil, fmt.Errorf("exemplar label name %q is invalid", name)
}
runes += utf8.RuneCountInString(name)
if !utf8.ValidString(value) {
return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
}
runes += utf8.RuneCountInString(value)
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(name),
Value: proto.String(value),
})
}
if runes > ExemplarMaxRunes {
return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
}
e.Label = labelPairs
return e, nil
}
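For illustration only (not part of the vendored file): with this exemplar support in place, a caller can attach exemplars through client_golang's ExemplarAdder interface, keeping the label size within the ExemplarMaxRunes limit enforced above. A hedged sketch, with the type assertion guarded in case the concrete counter type does not implement the interface:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_requests_total",
		Help: "Total demo requests.",
	})
	prometheus.MustRegister(requests)

	// Exemplars only appear in the OpenMetrics exposition format; the
	// trace_id label here is purely illustrative.
	if ea, ok := requests.(prometheus.ExemplarAdder); ok {
		ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "3c191d03"})
	} else {
		requests.Inc()
	}
}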

View File

@ -91,6 +91,18 @@ func (m *metricVec) Delete(labels Labels) bool {
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.
// Describe implements Collector.
func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
// Collect implements Collector.
func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
// Reset deletes all metrics in this vector.
func (m *metricVec) Reset() { m.metricMap.Reset() }
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
var (
newCurry []curriedLabelValue

View File

@ -1,11 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metrics.proto
package io_prometheus_client // import "github.com/prometheus/client_model/go"
package io_prometheus_client
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +19,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MetricType int32
@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{
3: "UNTYPED",
4: "HISTOGRAM",
}
var MetricType_value = map[string]int32{
"COUNTER": 0,
"GAUGE": 1,
@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
*p = x
return p
}
func (x MetricType) String() string {
return proto.EnumName(MetricType_name, int32(x))
}
func (x *MetricType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
if err != nil {
@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
*x = MetricType(value)
return nil
}
func (MetricType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
return fileDescriptor_6039342a2ba47b72, []int{0}
}
type LabelPair struct {
@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
return fileDescriptor_6039342a2ba47b72, []int{0}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LabelPair.Unmarshal(m, b)
}
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
}
func (dst *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(dst, src)
func (m *LabelPair) XXX_Merge(src proto.Message) {
xxx_messageInfo_LabelPair.Merge(m, src)
}
func (m *LabelPair) XXX_Size() int {
return xxx_messageInfo_LabelPair.Size(m)
@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
func (*Gauge) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
return fileDescriptor_6039342a2ba47b72, []int{1}
}
func (m *Gauge) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Gauge.Unmarshal(m, b)
}
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
}
func (dst *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(dst, src)
func (m *Gauge) XXX_Merge(src proto.Message) {
xxx_messageInfo_Gauge.Merge(m, src)
}
func (m *Gauge) XXX_Size() int {
return xxx_messageInfo_Gauge.Size(m)
@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 {
}
type Counter struct {
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
func (*Counter) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
return fileDescriptor_6039342a2ba47b72, []int{2}
}
func (m *Counter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Counter.Unmarshal(m, b)
}
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
}
func (dst *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(dst, src)
func (m *Counter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Counter.Merge(m, src)
}
func (m *Counter) XXX_Size() int {
return xxx_messageInfo_Counter.Size(m)
@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 {
return 0
}
func (m *Counter) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
type Quantile struct {
Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
func (*Quantile) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
return fileDescriptor_6039342a2ba47b72, []int{3}
}
func (m *Quantile) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Quantile.Unmarshal(m, b)
}
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
}
func (dst *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(dst, src)
func (m *Quantile) XXX_Merge(src proto.Message) {
xxx_messageInfo_Quantile.Merge(m, src)
}
func (m *Quantile) XXX_Size() int {
return xxx_messageInfo_Quantile.Size(m)
@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
return fileDescriptor_6039342a2ba47b72, []int{4}
}
func (m *Summary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Summary.Unmarshal(m, b)
}
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
}
func (dst *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(dst, src)
func (m *Summary) XXX_Merge(src proto.Message) {
xxx_messageInfo_Summary.Merge(m, src)
}
func (m *Summary) XXX_Size() int {
return xxx_messageInfo_Summary.Size(m)
@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
func (*Untyped) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
return fileDescriptor_6039342a2ba47b72, []int{5}
}
func (m *Untyped) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Untyped.Unmarshal(m, b)
}
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
}
func (dst *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(dst, src)
func (m *Untyped) XXX_Merge(src proto.Message) {
xxx_messageInfo_Untyped.Merge(m, src)
}
func (m *Untyped) XXX_Size() int {
return xxx_messageInfo_Untyped.Size(m)
@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
func (*Histogram) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
return fileDescriptor_6039342a2ba47b72, []int{6}
}
func (m *Histogram) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Histogram.Unmarshal(m, b)
}
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
}
func (dst *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(dst, src)
func (m *Histogram) XXX_Merge(src proto.Message) {
xxx_messageInfo_Histogram.Merge(m, src)
}
func (m *Histogram) XXX_Size() int {
return xxx_messageInfo_Histogram.Size(m)
@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket {
}
type Bucket struct {
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
return fileDescriptor_6039342a2ba47b72, []int{7}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
}
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
}
func (dst *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(dst, src)
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
}
func (m *Bucket) XXX_Size() int {
return xxx_messageInfo_Bucket.Size(m)
@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
return 0
}
func (m *Bucket) GetExemplar() *Exemplar {
if m != nil {
return m.Exemplar
}
return nil
}
type Exemplar struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_6039342a2ba47b72, []int{8}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Exemplar.Unmarshal(m, b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return xxx_messageInfo_Exemplar.Size(m)
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabel() []*LabelPair {
if m != nil {
return m.Label
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil && m.Value != nil {
return *m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
if m != nil {
return m.Timestamp
}
return nil
}
type Metric struct {
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
return fileDescriptor_6039342a2ba47b72, []int{9}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Metric.Unmarshal(m, b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
}
func (dst *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(dst, src)
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return xxx_messageInfo_Metric.Size(m)
@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
func (*MetricFamily) Descriptor() ([]byte, []int) {
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
return fileDescriptor_6039342a2ba47b72, []int{10}
}
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
}
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
}
func (dst *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(dst, src)
func (m *MetricFamily) XXX_Merge(src proto.Message) {
xxx_messageInfo_MetricFamily.Merge(m, src)
}
func (m *MetricFamily) XXX_Size() int {
return xxx_messageInfo_MetricFamily.Size(m)
@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric {
}
func init() {
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
@ -580,50 +669,55 @@ func init() {
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
}
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
// 591 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
var fileDescriptor_6039342a2ba47b72 = []byte{
// 665 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
}

View File

@ -30,17 +30,38 @@ type Encoder interface {
Encode(*dto.MetricFamily) error
}
type encoder func(*dto.MetricFamily) error
func (e encoder) Encode(v *dto.MetricFamily) error {
return e(v)
// Closer is implemented by Encoders that need to be closed to finalize
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
//
// Note that all Encoder implementations returned from this package implement
// Closer, too, even if the Close call is a no-op. This happens in preparation
// for adding a Close method to the Encoder interface directly in a (mildly
// breaking) release in the future.
type Closer interface {
Close() error
}
// Negotiate returns the Content-Type based on the given Accept header.
// If no appropriate accepted type is found, FmtText is returned.
type encoderCloser struct {
encode func(*dto.MetricFamily) error
close func() error
}
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
return ec.encode(v)
}
func (ec encoderCloser) Close() error {
return ec.close()
}
// Negotiate returns the Content-Type based on the given Accept header. If no
// appropriate accepted type is found, FmtText is returned (which is the
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateOpenMetrics.
func Negotiate(h http.Header) Format {
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
// Check for protocol buffer
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format {
return FmtProtoCompact
}
}
// Check for text format.
ver := ac.Params["version"]
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format {
return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation.
// NegotiateIncludingOpenMetrics works like Negotiate but includes
// FmtOpenMetrics as an option for the result. Note that this function is
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
case "text":
return FmtProtoText
case "compact-text":
return FmtProtoCompact
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
return FmtOpenMetrics
}
}
return FmtText
}
// NewEncoder returns a new encoder based on content type negotiation. All
// Encoder implementations returned by NewEncoder also implement Closer, and
// callers should always call the Close method. It is currently only required
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
func NewEncoder(w io.Writer, format Format) Encoder {
switch format {
case FmtProtoDelim:
return encoder(func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
return err
},
close: func() error { return nil },
}
case FmtProtoCompact:
return encoder(func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
return err
},
close: func() error { return nil },
}
case FmtProtoText:
return encoder(func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
return err
},
close: func() error { return nil },
}
case FmtText:
return encoder(func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
return err
})
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
return err
},
close: func() error { return nil },
}
case FmtOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
return err
},
close: func() error {
_, err := FinalizeOpenMetrics(w)
return err
},
}
}
panic("expfmt.NewEncoder: unknown format")
panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
}
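A caller-side sketch (not part of this diff) tying the pieces together: negotiate the format from the scrape request, encode each gathered family, and call Close so the OpenMetrics `# EOF` trailer is written. It assumes only the exported expfmt and client_golang APIs:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	mfs, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	format := expfmt.NegotiateIncludingOpenMetrics(r.Header)
	w.Header().Set("Content-Type", string(format))
	enc := expfmt.NewEncoder(w, format)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			return
		}
	}
	// Every encoder returned by NewEncoder also implements Closer; for
	// FmtOpenMetrics, Close writes the required "# EOF" line.
	if closer, ok := enc.(expfmt.Closer); ok {
		closer.Close()
	}
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}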

View File

@ -19,10 +19,12 @@ type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion = "0.0.1"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
@ -30,6 +32,7 @@ const (
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
)
const (

View File

@ -0,0 +1,527 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expfmt
import (
"bufio"
"bytes"
"fmt"
"io"
"math"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
// the same order as the input, no further sorting is performed. Furthermore,
// this function assumes the input is already sanitized and does not perform any
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
// on individual metric families, it is the responsibility of the caller to
// append this line to 'out' once all metric families have been written.
// Conveniently, this can be done by calling FinalizeOpenMetrics.
//
// The output should be fully OpenMetrics compliant. However, there are a few
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
// - Counters are expected to have the `_total` suffix in their metric name. In
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
// line. A counter with a missing `_total` suffix is not an error. However,
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
// output.
//
// - No support for the following (optional) features: `# UNIT` line, `_created`
// line, info type, stateset type, gaugehistogram type.
//
// - The size of exemplar labels is not checked (i.e. it's possible to create
// exemplars that are larger than allowed by the OpenMetrics specification).
//
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
}
// Try the interface upgrade. If it doesn't work, we'll use a
// bufio.Writer from the sync.Pool.
w, ok := out.(enhancedWriter)
if !ok {
b := bufPool.Get().(*bufio.Writer)
b.Reset(out)
w = b
defer func() {
bErr := b.Flush()
if err == nil {
err = bErr
}
bufPool.Put(b)
}()
}
var (
n int
metricType = in.GetType()
shortName = name
)
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
shortName = name[:len(name)-6]
}
// Comments, first HELP, then TYPE.
if in.Help != nil {
n, err = w.WriteString("# HELP ")
written += n
if err != nil {
return
}
n, err = w.WriteString(shortName)
written += n
if err != nil {
return
}
err = w.WriteByte(' ')
written++
if err != nil {
return
}
n, err = writeEscapedString(w, *in.Help, true)
written += n
if err != nil {
return
}
err = w.WriteByte('\n')
written++
if err != nil {
return
}
}
n, err = w.WriteString("# TYPE ")
written += n
if err != nil {
return
}
n, err = w.WriteString(shortName)
written += n
if err != nil {
return
}
switch metricType {
case dto.MetricType_COUNTER:
if strings.HasSuffix(name, "_total") {
n, err = w.WriteString(" counter\n")
} else {
n, err = w.WriteString(" unknown\n")
}
case dto.MetricType_GAUGE:
n, err = w.WriteString(" gauge\n")
case dto.MetricType_SUMMARY:
n, err = w.WriteString(" summary\n")
case dto.MetricType_UNTYPED:
n, err = w.WriteString(" unknown\n")
case dto.MetricType_HISTOGRAM:
n, err = w.WriteString(" histogram\n")
default:
return written, fmt.Errorf("unknown metric type %s", metricType.String())
}
written += n
if err != nil {
return
}
// Finally the samples, one line for each.
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
"expected counter in metric %s %s", name, metric,
)
}
// Note that we have ensured above that either the name
// ends on `_total` or that the rendered type is
// `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
"expected gauge in metric %s %s", name, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
"expected untyped in metric %s %s", name, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
"expected summary in metric %s %s", name, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
w, name, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
)
written += n
if err != nil {
return
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
written += n
if err != nil {
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
"expected histogram in metric %s %s", name, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
)
written += n
if err != nil {
return
}
if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
)
written += n
if err != nil {
return
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
written += n
if err != nil {
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
default:
return written, fmt.Errorf(
"unexpected type in metric %s %s", name, metric,
)
}
written += n
if err != nil {
return
}
}
return
}
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
return w.Write([]byte("# EOF\n"))
}
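A short sketch of hypothetical caller code (not part of this file) using these two exported functions directly when bypassing the Encoder machinery; imports of io, expfmt, and the client_model dto package are assumed:

func writeOpenMetrics(w io.Writer, mfs []*dto.MetricFamily) error {
	for _, mf := range mfs {
		if _, err := expfmt.MetricFamilyToOpenMetrics(w, mf); err != nil {
			return err
		}
	}
	// The caller, not MetricFamilyToOpenMetrics, is responsible for the
	// trailing "# EOF" line; FinalizeOpenMetrics emits it.
	_, err := expfmt.FinalizeOpenMetrics(w)
	return err
}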
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
// w, given the metric name, the metric proto message itself, optionally an
// additional label name with a float64 value (use empty string as label name if
// not required), the value (optionally as float64 or uint64, determined by
// useIntValue), and optionally an exemplar (use nil if not required). The
// function returns the number of bytes written and any error encountered.
func writeOpenMetricsSample(
w enhancedWriter,
name, suffix string,
metric *dto.Metric,
additionalLabelName string, additionalLabelValue float64,
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeOpenMetricsLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
if useIntValue {
n, err = writeUint(w, intValue)
} else {
n, err = writeOpenMetricsFloat(w, floatValue)
}
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly without converting to a float first.
n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
written += n
if err != nil {
return written, err
}
}
if exemplar != nil {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
return written, err
}
}
err = w.WriteByte('\n')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
// in OpenMetrics style.
func writeOpenMetricsLabelPairs(
w enhancedWriter,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
)
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
written += n
if err != nil {
return written, err
}
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeEscapedString(w, lp.GetValue(), true)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
separator = ','
}
if additionalLabelName != "" {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(additionalLabelName)
written += n
if err != nil {
return written, err
}
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeOpenMetricsFloat(w, additionalLabelValue)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
}
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
return written, nil
}
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
written := 0
n, err := w.WriteString(" # ")
written += n
if err != nil {
return written, err
}
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
written += n
if err != nil {
return written, err
}
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
n, err = writeOpenMetricsFloat(w, e.GetValue())
written += n
if err != nil {
return written, err
}
if e.Timestamp != nil {
err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
ts, err := ptypes.Timestamp((*e).Timestamp)
if err != nil {
return written, err
}
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
written += n
if err != nil {
return written, err
}
}
return written, nil
}
// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
// number would otherwise contain neither a "." nor an "e".
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
switch {
case f == 1:
return w.WriteString("1.0")
case f == 0:
return w.WriteString("0.0")
case f == -1:
return w.WriteString("-1.0")
case math.IsNaN(f):
return w.WriteString("NaN")
case math.IsInf(f, +1):
return w.WriteString("+Inf")
case math.IsInf(f, -1):
return w.WriteString("-Inf")
default:
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
if !bytes.ContainsAny(*bp, "e.") {
*bp = append(*bp, '.', '0')
}
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}
}
// writeUint is like writeInt just for uint64.
func writeUint(w enhancedWriter, u uint64) (int, error) {
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendUint((*bp)[:0], u, 10)
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}

View File

@ -423,9 +423,8 @@ var (
func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
if includeDoubleQuote {
return quotedEscaper.WriteString(w, v)
} else {
return escaper.WriteString(w, v)
}
return escaper.WriteString(w, v)
}
// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes

View File

@ -11,12 +11,12 @@ require (
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
github.com/prometheus/client_model v0.2.0
github.com/sirupsen/logrus v1.4.2
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect
golang.org/x/sys v0.0.0-20190422165155-953cdadca894
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.2.2
gopkg.in/yaml.v2 v2.2.4
)
go 1.11

View File

@ -14,10 +14,10 @@
package procfs
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"strconv"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
@ -52,80 +52,102 @@ type Crypto struct {
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
}
crypto, err := parseCrypto(data)
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
}
return crypto, nil
}
func parseCrypto(cryptoData []byte) ([]Crypto, error) {
crypto := []Crypto{}
// parseCrypto parses a /proc/crypto stream into Crypto elements.
func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
for _, block := range cryptoBlocks {
var newCryptoElem Crypto
lines := strings.Split(string(block), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' {
continue
}
fields := strings.Split(line, ":")
key := strings.TrimSpace(fields[0])
value := strings.TrimSpace(fields[1])
vp := util.NewValueParser(value)
switch strings.TrimSpace(key) {
case "async":
b, err := strconv.ParseBool(value)
if err == nil {
newCryptoElem.Async = b
}
case "blocksize":
newCryptoElem.Blocksize = vp.PUInt64()
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
s := bufio.NewScanner(r)
for s.Scan() {
text := s.Text()
switch {
case strings.HasPrefix(text, "name"):
// Each crypto element begins with its name.
out = append(out, Crypto{})
case text == "":
continue
}
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
}
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
// Parse the key/value pair into the currently focused element.
c := &out[len(out)-1]
if err := c.parseKV(k, v); err != nil {
return nil, err
}
crypto = append(crypto, newCryptoElem)
}
return crypto, nil
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)
switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}
return vp.Err()
}

View File

@ -5,4 +5,5 @@ go 1.12
require (
github.com/google/go-cmp v0.3.1
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
)

62
vendor/github.com/prometheus/procfs/loadavg.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// LoadAvg represents an entry in /proc/loadavg
type LoadAvg struct {
Load1 float64
Load5 float64
Load15 float64
}
// LoadAvg returns loadavg from /proc.
func (fs FS) LoadAvg() (*LoadAvg, error) {
path := fs.proc.Path("loadavg")
data, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
return parseLoad(data)
}
// Parse /proc loadavg and return 1m, 5m and 15m.
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
}
}
return &LoadAvg{
Load1: loads[0],
Load5: loads[1],
Load15: loads[2],
}, nil
}
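A tiny consumer-side sketch of the new LoadAvg API (not part of this file), assuming the exported procfs.NewFS constructor:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	avg, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	// 1-, 5- and 15-minute load averages parsed from /proc/loadavg.
	fmt.Printf("load: %.2f %.2f %.2f\n", avg.Load1, avg.Load5, avg.Load15)
}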

View File

@ -29,10 +29,10 @@ import (
// is described in the following man page.
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
// Unique Id for the mount
MountId int
// The Id of the parent mount
ParentId int
// Unique ID for the mount
MountID int
// The ID of the parent mount
ParentID int
// The value of `st_dev` for the files on this FS
MajorMinorVer string
// The pathname of the directory in the FS that forms
@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
}
mount.MountId, err = strconv.Atoi(mountInfo[0])
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
}
mount.ParentId, err = strconv.Atoi(mountInfo[1])
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
}

View File

@ -0,0 +1,153 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
// and contains netfilter conntrack statistics at one CPU core
type ConntrackStatEntry struct {
Entries uint64
Found uint64
Invalid uint64
Ignore uint64
Insert uint64
InsertFailed uint64
Drop uint64
EarlyDrop uint64
SearchRestart uint64
}
// Retrieves netfilter's conntrack statistics, split by CPU cores
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
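Sketch of a hypothetical caller (imports of fmt, log and procfs assumed); each returned entry corresponds to one CPU's line in net/stat/nf_conntrack:

	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, s := range stats {
		// Found, Invalid and Drop are per-CPU counters parsed from the
		// hexadecimal fields of the corresponding line.
		fmt.Printf("cpu %d: found=%d invalid=%d drop=%d\n", cpu, s.Found, s.Invalid, s.Drop)
	}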
// Parses a slice of ConntrackStatEntries from the given filepath
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(path)
if err != nil {
// Do not wrap this error so the caller can detect os.IsNotExist and
// similar conditions.
return nil, err
}
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
}
return stat, nil
}
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
var entries []ConntrackStatEntry
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
conntrackEntry, err := parseConntrackStatEntry(fields)
if err != nil {
return nil, err
}
entries = append(entries, *conntrackEntry)
}
return entries, nil
}
// Parses a ConntrackStatEntry from given array of fields
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
if len(fields) != 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
}
entry := &ConntrackStatEntry{}
entries, err := parseConntrackStatField(fields[0])
if err != nil {
return nil, err
}
entry.Entries = entries
found, err := parseConntrackStatField(fields[2])
if err != nil {
return nil, err
}
entry.Found = found
invalid, err := parseConntrackStatField(fields[4])
if err != nil {
return nil, err
}
entry.Invalid = invalid
ignore, err := parseConntrackStatField(fields[5])
if err != nil {
return nil, err
}
entry.Ignore = ignore
insert, err := parseConntrackStatField(fields[8])
if err != nil {
return nil, err
}
entry.Insert = insert
insertFailed, err := parseConntrackStatField(fields[9])
if err != nil {
return nil, err
}
entry.InsertFailed = insertFailed
drop, err := parseConntrackStatField(fields[10])
if err != nil {
return nil, err
}
entry.Drop = drop
earlyDrop, err := parseConntrackStatField(fields[11])
if err != nil {
return nil, err
}
entry.EarlyDrop = earlyDrop
searchRestart, err := parseConntrackStatField(fields[16])
if err != nil {
return nil, err
}
entry.SearchRestart = searchRestart
return entry, nil
}
// Parses a uint64 from given hex in string
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
}
return val, err
}

View File

@ -14,78 +14,89 @@
package procfs
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
type SoftnetStat struct {
// Number of processed packets
Processed uint
Processed uint32
// Number of dropped packets
Dropped uint
Dropped uint32
// Number of times processing packets ran out of quota
TimeSqueezed uint
TimeSqueezed uint32
}
// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
// and then return a slice of SoftnetEntry's.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
var softNetProcFile = "net/softnet_stat"
// NetSoftnetStat reads data from /proc/net/softnet_stat.
func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
if err != nil {
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
return nil, err
}
return parseSoftnetEntries(data)
}
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]SoftnetEntry, 0)
var err error
const (
expectedColumns = 11
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == 0 {
continue
}
if width != expectedColumns {
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
var entry SoftnetEntry
if entry, err = parseSoftnetEntry(columns); err != nil {
return []SoftnetEntry{}, err
}
entries = append(entries, entry)
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
}
return entries, nil
}
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
var err error
var processed, dropped, timeSqueezed uint64
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
const minColumns = 9
s := bufio.NewScanner(r)
var stats []SoftnetStat
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
}
// We only parse the first three columns at the moment.
us, err := parseHexUint32s(columns[0:3])
if err != nil {
return nil, err
}
stats = append(stats, SoftnetStat{
Processed: us[0],
Dropped: us[1],
TimeSqueezed: us[2],
})
}
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
}
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
}
return SoftnetEntry{
Processed: uint(processed),
Dropped: uint(dropped),
TimeSqueezed: uint(timeSqueezed),
}, nil
return stats, nil
}
func parseHexUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
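Each line of /proc/net/softnet_stat corresponds to one CPU, so the returned slice is effectively indexed by CPU. A hedged usage sketch of the renamed API, assuming the vendored procfs package above and a Linux /proc:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // reads from the default /proc mount point
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, s := range stats {
		fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d\n",
			cpu, s.Processed, s.Dropped, s.TimeSqueezed)
	}
}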

229
vendor/github.com/prometheus/procfs/net_udp.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
type (
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
NetUDP []*netUDPLine
// NetUDPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetUDP it does not collect
// the parsed lines into a slice.
NetUDPSummary struct {
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netUDPLine represents the fields parsed from a single line
// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netUDPLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
}
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp.
func (fs FS) NetUDP() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp"))
}
// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp6.
func (fs FS) NetUDP6() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp6"))
}
// NetUDPSummary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp.
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp"))
}
// NetUDP6Summary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp6.
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp6"))
}
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
netUDP := NetUDP{}
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetUDPLine(fields)
if err != nil {
return nil, err
}
netUDP = append(netUDP, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netUDP, nil
}
// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
netUDPSummary := &NetUDPSummary{}
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetUDPLine(fields)
if err != nil {
return nil, err
}
netUDPSummary.TxQueueLength += line.TxQueue
netUDPSummary.RxQueueLength += line.RxQueue
netUDPSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return netUDPSummary, nil
}
// parseNetUDPLine parses a single line, represented by a list of fields.
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
line := &netUDPLine{}
if len(fields) < 8 {
return nil, fmt.Errorf(
"cannot parse net udp socket line as it has less then 8 columns: %s",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf(
"cannot parse sl field in udp socket line: %s", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf(
"cannot parse local_address field in udp socket line: %s", fields[1])
}
if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
return nil, fmt.Errorf(
"cannot parse local_address value in udp socket line: %s", err)
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse local_address port value in udp socket line: %s", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf(
"cannot parse rem_address field in udp socket line: %s", fields[1])
}
if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
return nil, fmt.Errorf(
"cannot parse rem_address value in udp socket line: %s", err)
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse rem_address port value in udp socket line: %s", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse st value in udp socket line: %s", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse uid value in udp socket line: %s", err)
}
return line, nil
}
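A hedged usage sketch for the summary variant, which avoids keeping every socket line in memory; it assumes the vendored procfs package above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("udp sockets=%d tx_queue=%d rx_queue=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}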

View File

@ -15,7 +15,6 @@ package procfs
import (
"bufio"
"errors"
"fmt"
"io"
"os"
@ -27,25 +26,15 @@ import (
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const (
netUnixKernelPtrIdx = iota
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
// Constants for the various /proc/net/unix enumerations.
// TODO: match against x/sys/unix or similar?
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16
netUnixFlagDefault = 0
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
@ -53,129 +42,127 @@ const (
netUnixStateDisconnected = 4
)
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
// NetUNIXType is the type of the type field.
type NetUNIXType uint64
// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUNIXFlags is the type of the flags field.
type NetUNIXFlags uint64
// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUNIXState is the type of the state field.
type NetUNIXState uint64
// NetUnixState is the type of the state field.
type NetUnixState uint64
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
// NetUNIXLine represents a line of /proc/net/unix.
type NetUNIXLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUnixFlags
Type NetUnixType
State NetUnixState
Flags NetUNIXFlags
Type NetUNIXType
State NetUNIXState
Inode uint64
Path string
}
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
Rows []*NetUnixLine
// NetUNIX holds the data read from /proc/net/unix.
type NetUNIX struct {
Rows []*NetUNIXLine
}
// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetUnix()
// NetUNIX returns data read from /proc/net/unix.
func (fs FS) NetUNIX() (*NetUNIX, error) {
return readNetUNIX(fs.proc.Path("net/unix"))
}
// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
// readNetUNIX reads data in /proc/net/unix format from the specified file.
func readNetUNIX(file string) (*NetUNIX, error) {
// This file could be quite large and a streaming read is desirable versus
// reading the entire contents at once.
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
return NewNetUnixByReader(f)
return parseNetUNIX(f)
}
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
nu := &NetUnix{
Rows: make([]*NetUnixLine, 0, 32),
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// From the man page of proc(5), it does not contain an Inode field,
// but in actually it exists.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
// parseNetUNIX creates a NetUnix structure from the incoming stream.
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
// Begin scanning by checking for the existence of Inode.
s := bufio.NewScanner(r)
s.Scan()
minFieldsCnt := netUnixStaticFieldsCnt
// From the man page of proc(5), it does not contain an Inode field,
// but in practice it exists. This code works for both cases.
hasInode := strings.Contains(s.Text(), "Inode")
// Expect a minimum number of fields, but Inode and Path are optional:
// Num RefCount Protocol Flags Type St Inode Path
minFields := 6
if hasInode {
minFieldsCnt++
minFields++
}
for scanner.Scan() {
line := scanner.Text()
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
var nu NetUNIX
for s.Scan() {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nu, err
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
}
nu.Rows = append(nu.Rows, item)
}
return nu, scanner.Err()
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
}
return &nu, nil
}
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt {
return nil, fmt.Errorf(
"Parse Unix domain failed: expect at least %d fields but got %d",
minFieldsCnt, fieldsLen)
l := len(fields)
if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
}
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
// Field offsets are as follows:
// Num RefCount Protocol Flags Type St Inode Path
kernelPtr := strings.TrimSuffix(fields[0], ":")
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
}
users, err := u.parseUsers(fields[netUnixRefCountIdx])
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
}
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
}
typ, err := u.parseType(fields[netUnixTypeIdx])
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
}
var inode uint64
if hasInode {
inodeStr := fields[netUnixInodeIdx]
inode, err = u.parseInode(inodeStr)
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
}
}
nuLine := &NetUnixLine{
n := &NetUNIXLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
}
// Path field is optional.
if fieldsLen > minFieldsCnt {
pathIdx := netUnixInodeIdx + 1
if l > min {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
if !hasInode {
pathIdx--
}
nuLine.Path = fields[pathIdx]
n.Path = fields[pathIdx]
}
return nuLine, nil
return n, nil
}
func (u NetUnix) parseKernelPtr(str string) (string, error) {
if !strings.HasSuffix(str, ":") {
return "", errInvalidKernelPtrFmt
}
return str[:len(str)-1], nil
func (u NetUNIX) parseUsers(s string) (uint64, error) {
return strconv.ParseUint(s, 16, 32)
}
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
typ, err := strconv.ParseUint(s, 16, 16)
if err != nil {
return 0, err
}
return NetUnixType(typ), nil
return NetUNIXType(typ), nil
}
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32)
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
flags, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return 0, err
}
return NetUnixFlags(flags), nil
return NetUNIXFlags(flags), nil
}
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8)
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
st, err := strconv.ParseInt(s, 16, 8)
if err != nil {
return 0, err
}
return NetUnixState(st), nil
return NetUNIXState(st), nil
}
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64)
func (u NetUNIX) parseInode(s string) (uint64, error) {
return strconv.ParseUint(s, 10, 64)
}
func (t NetUnixType) String() string {
func (t NetUNIXType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
return "unknown"
}
func (f NetUnixFlags) String() string {
func (f NetUNIXFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
}
}
func (s NetUnixState) String() string {
func (s NetUNIXState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"

View File

@ -16,6 +16,7 @@ package procfs
import (
"bufio"
"bytes"
"errors"
"regexp"
"github.com/prometheus/procfs/internal/util"
@ -23,10 +24,11 @@ import (
// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
// ProcFDInfo contains represents file descriptor information.
@ -96,15 +98,21 @@ type InotifyInfo struct {
// InotifyInfo constructor. Only available on kernel 3.8+.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
m := r.FindStringSubmatch(line)
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: m[4],
m := rInotifyParts.FindStringSubmatch(line)
if len(m) >= 4 {
var mask string
if len(m) == 5 {
mask = m[4]
}
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: mask,
}
return i, nil
}
return i, nil
return nil, errors.New("invalid inode entry: " + line)
}
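For reference, lines matched by the widened rInotifyParts pattern look like the hypothetical examples below; the mask group is optional, so m[4] is empty when it is absent. An illustrative sketch, not part of the diff:

package main

import (
	"fmt"
	"regexp"
)

var rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)

func main() {
	for _, line := range []string{
		"inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0",
		"inotify wd:2 ino:a111 sdev:800013", // hypothetical line without a mask
	} {
		m := rInotifyParts.FindStringSubmatch(line)
		fmt.Printf("wd=%s ino=%s sdev=%s mask=%q\n", m[1], m[2], m[3], m[4])
	}
}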
// ProcFDInfos represents a list of ProcFDInfo structs.

208
vendor/github.com/prometheus/procfs/proc_maps.go generated vendored Normal file
View File

@ -0,0 +1,208 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"golang.org/x/sys/unix"
)
type ProcMapPermissions struct {
// mapping has the [R]ead flag set
Read bool
// mapping has the [W]rite flag set
Write bool
// mapping has the [X]ecutable flag set
Execute bool
// mapping has the [S]hared flag set
Shared bool
// mapping is marked as [P]rivate (copy on write)
Private bool
}
// ProcMap contains the process memory-mappings of the process,
// read from /proc/[pid]/maps
type ProcMap struct {
// The start address of current mapping.
StartAddr uintptr
// The end address of the current mapping
EndAddr uintptr
// The permissions for this mapping
Perms *ProcMapPermissions
// The current offset into the file/fd (e.g., shared libs)
Offset int64
// Device owner of this mapping (major:minor) in Mkdev format.
Dev uint64
// The inode of the device above
Inode uint64
// The file or pseudofile (or empty==anonymous)
Pathname string
}
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
toks := strings.Split(s, ":")
if len(toks) < 2 {
return 0, fmt.Errorf("unexpected number of fields")
}
major, err := strconv.ParseUint(toks[0], 16, 0)
if err != nil {
return 0, err
}
minor, err := strconv.ParseUint(toks[1], 16, 0)
if err != nil {
return 0, err
}
return unix.Mkdev(uint32(major), uint32(minor)), nil
}
// parseAddress just converts a hex-string to a uintptr
func parseAddress(s string) (uintptr, error) {
a, err := strconv.ParseUint(s, 16, 0)
if err != nil {
return 0, err
}
return uintptr(a), nil
}
// parseAddresses parses the start-end address
func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-")
if len(toks) < 2 {
return 0, 0, fmt.Errorf("invalid address")
}
saddr, err := parseAddress(toks[0])
if err != nil {
return 0, 0, err
}
eaddr, err := parseAddress(toks[1])
if err != nil {
return 0, 0, err
}
return saddr, eaddr, nil
}
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
return nil, fmt.Errorf("invalid permissions token")
}
perms := ProcMapPermissions{}
for _, ch := range s {
switch ch {
case 'r':
perms.Read = true
case 'w':
perms.Write = true
case 'x':
perms.Execute = true
case 'p':
perms.Private = true
case 's':
perms.Shared = true
}
}
return &perms, nil
}
// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
// buffer.
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
return nil, fmt.Errorf("truncated procmap entry")
}
saddr, eaddr, err := parseAddresses(fields[0])
if err != nil {
return nil, err
}
perms, err := parsePermissions(fields[1])
if err != nil {
return nil, err
}
offset, err := strconv.ParseInt(fields[2], 16, 0)
if err != nil {
return nil, err
}
device, err := parseDevice(fields[3])
if err != nil {
return nil, err
}
inode, err := strconv.ParseUint(fields[4], 10, 0)
if err != nil {
return nil, err
}
pathname := ""
if len(fields) >= 5 {
pathname = strings.Join(fields[5:], " ")
}
return &ProcMap{
StartAddr: saddr,
EndAddr: eaddr,
Perms: perms,
Offset: offset,
Dev: device,
Inode: inode,
Pathname: pathname,
}, nil
}
// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
// process.
func (p Proc) ProcMaps() ([]*ProcMap, error) {
file, err := os.Open(p.path("maps"))
if err != nil {
return nil, err
}
defer file.Close()
maps := []*ProcMap{}
scan := bufio.NewScanner(file)
for scan.Scan() {
m, err := parseProcMap(scan.Text())
if err != nil {
return nil, err
}
maps = append(maps, m)
}
return maps, nil
}
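A hedged usage sketch for the new ProcMaps API, reading the current process's own mappings; it assumes the vendored procfs package above and runs on Linux only:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // Proc for the current process
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		fmt.Printf("%x-%x r=%t w=%t x=%t %s\n",
			m.StartAddr, m.EndAddr, m.Perms.Read, m.Perms.Write, m.Perms.Execute, m.Pathname)
	}
}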

View File

@ -33,37 +33,37 @@ type ProcStatus struct {
TGID int
// Peak virtual memory size.
VmPeak uint64
VmPeak uint64 // nolint:golint
// Virtual memory size.
VmSize uint64
VmSize uint64 // nolint:golint
// Locked memory size.
VmLck uint64
VmLck uint64 // nolint:golint
// Pinned memory size.
VmPin uint64
VmPin uint64 // nolint:golint
// Peak resident set size.
VmHWM uint64
VmHWM uint64 // nolint:golint
// Resident set size (sum of RssAnnon RssFile and RssShmem).
VmRSS uint64
VmRSS uint64 // nolint:golint
// Size of resident anonymous memory.
RssAnon uint64
RssAnon uint64 // nolint:golint
// Size of resident file mappings.
RssFile uint64
RssFile uint64 // nolint:golint
// Size of resident shared memory.
RssShmem uint64
RssShmem uint64 // nolint:golint
// Size of data segments.
VmData uint64
VmData uint64 // nolint:golint
// Size of stack segments.
VmStk uint64
VmStk uint64 // nolint:golint
// Size of text segments.
VmExe uint64
VmExe uint64 // nolint:golint
// Shared library code size.
VmLib uint64
VmLib uint64 // nolint:golint
// Page table entries size.
VmPTE uint64
VmPTE uint64 // nolint:golint
// Size of second-level page tables.
VmPMD uint64
VmPMD uint64 // nolint:golint
// Swapped-out virtual memory size by anonymous private.
VmSwap uint64
VmSwap uint64 // nolint:golint
// Size of hugetlb memory portions
HugetlbPages uint64
@ -71,6 +71,9 @@ type ProcStatus struct {
VoluntaryCtxtSwitches uint64
// Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (real, effective, saved set, and filesystem UIDs)
UIDs [4]string
}
// NewStatus returns the current status information of the process.
@ -114,6 +117,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":

89
vendor/github.com/prometheus/procfs/swaps.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Swap represents an entry in /proc/swaps.
type Swap struct {
Filename string
Type string
Size int
Used int
Priority int
}
// Swaps returns a slice of all configured swap devices on the system.
func (fs FS) Swaps() ([]*Swap, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
if err != nil {
return nil, err
}
return parseSwaps(data)
}
func parseSwaps(info []byte) ([]*Swap, error) {
swaps := []*Swap{}
scanner := bufio.NewScanner(bytes.NewReader(info))
scanner.Scan() // ignore header line
for scanner.Scan() {
swapString := scanner.Text()
parsedSwap, err := parseSwapString(swapString)
if err != nil {
return nil, err
}
swaps = append(swaps, parsedSwap)
}
err := scanner.Err()
return swaps, err
}
func parseSwapString(swapString string) (*Swap, error) {
var err error
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
}
swap := &Swap{
Filename: swapFields[0],
Type: swapFields[1],
}
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
}
return swap, nil
}
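A hedged usage sketch for the new Swaps API, assuming the vendored procfs package above; /proc/swaps reports size and usage in KiB:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		fmt.Printf("%s type=%s size=%d used=%d prio=%d\n",
			s.Filename, s.Type, s.Size, s.Used, s.Priority)
	}
}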

View File

@ -6,7 +6,7 @@ package oauth2
import (
"errors"
"io"
"log"
"net/http"
"sync"
)
@ -25,9 +25,6 @@ type Transport struct {
// Base is the base RoundTripper used to make HTTP requests.
// If nil, http.DefaultTransport is used.
Base http.RoundTripper
mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}
// RoundTrip authorizes and authenticates the request with an
@ -52,35 +49,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req2 := cloneRequest(req) // per RoundTripper contract
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
// req.Body is assumed to have been closed by the base RoundTripper.
// req.Body is assumed to be closed by the base RoundTripper.
reqBodyClosed = true
if err != nil {
t.setModReq(req, nil)
return nil, err
}
res.Body = &onEOFReader{
rc: res.Body,
fn: func() { t.setModReq(req, nil) },
}
return res, nil
return t.base().RoundTrip(req2)
}
// CancelRequest cancels an in-flight request by closing its connection.
var cancelOnce sync.Once
// CancelRequest does nothing. It used to be a legacy cancellation mechanism
// but now it only logs on first use to warn that it's deprecated.
//
// Deprecated: use contexts for cancellation instead.
func (t *Transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
t.mu.Lock()
modReq := t.modReq[req]
delete(t.modReq, req)
t.mu.Unlock()
cr.CancelRequest(modReq)
}
cancelOnce.Do(func() {
log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts")
})
}
func (t *Transport) base() http.RoundTripper {
@ -90,19 +74,6 @@ func (t *Transport) base() http.RoundTripper {
return http.DefaultTransport
}
func (t *Transport) setModReq(orig, mod *http.Request) {
t.mu.Lock()
defer t.mu.Unlock()
if t.modReq == nil {
t.modReq = make(map[*http.Request]*http.Request)
}
if mod == nil {
delete(t.modReq, orig)
} else {
t.modReq[orig] = mod
}
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
@ -116,29 +87,3 @@ func cloneRequest(r *http.Request) *http.Request {
}
return r2
}
type onEOFReader struct {
rc io.ReadCloser
fn func()
}
func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}
func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}
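Since Transport.CancelRequest is now a deprecated no-op, callers should cancel through the request context instead. A minimal hedged sketch; the token and URL are placeholders, not from this change:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-token"})
	client := oauth2.NewClient(context.Background(), ts)

	// Cancellation now flows through the request context.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com/api", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}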

View File

@ -223,7 +223,12 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) {
// canceled, or the expected wait time exceeds the Context's Deadline.
// The burst limit is ignored if the rate limit is Inf.
func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
if n > lim.burst && lim.limit != Inf {
lim.mu.Lock()
burst := lim.burst
limit := lim.limit
lim.mu.Unlock()
if n > burst && limit != Inf {
return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
}
// Check if ctx is already cancelled
@ -281,6 +286,23 @@ func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
lim.limit = newLimit
}
// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst).
func (lim *Limiter) SetBurst(newBurst int) {
lim.SetBurstAt(time.Now(), newBurst)
}
// SetBurstAt sets a new burst size for the limiter.
func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
now, _, tokens := lim.advance(now)
lim.last = now
lim.tokens = tokens
lim.burst = newBurst
}
// reserveN is a helper method for AllowN, ReserveN, and WaitN.
// maxFutureReserve specifies the maximum reservation wait duration allowed.
// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
@ -370,5 +392,9 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
// which could be accumulated during that duration at a rate of limit tokens per second.
func (limit Limit) tokensFromDuration(d time.Duration) float64 {
return d.Seconds() * float64(limit)
// Split the integer and fractional parts ourself to minimize rounding errors.
// See golang.org/issues/34861.
sec := float64(d/time.Second) * float64(limit)
nsec := float64(d%time.Second) * float64(limit)
return sec + nsec/1e9
}
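The new SetBurst/SetBurstAt methods pair with WaitN, which now snapshots burst and limit under the lock. A hedged usage sketch with arbitrary values, assuming the vendored golang.org/x/time/rate above:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Limit(10), 1) // 10 tokens/s, burst 1
	lim.SetBurst(5)                           // raise the burst at runtime

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.WaitN(ctx, 5); err != nil {
		log.Fatal(err)
	}
	fmt.Println("acquired 5 tokens")
}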

View File

@ -48,6 +48,22 @@ const (
// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
SeccompProfileRuntimeDefault string = "runtime/default"
// AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile.
AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
// AppArmorBetaDefaultProfileAnnotationKey is the annotation key specifying the default AppArmor profile.
AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName"
// AppArmorBetaAllowedProfilesAnnotationKey is the annotation key specifying the allowed AppArmor profiles.
AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames"
// AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default.
AppArmorBetaProfileRuntimeDefault = "runtime/default"
// AppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node.
AppArmorBetaProfileNamePrefix = "localhost/"
// AppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile
AppArmorBetaProfileNameUnconfined = "unconfined"
// DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
// This is now deprecated and should be replaced by SeccompProfileRuntimeDefault.
DeprecatedSeccompProfileDockerDefault string = "docker/default"
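These beta AppArmor keys are consumed as pod annotations. A hedged illustration of how they compose; the container name "app" is hypothetical and the sketch assumes the vendored k8s.io/api package above:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	annotations := map[string]string{
		// Per-container key: prefix + container name, value is the profile reference.
		v1.AppArmorBetaContainerAnnotationKeyPrefix + "app": v1.AppArmorBetaProfileRuntimeDefault,
		// Pod-level seccomp annotation using the runtime default profile.
		v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
	}
	fmt.Println(annotations)
}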

File diff suppressed because it is too large

View File

@ -459,7 +459,7 @@ message ConfigMap {
// be updated (only object metadata can be modified).
// If not set to true, the field can be modified at any time.
// Defaulted to nil.
// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
// This is a beta field enabled by ImmutableEphemeralVolumes feature gate.
// +optional
optional bool immutable = 4;
@ -588,8 +588,10 @@ message ConfigMapVolumeSource {
// +optional
repeated KeyToPath items = 2;
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -957,8 +959,10 @@ message DownwardAPIVolumeFile {
// +optional
optional ResourceFieldSelector resourceFieldRef = 3;
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// Optional: mode bits used to set permissions on this file, must be an octal value
// between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
@ -973,7 +977,10 @@ message DownwardAPIVolumeSource {
repeated DownwardAPIVolumeFile items = 1;
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -1048,7 +1055,8 @@ message EndpointPort {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// Field can be enabled with ServiceAppProtocol feature gate.
// This is a beta field that is guarded by the ServiceAppProtocol feature
// gate and enabled by default.
// +optional
optional string appProtocol = 4;
}
@ -1158,7 +1166,7 @@ message EnvVar {
// EnvVarSource represents a source for the value of an EnvVar.
message EnvVarSource {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
optional ObjectFieldSelector fieldRef = 1;
@ -1447,10 +1455,6 @@ message EventSeries {
// Time of the last occurrence observed
optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
// State of this Series: Ongoing or Finished
// Deprecated. Planned removal for 1.18
optional string state = 3;
}
// EventSource contains information for an event.
@ -1880,8 +1884,10 @@ message KeyToPath {
// May not start with the string '..'.
optional string path = 2;
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// Optional: mode bits used to set permissions on this file.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
@ -2277,7 +2283,7 @@ message NodeProxyOptions {
}
// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
message NodeResources {
// Capacity represents the available resources of a node
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
@ -2431,7 +2437,7 @@ message NodeSystemInfo {
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
// https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
optional string systemUUID = 2;
// Boot ID reported by the node.
@ -3537,7 +3543,6 @@ message PodSpec {
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// This field is only honored by clusters that enable the EvenPodsSpread feature.
// All topologySpreadConstraints are ANDed.
// +optional
// +patchMergeKey=topologyKey
@ -3788,8 +3793,9 @@ message ProjectedVolumeSource {
// list of volume projections
repeated VolumeProjection sources = 1;
// Mode bits to use on created files by default. Must be a value between
// 0 and 0777.
// Mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -4299,7 +4305,7 @@ message Secret {
// be updated (only object metadata can be modified).
// If not set to true, the field can be modified at any time.
// Defaulted to nil.
// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
// This is a beta field enabled by ImmutableEphemeralVolumes feature gate.
// +optional
optional bool immutable = 5;
@ -4419,8 +4425,10 @@ message SecretVolumeSource {
// +optional
repeated KeyToPath items = 2;
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values
// for mode bits. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -4634,7 +4642,8 @@ message ServicePort {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// Field can be enabled with ServiceAppProtocol feature gate.
// This is a beta field that is guarded by the ServiceAppProtocol feature
// gate and enabled by default.
// +optional
optional string appProtocol = 6;
@ -4994,8 +5003,8 @@ message TopologySelectorTerm {
// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
message TopologySpreadConstraint {
// MaxSkew describes the degree to which pods may be unevenly distributed.
// It's the maximum permitted difference between the number of matching pods in
// any two topology domains of a given topology type.
// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
// between the number of matching pods in the target topology and the global minimum.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 1/1/0:
// +-------+-------+-------+
@ -5007,6 +5016,8 @@ message TopologySpreadConstraint {
// scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
// violate MaxSkew(1).
// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
// to topologies that satisfy it.
// It's a required field. Default value is 1 and 0 is not allowed.
optional int32 maxSkew = 1;
@ -5019,10 +5030,13 @@ message TopologySpreadConstraint {
// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
// the spread constraint.
// - DoNotSchedule (default) tells the scheduler not to schedule it
// - ScheduleAnyway tells the scheduler to still schedule it
// It's considered as "Unsatisfiable" if and only if placing incoming pod on any
// topology violates "MaxSkew".
// - DoNotSchedule (default) tells the scheduler not to schedule it.
// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
// but giving higher precedence to topologies that would help reduce the
// skew.
// A constraint is considered "Unsatisfiable" for an incoming pod
// if and only if every possible node assignment for that pod would violate
// "MaxSkew" on some topology.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 3/1/1:
// +-------+-------+-------+

81
vendor/k8s.io/api/core/v1/types.go generated vendored
View File

@ -1092,8 +1092,10 @@ type SecretVolumeSource struct {
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values
// for mode bits. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -1518,8 +1520,10 @@ type ConfigMapVolumeSource struct {
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -1585,8 +1589,9 @@ type ServiceAccountTokenProjection struct {
type ProjectedVolumeSource struct {
// list of volume projections
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
// Mode bits to use on created files by default. Must be a value between
// 0 and 0777.
// Mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -1626,8 +1631,10 @@ type KeyToPath struct {
// May not contain the path element '..'.
// May not start with the string '..'.
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// Optional: mode bits used to set permissions on this file.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
@ -1850,7 +1857,7 @@ type EnvVar struct {
// EnvVarSource represents a source for the value of an EnvVar.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
@ -3040,7 +3047,6 @@ type PodSpec struct {
Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// This field is only honored by clusters that enable the EvenPodsSpread feature.
// All topologySpreadConstraints are ANDed.
// +optional
// +patchMergeKey=topologyKey
@ -3065,8 +3071,8 @@ const (
// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
type TopologySpreadConstraint struct {
// MaxSkew describes the degree to which pods may be unevenly distributed.
// It's the maximum permitted difference between the number of matching pods in
// any two topology domains of a given topology type.
// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
// between the number of matching pods in the target topology and the global minimum.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 1/1/0:
// +-------+-------+-------+
@ -3078,6 +3084,8 @@ type TopologySpreadConstraint struct {
// scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
// violate MaxSkew(1).
// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
// to topologies that satisfy it.
// It's a required field. Default value is 1 and 0 is not allowed.
MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"`
// TopologyKey is the key of node labels. Nodes that have a label with this key
@ -3088,10 +3096,13 @@ type TopologySpreadConstraint struct {
TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"`
// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
// the spread constraint.
// - DoNotSchedule (default) tells the scheduler not to schedule it
// - ScheduleAnyway tells the scheduler to still schedule it
// It's considered as "Unsatisfiable" if and only if placing incoming pod on any
// topology violates "MaxSkew".
// - DoNotSchedule (default) tells the scheduler not to schedule it.
// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
// but giving higher precedence to topologies that would help reduce the
// skew.
// A constraint is considered "Unsatisfiable" for an incoming pod
// if and only if every possible node assignment for that pod would violate
// "MaxSkew" on some topology.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 3/1/1:
// +-------+-------+-------+
@ -4020,7 +4031,8 @@ type ServicePort struct {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// Field can be enabled with ServiceAppProtocol feature gate.
// This is a beta field that is guarded by the ServiceAppProtocol feature
// gate and enabled by default.
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
@ -4246,7 +4258,8 @@ type EndpointPort struct {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// Field can be enabled with ServiceAppProtocol feature gate.
// This is a beta field that is guarded by the ServiceAppProtocol feature
// gate and enabled by default.
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"`
}
@ -4368,7 +4381,7 @@ type NodeSystemInfo struct {
MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
// https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
@ -5206,19 +5219,10 @@ type EventSeries struct {
Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
// Time of the last occurrence observed
LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
// State of this Series: Ongoing or Finished
// Deprecated. Planned removal for 1.18
State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
// +k8s:deprecated=state,protobuf=3
}
type EventSeriesState string
const (
EventSeriesStateOngoing EventSeriesState = "Ongoing"
EventSeriesStateFinished EventSeriesState = "Finished"
EventSeriesStateUnknown EventSeriesState = "Unknown"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
@ -5486,7 +5490,7 @@ type Secret struct {
// be updated (only object metadata can be modified).
// If not set to true, the field can be modified at any time.
// Defaulted to nil.
// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
// This is a beta field enabled by ImmutableEphemeralVolumes feature gate.
// +optional
Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"`
@ -5627,7 +5631,7 @@ type ConfigMap struct {
// be updated (only object metadata can be modified).
// If not set to true, the field can be modified at any time.
// Defaulted to nil.
// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
// This is a beta field enabled by ImmutableEphemeralVolumes feature gate.
// +optional
Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"`
@ -5730,7 +5734,10 @@ type DownwardAPIVolumeSource struct {
// +optional
Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
@ -5753,8 +5760,10 @@ type DownwardAPIVolumeFile struct {
// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
// +optional
ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// Optional: mode bits used to set permissions on this file, must be an octal value
// between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
@ -5920,7 +5929,7 @@ type Sysctl struct {
}
// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
type NodeResources struct {
// Capacity represents the available resources of a node
Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`

View File

@ -252,7 +252,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string {
var map_ConfigMap = map[string]string{
"": "ConfigMap holds configuration data for pods to consume.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"immutable": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.",
"immutable": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is a beta field enabled by ImmutableEphemeralVolumes feature gate.",
"data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.",
"binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.",
}
@ -316,7 +316,7 @@ func (ConfigMapProjection) SwaggerDoc() map[string]string {
var map_ConfigMapVolumeSource = map[string]string{
"": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
"items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"defaultMode": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"optional": "Specify whether the ConfigMap or its keys must be defined",
}
@ -462,7 +462,7 @@ var map_DownwardAPIVolumeFile = map[string]string{
"path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
"fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.",
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
"mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"mode": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
}
func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string {
@ -472,7 +472,7 @@ func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string {
var map_DownwardAPIVolumeSource = map[string]string{
"": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
"items": "Items is a list of downward API volume file",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
}
func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
@ -506,7 +506,7 @@ var map_EndpointPort = map[string]string{
"name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
"port": "The port number of the endpoint.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
@ -568,7 +568,7 @@ func (EnvVar) SwaggerDoc() map[string]string {
var map_EnvVarSource = map[string]string{
"": "EnvVarSource represents a source for the value of an EnvVar.",
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
"fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
"configMapKeyRef": "Selects a key of a ConfigMap.",
"secretKeyRef": "Selects a key of a secret in the pod's namespace",
@ -663,7 +663,6 @@ var map_EventSeries = map[string]string{
"": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
"count": "Number of occurrences in this series up to the last heartbeat time",
"lastObservedTime": "Time of the last occurrence observed",
"state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18",
}
func (EventSeries) SwaggerDoc() map[string]string {
@ -880,7 +879,7 @@ var map_KeyToPath = map[string]string{
"": "Maps a string key to a path within a volume.",
"key": "The key to project.",
"path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
"mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"mode": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
}
func (KeyToPath) SwaggerDoc() map[string]string {
@ -1134,7 +1133,7 @@ func (NodeProxyOptions) SwaggerDoc() map[string]string {
}
var map_NodeResources = map[string]string{
"": "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.",
"": "NodeResources is an object for conveying resource information about a node. see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.",
"Capacity": "Capacity represents the available resources of a node",
}
@ -1209,7 +1208,7 @@ func (NodeStatus) SwaggerDoc() map[string]string {
var map_NodeSystemInfo = map[string]string{
"": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
"machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
"systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html",
"systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid",
"bootID": "Boot ID reported by the node.",
"kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).",
"osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
@ -1634,7 +1633,7 @@ var map_PodSpec = map[string]string{
"enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
"preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
"overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
}
func (PodSpec) SwaggerDoc() map[string]string {
@ -1760,7 +1759,7 @@ func (Probe) SwaggerDoc() map[string]string {
var map_ProjectedVolumeSource = map[string]string{
"": "Represents a projected volume source",
"sources": "list of volume projections",
"defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"defaultMode": "Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
}
func (ProjectedVolumeSource) SwaggerDoc() map[string]string {
@ -2018,7 +2017,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
var map_Secret = map[string]string{
"": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.",
"immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is a beta field enabled by ImmutableEphemeralVolumes feature gate.",
"data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
"stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
"type": "Used to facilitate programmatic handling of secret data.",
@ -2081,7 +2080,7 @@ var map_SecretVolumeSource = map[string]string{
"": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
"secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
"items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
"defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"defaultMode": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
"optional": "Specify whether the Secret or its keys must be defined",
}
@ -2174,7 +2173,7 @@ var map_ServicePort = map[string]string{
"": "ServicePort contains information on service's port.",
"name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
"protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.",
"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.",
"port": "The port that will be exposed by this service.",
"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
"nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
@ -2326,9 +2325,9 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string {
var map_TopologySpreadConstraint = map[string]string{
"": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
"maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ",
"maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ",
"topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.",
"whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
"whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assigment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
"labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
}
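For illustration only, a minimal sketch of the TopologySpreadConstraint documented above, assuming the vendored k8s.io/api/core/v1 and k8s.io/apimachinery types at this revision (field and constant names come from those packages, the label and topology key are example values):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Spread pods labelled app=web across zones; with maxSkew=1 and
	// DoNotSchedule, a pod is not placed where doing so would push the
	// matching-pod count in that zone more than 1 above the global minimum.
	constraint := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
	}
	fmt.Printf("%+v\n", constraint)
}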

4
vendor/k8s.io/api/go.mod generated vendored
View File

@ -7,11 +7,11 @@ go 1.13
require (
github.com/gogo/protobuf v1.3.1
github.com/stretchr/testify v1.4.0
k8s.io/apimachinery v0.18.2
k8s.io/apimachinery v0.19.0-beta.2
)
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/apimachinery => k8s.io/apimachinery v0.18.2
k8s.io/apimachinery => k8s.io/apimachinery v0.19.0-beta.2
)

21
vendor/k8s.io/apimachinery/go.mod generated vendored
View File

@ -9,30 +9,29 @@ require (
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153
github.com/evanphx/json-patch v4.2.0+incompatible
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gogo/protobuf v1.3.1
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903
github.com/golang/protobuf v1.3.2
github.com/google/go-cmp v0.3.0
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7
github.com/golang/protobuf v1.4.2
github.com/google/go-cmp v0.4.0
github.com/google/gofuzz v1.1.0
github.com/google/uuid v1.1.1
github.com/googleapis/gnostic v0.1.0
github.com/googleapis/gnostic v0.4.1
github.com/hashicorp/golang-lru v0.5.1
github.com/json-iterator/go v1.1.8
github.com/kr/pretty v0.1.0 // indirect
github.com/json-iterator/go v1.1.9
github.com/modern-go/reflect2 v1.0.1
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
github.com/onsi/ginkgo v1.11.0 // indirect
github.com/onsi/gomega v1.7.0 // indirect
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.4.0
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 // indirect
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f // indirect
golang.org/x/text v0.3.2 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v2 v2.2.8
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
k8s.io/klog/v2 v2.1.0
k8s.io/kube-openapi v0.0.0-20200427153329-656914f816f9
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
sigs.k8s.io/yaml v1.2.0
)

View File

@ -18,6 +18,7 @@ package errors
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"reflect"
@ -29,14 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
const (
// StatusTooManyRequests means the server experienced too many requests within a
// given window and that the client must wait to perform the action again.
// DEPRECATED: please use http.StatusTooManyRequests, this will be removed in
// the future version.
StatusTooManyRequests = http.StatusTooManyRequests
)
// StatusError is an error intended for consumption by a REST API server; it can also be
// reconstructed by clients from a REST response. Public to allow easy type switches.
type StatusError struct {
@ -483,127 +476,141 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr
}
// IsNotFound returns true if the specified error was created by NewNotFound.
// It supports wrapped errors.
func IsNotFound(err error) bool {
return ReasonForError(err) == metav1.StatusReasonNotFound
}
// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
// It supports wrapped errors.
func IsAlreadyExists(err error) bool {
return ReasonForError(err) == metav1.StatusReasonAlreadyExists
}
// IsConflict determines if the err is an error which indicates the provided update conflicts.
// It supports wrapped errors.
func IsConflict(err error) bool {
return ReasonForError(err) == metav1.StatusReasonConflict
}
// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
// It supports wrapped errors.
func IsInvalid(err error) bool {
return ReasonForError(err) == metav1.StatusReasonInvalid
}
// IsGone is true if the error indicates the requested resource is no longer available.
// It supports wrapped errors.
func IsGone(err error) bool {
return ReasonForError(err) == metav1.StatusReasonGone
}
// IsResourceExpired is true if the error indicates the resource has expired and the current action is
// no longer possible.
// It supports wrapped errors.
func IsResourceExpired(err error) bool {
return ReasonForError(err) == metav1.StatusReasonExpired
}
// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header
// It supports wrapped errors.
func IsNotAcceptable(err error) bool {
return ReasonForError(err) == metav1.StatusReasonNotAcceptable
}
// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header
// It supports wrapped errors.
func IsUnsupportedMediaType(err error) bool {
return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType
}
// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
// be performed because it is not supported by the server.
// It supports wrapped errors.
func IsMethodNotSupported(err error) bool {
return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed
}
// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
// It supports wrapped errors.
func IsServiceUnavailable(err error) bool {
return ReasonForError(err) == metav1.StatusReasonServiceUnavailable
}
// IsBadRequest determines if err is an error which indicates that the request is invalid.
// It supports wrapped errors.
func IsBadRequest(err error) bool {
return ReasonForError(err) == metav1.StatusReasonBadRequest
}
// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
// requires authentication by the user.
// It supports wrapped errors.
func IsUnauthorized(err error) bool {
return ReasonForError(err) == metav1.StatusReasonUnauthorized
}
// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
// be completed as requested.
// It supports wrapped errors.
func IsForbidden(err error) bool {
return ReasonForError(err) == metav1.StatusReasonForbidden
}
// IsTimeout determines if err is an error which indicates that request times out due to long
// processing.
// It supports wrapped errors.
func IsTimeout(err error) bool {
return ReasonForError(err) == metav1.StatusReasonTimeout
}
// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
// by the client.
// It supports wrapped errors.
func IsServerTimeout(err error) bool {
return ReasonForError(err) == metav1.StatusReasonServerTimeout
}
// IsInternalError determines if err is an error which indicates an internal server error.
// It supports wrapped errors.
func IsInternalError(err error) bool {
return ReasonForError(err) == metav1.StatusReasonInternalError
}
// IsTooManyRequests determines if err is an error which indicates that there are too many requests
// that the server cannot handle.
// It supports wrapped errors.
func IsTooManyRequests(err error) bool {
if ReasonForError(err) == metav1.StatusReasonTooManyRequests {
return true
}
switch t := err.(type) {
case APIStatus:
return t.Status().Code == http.StatusTooManyRequests
if status := APIStatus(nil); errors.As(err, &status) {
return status.Status().Code == http.StatusTooManyRequests
}
return false
}
// IsRequestEntityTooLargeError determines if err is an error which indicates
// the request entity is too large.
// It supports wrapped errors.
func IsRequestEntityTooLargeError(err error) bool {
if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
return true
}
switch t := err.(type) {
case APIStatus:
return t.Status().Code == http.StatusRequestEntityTooLarge
if status := APIStatus(nil); errors.As(err, &status) {
return status.Status().Code == http.StatusRequestEntityTooLarge
}
return false
}
// IsUnexpectedServerError returns true if the server response was not in the expected API format,
// and may be the result of another HTTP actor.
// It supports wrapped errors.
func IsUnexpectedServerError(err error) bool {
switch t := err.(type) {
case APIStatus:
if d := t.Status().Details; d != nil {
for _, cause := range d.Causes {
if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
return true
}
if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil {
for _, cause := range status.Status().Details.Causes {
if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
return true
}
}
}
@ -611,38 +618,37 @@ func IsUnexpectedServerError(err error) bool {
}
// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
// It supports wrapped errors.
func IsUnexpectedObjectError(err error) bool {
_, ok := err.(*UnexpectedObjectError)
return err != nil && ok
uoe := &UnexpectedObjectError{}
return err != nil && errors.As(err, &uoe)
}
// SuggestsClientDelay returns true if this error suggests a client delay as well as the
// suggested seconds to wait, or false if the error does not imply a wait. It does not
// address whether the error *should* be retried, since some errors (like a 3xx) may
// request delay without retry.
// It supports wrapped errors.
func SuggestsClientDelay(err error) (int, bool) {
switch t := err.(type) {
case APIStatus:
if t.Status().Details != nil {
switch t.Status().Reason {
// this StatusReason explicitly requests the caller to delay the action
case metav1.StatusReasonServerTimeout:
return int(t.Status().Details.RetryAfterSeconds), true
}
// If the client requests that we retry after a certain number of seconds
if t.Status().Details.RetryAfterSeconds > 0 {
return int(t.Status().Details.RetryAfterSeconds), true
}
if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil {
switch t.Status().Reason {
// this StatusReason explicitly requests the caller to delay the action
case metav1.StatusReasonServerTimeout:
return int(t.Status().Details.RetryAfterSeconds), true
}
// If the client requests that we retry after a certain number of seconds
if t.Status().Details.RetryAfterSeconds > 0 {
return int(t.Status().Details.RetryAfterSeconds), true
}
}
return 0, false
}
// ReasonForError returns the HTTP status for a particular error.
// It supports wrapped errors.
func ReasonForError(err error) metav1.StatusReason {
switch t := err.(type) {
case APIStatus:
return t.Status().Reason
if status := APIStatus(nil); errors.As(err, &status) {
return status.Status().Reason
}
return metav1.StatusReasonUnknown
}
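The helpers above now unwrap with errors.As, so reason checks keep working when callers annotate API errors with fmt.Errorf and %w. A minimal sketch, assuming the vendored apimachinery error helpers at this revision:

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// A NotFound StatusError, wrapped the way callers commonly add context.
	base := apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "mypod")
	wrapped := fmt.Errorf("fetching pod: %w", base)

	// Because the helpers unwrap with errors.As, the reason survives wrapping.
	fmt.Println(apierrors.IsNotFound(wrapped))      // true
	fmt.Println(apierrors.ReasonForError(wrapped))  // NotFound
}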

View File

@ -1,20 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1
package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion"

View File

@ -1,86 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// addToGroupVersion registers common meta types into schemas.
func addToGroupVersion(scheme *runtime.Scheme) error {
if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
return err
}
// ListOptions is the only options struct which needs conversion (it exposes labels and fields
// as selectors for convenience). The other types have only a single representation today.
scheme.AddKnownTypes(SchemeGroupVersion,
&ListOptions{},
&metav1.GetOptions{},
&metav1.ExportOptions{},
&metav1.DeleteOptions{},
&metav1.CreateOptions{},
&metav1.UpdateOptions{},
)
scheme.AddKnownTypes(SchemeGroupVersion,
&metav1.Table{},
&metav1.TableOptions{},
&metav1beta1.PartialObjectMetadata{},
&metav1beta1.PartialObjectMetadataList{},
)
if err := metav1beta1.AddMetaToScheme(scheme); err != nil {
return err
}
if err := metav1.AddMetaToScheme(scheme); err != nil {
return err
}
// Allow delete options to be decoded across all version in this scheme (we may want to be more clever than this)
scheme.AddUnversionedTypes(SchemeGroupVersion,
&metav1.DeleteOptions{},
&metav1.CreateOptions{},
&metav1.UpdateOptions{})
metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
return nil
}
// Unlike other API groups, meta internal knows about all meta external versions, but keeps
// the logic for conversion private.
func init() {
localSchemeBuilder.Register(addToGroupVersion)
}

View File

@ -1,76 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ListOptions is the query options to a standard REST list call.
type ListOptions struct {
metav1.TypeMeta
// A selector based on labels
LabelSelector labels.Selector
// A selector based on fields
FieldSelector fields.Selector
// If true, watch for changes to this list
Watch bool
// allowWatchBookmarks requests watch events with type "BOOKMARK".
// Servers that do not implement bookmarks may ignore this flag and
// bookmarks are sent at the server's discretion. Clients should not
// assume bookmarks are returned at any specific interval, nor may they
// assume the server will send any BOOKMARK event during a session.
// If this is not a watch, this field is ignored.
// If the feature gate WatchBookmarks is not enabled in apiserver,
// this field is ignored.
AllowWatchBookmarks bool
// When specified with a watch call, shows changes that occur after that particular version of a resource.
// Defaults to changes from the beginning of history.
// When specified for list:
// - if unset, then the result is returned from remote storage based on quorum-read flag;
// - if it's 0, then we simply return what we currently have in cache, no guarantee;
// - if set to non zero, then the result is at least as fresh as given rv.
ResourceVersion string
// Timeout for the list/watch call.
TimeoutSeconds *int64
// Limit specifies the maximum number of results to return from the server. The server may
// not support this field on all resource types, but if it does and more results remain it
// will set the continue field on the returned list object.
Limit int64
// Continue is a token returned by the server that lets a client retrieve chunks of results
// from the server by specifying limit. The server may reject requests for continuation tokens
// it does not recognize and will return a 410 error if the token can no longer be used because
// it has expired.
Continue string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
type List struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []runtime.Object
}
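The Limit/Continue comments above describe chunked list calls. A minimal sketch using the external metav1.ListOptions (the internal type deleted here mirrors it, but with typed selectors); the continue token below is a placeholder, not a real value:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// First chunk: at most 50 results; servers that support chunking set a
	// continue token on the returned list when more results remain.
	first := metav1.ListOptions{Limit: 50}
	fmt.Printf("%+v\n", first)

	// Next chunk: resend the token. The server may answer 410 Gone once the
	// token has expired, as the Continue field comment above describes.
	next := metav1.ListOptions{Limit: 50, Continue: "<token-from-previous-list>"}
	fmt.Printf("%+v\n", next)
}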

View File

@ -1,143 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package internalversion
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_internalversion_List_To_v1_List(a.(*List), b.(*v1.List), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*List)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_List_To_internalversion_List(a.(*v1.List), b.(*List), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.RawExtension, len(*in))
for i := range *in {
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_internalversion_List_To_v1_List is an autogenerated conversion function.
func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
return autoConvert_internalversion_List_To_v1_List(in, out, s)
}
func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.Object, len(*in))
for i := range *in {
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
// Convert_v1_List_To_internalversion_List is an autogenerated conversion function.
func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
return autoConvert_v1_List_To_internalversion_List(in, out, s)
}
func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
return err
}
if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
return err
}
out.Watch = in.Watch
out.AllowWatchBookmarks = in.AllowWatchBookmarks
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
out.Limit = in.Limit
out.Continue = in.Continue
return nil
}
// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function.
func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s)
}
func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
return err
}
if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
return err
}
out.Watch = in.Watch
out.AllowWatchBookmarks = in.AllowWatchBookmarks
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
out.Limit = in.Limit
out.Continue = in.Continue
return nil
}
// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function.
func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s)
}

View File

@ -1,96 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package internalversion
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *List) DeepCopyInto(out *List) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]runtime.Object, len(*in))
for i := range *in {
if (*in)[i] != nil {
(*out)[i] = (*in)[i].DeepCopyObject()
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
func (in *List) DeepCopy() *List {
if in == nil {
return nil
}
out := new(List)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *List) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ListOptions) DeepCopyInto(out *ListOptions) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.LabelSelector != nil {
out.LabelSelector = in.LabelSelector.DeepCopySelector()
}
if in.FieldSelector != nil {
out.FieldSelector = in.FieldSelector.DeepCopySelector()
}
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
func (in *ListOptions) DeepCopy() *ListOptions {
if in == nil {
return nil
}
out := new(ListOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ListOptions) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

View File

@ -1297,177 +1297,177 @@ func init() {
}
var fileDescriptor_cf52fa777ced5367 = []byte{
// 2713 bytes of a gzipped FileDescriptorProto
// 2714 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59,
0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad,
0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x20, 0xe2, 0xec, 0x2c, 0xaa,
0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4,
0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35,
0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f,
0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e,
0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb,
0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e,
0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e,
0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3,
0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1,
0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff,
0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99,
0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d,
0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e,
0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35,
0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d,
0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92,
0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b,
0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71,
0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb,
0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e,
0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b,
0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30,
0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75,
0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2,
0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38,
0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe,
0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c,
0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6,
0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad,
0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac,
0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2,
0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f,
0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f,
0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15,
0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8,
0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78,
0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed,
0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60,
0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a,
0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23,
0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d,
0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea,
0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0,
0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c,
0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e,
0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36,
0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f,
0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b,
0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa,
0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60,
0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63,
0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71,
0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47,
0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d,
0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c,
0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21,
0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01,
0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32,
0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde,
0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70,
0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93,
0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e,
0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63,
0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c,
0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa,
0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c,
0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1,
0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9,
0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60,
0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee,
0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd,
0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4,
0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b,
0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9,
0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0,
0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b,
0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62,
0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7,
0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19,
0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e,
0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a,
0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51,
0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1,
0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68,
0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25,
0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e,
0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40,
0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3,
0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd,
0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c,
0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73,
0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7,
0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9,
0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74,
0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24,
0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6,
0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e,
0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8,
0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8,
0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56,
0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed,
0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13,
0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49,
0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c,
0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02,
0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f,
0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe,
0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45,
0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d,
0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e,
0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3,
0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea,
0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1,
0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e,
0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8,
0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93,
0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47,
0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c,
0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f,
0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42,
0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c,
0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1,
0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38,
0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42,
0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde,
0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d,
0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6,
0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95,
0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0,
0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75,
0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00,
0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43,
0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4,
0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72,
0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06,
0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13,
0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44,
0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77,
0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d,
0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66,
0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd,
0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff,
0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62,
0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd,
0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4,
0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b,
0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c,
0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac,
0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37,
0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6,
0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0,
0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea,
0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4,
0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74,
0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b,
0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86,
0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79,
0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e,
0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6,
0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b,
0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5,
0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11,
0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef,
0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5,
0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d,
0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d,
0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff,
0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00,
0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xb8, 0x79, 0x7e, 0xdf, 0xbf, 0xf7, 0x7e,
0xef, 0xf7, 0x65, 0xd8, 0x3a, 0xbc, 0xee, 0xd7, 0x6c, 0x6f, 0xf9, 0xb0, 0xb3, 0x47, 0xa8, 0x4b,
0x02, 0xe2, 0x2f, 0x1f, 0x11, 0xd7, 0xf2, 0xe8, 0xb2, 0x44, 0x18, 0x6d, 0xbb, 0x65, 0x98, 0x07,
0xb6, 0x4b, 0x68, 0x77, 0xb9, 0x7d, 0xd8, 0x64, 0x00, 0x7f, 0xb9, 0x45, 0x02, 0x63, 0xf9, 0x68,
0x65, 0xb9, 0x49, 0x5c, 0x42, 0x8d, 0x80, 0x58, 0xb5, 0x36, 0xf5, 0x02, 0x0f, 0x7d, 0x41, 0x70,
0xd5, 0xe2, 0x5c, 0xb5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0x1a, 0xe3, 0xaa, 0x1d, 0xad, 0xcc, 0xbf,
0xd6, 0xb4, 0x83, 0x83, 0xce, 0x5e, 0xcd, 0xf4, 0x5a, 0xcb, 0x4d, 0xaf, 0xe9, 0x2d, 0x73, 0xe6,
0xbd, 0xce, 0x3e, 0xff, 0xe2, 0x1f, 0xfc, 0x97, 0x10, 0x3a, 0x3f, 0xd4, 0x14, 0xda, 0x71, 0x03,
0xbb, 0x45, 0xfa, 0xad, 0x98, 0x7f, 0xe3, 0x3c, 0x06, 0xdf, 0x3c, 0x20, 0x2d, 0xa3, 0x9f, 0x4f,
0xff, 0x73, 0x16, 0x0a, 0xab, 0x3b, 0x9b, 0xb7, 0xa8, 0xd7, 0x69, 0xa3, 0x45, 0x18, 0x77, 0x8d,
0x16, 0xa9, 0x68, 0x8b, 0xda, 0x52, 0xb1, 0x3e, 0xf9, 0xac, 0x57, 0x1d, 0x3b, 0xe9, 0x55, 0xc7,
0xef, 0x18, 0x2d, 0x82, 0x39, 0x06, 0x39, 0x50, 0x38, 0x22, 0xd4, 0xb7, 0x3d, 0xd7, 0xaf, 0x64,
0x16, 0xb3, 0x4b, 0xa5, 0x6b, 0x37, 0x6b, 0x69, 0xfc, 0xaf, 0x71, 0x05, 0xf7, 0x05, 0xeb, 0x86,
0x47, 0x1b, 0xb6, 0x6f, 0x7a, 0x47, 0x84, 0x76, 0xeb, 0x33, 0x52, 0x4b, 0x41, 0x22, 0x7d, 0x1c,
0x6a, 0x40, 0x3f, 0xd1, 0x60, 0xa6, 0x4d, 0xc9, 0x3e, 0xa1, 0x94, 0x58, 0x12, 0x5f, 0xc9, 0x2e,
0x6a, 0x9f, 0x80, 0xda, 0x8a, 0x54, 0x3b, 0xb3, 0xd3, 0x27, 0x1f, 0x0f, 0x68, 0x44, 0xbf, 0xd3,
0x60, 0xde, 0x27, 0xf4, 0x88, 0xd0, 0x55, 0xcb, 0xa2, 0xc4, 0xf7, 0xeb, 0xdd, 0x35, 0xc7, 0x26,
0x6e, 0xb0, 0xb6, 0xd9, 0xc0, 0x7e, 0x65, 0x9c, 0x9f, 0xc3, 0x37, 0xd2, 0x19, 0xb4, 0x3b, 0x4c,
0x4e, 0x5d, 0x97, 0x16, 0xcd, 0x0f, 0x25, 0xf1, 0xf1, 0x0b, 0xcc, 0xd0, 0xf7, 0x61, 0x52, 0x5d,
0xe4, 0x6d, 0xdb, 0x0f, 0xd0, 0x7d, 0xc8, 0x37, 0xd9, 0x87, 0x5f, 0xd1, 0xb8, 0x81, 0xb5, 0x74,
0x06, 0x2a, 0x19, 0xf5, 0x29, 0x69, 0x4f, 0x9e, 0x7f, 0xfa, 0x58, 0x4a, 0xd3, 0x7f, 0x31, 0x0e,
0xa5, 0xd5, 0x9d, 0x4d, 0x4c, 0x7c, 0xaf, 0x43, 0x4d, 0x92, 0x22, 0x68, 0xae, 0xc3, 0xa4, 0x6f,
0xbb, 0xcd, 0x8e, 0x63, 0x50, 0x06, 0xad, 0xe4, 0x39, 0xe5, 0x05, 0x49, 0x39, 0xb9, 0x1b, 0xc3,
0xe1, 0x04, 0x25, 0xba, 0x06, 0xc0, 0x24, 0xf8, 0x6d, 0xc3, 0x24, 0x56, 0x25, 0xb3, 0xa8, 0x2d,
0x15, 0xea, 0x48, 0xf2, 0xc1, 0x9d, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x55, 0xc8, 0x71, 0x4b, 0x2b,
0x05, 0xae, 0xa6, 0x2c, 0xc9, 0x73, 0xdc, 0x0d, 0x2c, 0x70, 0xe8, 0x0a, 0x4c, 0xc8, 0x28, 0xab,
0x14, 0x39, 0xd9, 0xb4, 0x24, 0x9b, 0x50, 0x61, 0xa0, 0xf0, 0xcc, 0xbf, 0x43, 0xdb, 0xb5, 0x78,
0xdc, 0xc5, 0xfc, 0x7b, 0xc7, 0x76, 0x2d, 0xcc, 0x31, 0xe8, 0x36, 0xe4, 0x8e, 0x08, 0xdd, 0x63,
0x91, 0xc0, 0x42, 0xf3, 0x4b, 0xe9, 0x0e, 0xfa, 0x3e, 0x63, 0xa9, 0x17, 0x99, 0x69, 0xfc, 0x27,
0x16, 0x42, 0x50, 0x0d, 0xc0, 0x3f, 0xf0, 0x68, 0xc0, 0xdd, 0xab, 0xe4, 0x16, 0xb3, 0x4b, 0xc5,
0xfa, 0x14, 0xf3, 0x77, 0x37, 0x84, 0xe2, 0x18, 0x05, 0xa3, 0x37, 0x8d, 0x80, 0x34, 0x3d, 0x6a,
0x13, 0xbf, 0x32, 0x11, 0xd1, 0xaf, 0x85, 0x50, 0x1c, 0xa3, 0x40, 0xdf, 0x02, 0xe4, 0x07, 0x1e,
0x35, 0x9a, 0x44, 0xba, 0xfa, 0xb6, 0xe1, 0x1f, 0x54, 0x80, 0x7b, 0x37, 0x2f, 0xbd, 0x43, 0xbb,
0x03, 0x14, 0xf8, 0x0c, 0x2e, 0xfd, 0x0f, 0x1a, 0x4c, 0xc7, 0x62, 0x81, 0xc7, 0xdd, 0x75, 0x98,
0x6c, 0xc6, 0x5e, 0x9d, 0x8c, 0x8b, 0xf0, 0xb6, 0xe3, 0x2f, 0x12, 0x27, 0x28, 0x11, 0x81, 0x22,
0x95, 0x92, 0x54, 0x76, 0x59, 0x49, 0x1d, 0xb4, 0xca, 0x86, 0x48, 0x53, 0x0c, 0xe8, 0xe3, 0x48,
0xb2, 0xfe, 0x4f, 0x8d, 0x07, 0xb0, 0xca, 0x37, 0x68, 0x29, 0x96, 0xd3, 0x34, 0x7e, 0x7c, 0x93,
0x43, 0xf2, 0xd1, 0x39, 0x89, 0x20, 0xf3, 0x7f, 0x91, 0x08, 0x6e, 0x14, 0x7e, 0xfd, 0x41, 0x75,
0xec, 0xbd, 0xbf, 0x2f, 0x8e, 0xe9, 0x2d, 0x28, 0xaf, 0x51, 0x62, 0x04, 0x64, 0xbb, 0x1d, 0x70,
0x07, 0x74, 0xc8, 0x5b, 0xb4, 0x8b, 0x3b, 0xae, 0x74, 0x14, 0xd8, 0xfb, 0x6e, 0x70, 0x08, 0x96,
0x18, 0x76, 0x7f, 0xfb, 0x36, 0x71, 0xac, 0x2d, 0xc3, 0x35, 0x9a, 0x84, 0xca, 0xb8, 0x0f, 0x4f,
0x75, 0x23, 0x86, 0xc3, 0x09, 0x4a, 0xfd, 0x67, 0x59, 0x28, 0x37, 0x88, 0x43, 0x22, 0x7d, 0x1b,
0x80, 0x9a, 0xd4, 0x30, 0xc9, 0x0e, 0xa1, 0xb6, 0x67, 0xed, 0x12, 0xd3, 0x73, 0x2d, 0x9f, 0x47,
0x44, 0xb6, 0xfe, 0x19, 0x16, 0x67, 0xb7, 0x06, 0xb0, 0xf8, 0x0c, 0x0e, 0xe4, 0x40, 0xb9, 0x4d,
0xf9, 0x6f, 0x3b, 0x90, 0xb5, 0x87, 0xbd, 0xb4, 0x2f, 0xa7, 0x3b, 0xea, 0x9d, 0x38, 0x6b, 0x7d,
0xf6, 0xa4, 0x57, 0x2d, 0x27, 0x40, 0x38, 0x29, 0x1c, 0x7d, 0x13, 0x66, 0x3c, 0xda, 0x3e, 0x30,
0xdc, 0x06, 0x69, 0x13, 0xd7, 0x22, 0x6e, 0xe0, 0xf3, 0x53, 0x28, 0xd4, 0x2f, 0xb0, 0x8a, 0xb1,
0xdd, 0x87, 0xc3, 0x03, 0xd4, 0xe8, 0x01, 0xcc, 0xb6, 0xa9, 0xd7, 0x36, 0x9a, 0x06, 0x93, 0xb8,
0xe3, 0x39, 0xb6, 0xd9, 0xe5, 0xd9, 0xa1, 0x58, 0xbf, 0x7a, 0xd2, 0xab, 0xce, 0xee, 0xf4, 0x23,
0x4f, 0x7b, 0xd5, 0x39, 0x7e, 0x74, 0x0c, 0x12, 0x21, 0xf1, 0xa0, 0x98, 0xd8, 0x1d, 0xe6, 0x86,
0xdd, 0xa1, 0xbe, 0x09, 0x85, 0x46, 0x87, 0x72, 0x2e, 0xf4, 0x16, 0x14, 0x2c, 0xf9, 0x5b, 0x9e,
0xfc, 0x2b, 0xaa, 0xe4, 0x2a, 0x9a, 0xd3, 0x5e, 0xb5, 0xcc, 0x9a, 0x84, 0x9a, 0x02, 0xe0, 0x90,
0x45, 0x7f, 0x08, 0xe5, 0xf5, 0xc7, 0x6d, 0x8f, 0x06, 0xea, 0x4e, 0x2f, 0x43, 0x9e, 0x70, 0x00,
0x97, 0x56, 0x88, 0xea, 0x84, 0x20, 0xc3, 0x12, 0xcb, 0xf2, 0x30, 0x79, 0x6c, 0x98, 0x81, 0x4c,
0xdb, 0x61, 0x1e, 0x5e, 0x67, 0x40, 0x2c, 0x70, 0xfa, 0x65, 0x28, 0xf0, 0x80, 0xf2, 0xef, 0xaf,
0xa0, 0x19, 0xc8, 0x62, 0xe3, 0x98, 0x4b, 0x9d, 0xc4, 0x59, 0x6a, 0x1c, 0xc7, 0x22, 0x79, 0x1b,
0xe0, 0x16, 0x09, 0x4d, 0x58, 0x85, 0x69, 0xf5, 0x9c, 0x93, 0x59, 0xe6, 0xb3, 0x52, 0xc9, 0x34,
0x4e, 0xa2, 0x71, 0x3f, 0xbd, 0xfe, 0x10, 0x8a, 0x3c, 0x13, 0xb1, 0x34, 0x1e, 0x95, 0x0c, 0xed,
0x05, 0x25, 0x43, 0xd5, 0x81, 0xcc, 0xb0, 0x3a, 0x10, 0x33, 0xd7, 0x81, 0xb2, 0xe0, 0x55, 0x45,
0x32, 0x95, 0x86, 0xab, 0x50, 0x50, 0x66, 0x4a, 0x2d, 0x61, 0x73, 0xa4, 0x04, 0xe1, 0x90, 0x22,
0xa6, 0xed, 0x00, 0x12, 0x59, 0x35, 0x9d, 0xb2, 0x58, 0x05, 0xcc, 0xbc, 0xb8, 0x02, 0xc6, 0x34,
0xfd, 0x18, 0x2a, 0xc3, 0x3a, 0xaa, 0x97, 0xc8, 0xfb, 0xe9, 0x4d, 0xd1, 0xdf, 0xd7, 0x60, 0x26,
0x2e, 0x29, 0xfd, 0xf5, 0xa5, 0x57, 0x72, 0x7e, 0xc5, 0x8f, 0x9d, 0xc8, 0x6f, 0x35, 0xb8, 0x90,
0x70, 0x6d, 0xa4, 0x1b, 0x1f, 0xc1, 0xa8, 0x78, 0x70, 0x64, 0x47, 0x08, 0x8e, 0xbf, 0x66, 0xa0,
0x7c, 0xdb, 0xd8, 0x23, 0xce, 0x2e, 0x71, 0x88, 0x19, 0x78, 0x14, 0xfd, 0x08, 0x4a, 0x2d, 0x23,
0x30, 0x0f, 0x38, 0x54, 0x75, 0x87, 0x8d, 0x74, 0xa9, 0x34, 0x21, 0xa9, 0xb6, 0x15, 0x89, 0x59,
0x77, 0x03, 0xda, 0xad, 0xcf, 0x49, 0x93, 0x4a, 0x31, 0x0c, 0x8e, 0x6b, 0xe3, 0x2d, 0x3d, 0xff,
0x5e, 0x7f, 0xdc, 0x66, 0xa5, 0x6b, 0xf4, 0x49, 0x22, 0x61, 0x02, 0x26, 0xef, 0x76, 0x6c, 0x4a,
0x5a, 0xc4, 0x0d, 0xa2, 0x96, 0x7e, 0xab, 0x4f, 0x3e, 0x1e, 0xd0, 0x38, 0x7f, 0x13, 0x66, 0xfa,
0x8d, 0x67, 0xf9, 0xe7, 0x90, 0x74, 0xc5, 0x7d, 0x61, 0xf6, 0x13, 0x5d, 0x80, 0xdc, 0x91, 0xe1,
0x74, 0xe4, 0x6b, 0xc4, 0xe2, 0xe3, 0x46, 0xe6, 0xba, 0xa6, 0xff, 0x5e, 0x83, 0xca, 0x30, 0x43,
0xd0, 0xe7, 0x63, 0x82, 0xea, 0x25, 0x69, 0x55, 0xf6, 0x1d, 0xd2, 0x15, 0x52, 0xd7, 0xa1, 0xe0,
0xb5, 0xd9, 0x10, 0xe6, 0x51, 0x79, 0xeb, 0x57, 0xd4, 0x4d, 0x6e, 0x4b, 0xf8, 0x69, 0xaf, 0x7a,
0x31, 0x21, 0x5e, 0x21, 0x70, 0xc8, 0xca, 0xea, 0x00, 0xb7, 0x87, 0xd5, 0xa6, 0xb0, 0x0e, 0xdc,
0xe7, 0x10, 0x2c, 0x31, 0xfa, 0x1f, 0x35, 0x18, 0xe7, 0x4d, 0xd9, 0x43, 0x28, 0xb0, 0xf3, 0xb3,
0x8c, 0xc0, 0xe0, 0x76, 0xa5, 0x1e, 0x07, 0x18, 0xf7, 0x16, 0x09, 0x8c, 0x28, 0xda, 0x14, 0x04,
0x87, 0x12, 0x11, 0x86, 0x9c, 0x1d, 0x90, 0x96, 0xba, 0xc8, 0xd7, 0x86, 0x8a, 0x96, 0xc3, 0x68,
0x0d, 0x1b, 0xc7, 0xeb, 0x8f, 0x03, 0xe2, 0xb2, 0xcb, 0x88, 0x9e, 0xc6, 0x26, 0x93, 0x81, 0x85,
0x28, 0xfd, 0xdf, 0x1a, 0x84, 0xaa, 0x58, 0xf0, 0xfb, 0xc4, 0xd9, 0xbf, 0x6d, 0xbb, 0x87, 0xf2,
0x58, 0x43, 0x73, 0x76, 0x25, 0x1c, 0x87, 0x14, 0x67, 0x95, 0x87, 0xcc, 0x68, 0xe5, 0x81, 0x29,
0x34, 0x3d, 0x37, 0xb0, 0xdd, 0xce, 0xc0, 0x6b, 0x5b, 0x93, 0x70, 0x1c, 0x52, 0xb0, 0x36, 0x87,
0x92, 0x96, 0x61, 0xbb, 0xb6, 0xdb, 0x64, 0x4e, 0xac, 0x79, 0x1d, 0x37, 0xe0, 0xf5, 0x5e, 0xb6,
0x39, 0x78, 0x00, 0x8b, 0xcf, 0xe0, 0xd0, 0xff, 0x92, 0x85, 0x12, 0xf3, 0x59, 0xd5, 0xb9, 0x37,
0xa1, 0xec, 0xc4, 0xa3, 0x40, 0xfa, 0x7e, 0x51, 0x9a, 0x92, 0x7c, 0xd7, 0x38, 0x49, 0xcb, 0x98,
0x79, 0x77, 0x16, 0x32, 0x67, 0x92, 0xcc, 0x1b, 0x71, 0x24, 0x4e, 0xd2, 0xb2, 0xec, 0x75, 0xcc,
0xde, 0x87, 0xec, 0x7b, 0xc2, 0x2b, 0xfa, 0x36, 0x03, 0x62, 0x81, 0x43, 0x5b, 0x30, 0x67, 0x38,
0x8e, 0x77, 0xcc, 0x81, 0x75, 0xcf, 0x3b, 0x6c, 0x19, 0xf4, 0xd0, 0xe7, 0x03, 0x55, 0xa1, 0xfe,
0x39, 0xc9, 0x32, 0xb7, 0x3a, 0x48, 0x82, 0xcf, 0xe2, 0x3b, 0xeb, 0xda, 0xc6, 0x47, 0xbc, 0xb6,
0x1b, 0x30, 0xc5, 0xe2, 0xcb, 0xeb, 0x04, 0xaa, 0xd7, 0xcc, 0xf1, 0x4b, 0x40, 0x27, 0xbd, 0xea,
0xd4, 0xdd, 0x04, 0x06, 0xf7, 0x51, 0x32, 0x97, 0x1d, 0xbb, 0x65, 0x07, 0x95, 0x09, 0xce, 0x12,
0xba, 0x7c, 0x9b, 0x01, 0xb1, 0xc0, 0x25, 0xe2, 0xa2, 0x70, 0x5e, 0x5c, 0xe8, 0xbf, 0xc9, 0x02,
0x12, 0xcd, 0xb1, 0x25, 0xba, 0x1c, 0x91, 0x68, 0xae, 0xc0, 0x44, 0x4b, 0x36, 0xd7, 0x5a, 0x32,
0xeb, 0xab, 0xbe, 0x5a, 0xe1, 0xd1, 0x16, 0x14, 0xc5, 0x83, 0x8f, 0x82, 0x78, 0x59, 0x12, 0x17,
0xb7, 0x15, 0xe2, 0xb4, 0x57, 0x9d, 0x4f, 0xa8, 0x09, 0x31, 0x77, 0xbb, 0x6d, 0x82, 0x23, 0x09,
0x6c, 0x9e, 0x36, 0xda, 0x76, 0x7c, 0x93, 0x52, 0x8c, 0xe6, 0xe9, 0x68, 0x26, 0xc2, 0x31, 0x2a,
0xf4, 0x36, 0x8c, 0xb3, 0x93, 0x92, 0xc3, 0xed, 0x17, 0xd3, 0xa5, 0x0d, 0x76, 0xd6, 0xf5, 0x02,
0xab, 0x9a, 0xec, 0x17, 0xe6, 0x12, 0x98, 0x76, 0x1e, 0x65, 0x3e, 0x33, 0x4b, 0x6e, 0x01, 0x42,
0xed, 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa0, 0xb0, 0x2f, 0x1b, 0x44, 0x7e, 0x31, 0xa9,
0x13, 0x97, 0x6a, 0x2b, 0xc5, 0x30, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x17, 0x8a, 0x5b, 0xb6,
0x49, 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x9d, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59,
0x9c, 0xb8, 0x86, 0xeb, 0x89, 0x19, 0x24, 0x17, 0xc5, 0xc9, 0x1d, 0x06, 0xc4, 0x02, 0x77, 0xe3,
0x02, 0xab, 0xbf, 0x3f, 0x7f, 0x5a, 0x1d, 0x7b, 0xf2, 0xb4, 0x3a, 0xf6, 0xc1, 0x53, 0x59, 0x8b,
0x4f, 0x01, 0x60, 0x7b, 0xef, 0x07, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xe6, 0x44, 0x2d, 0xec, 0xf8,
0xe6, 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0xee, 0x44, 0xe4,
0x45, 0xcf, 0xaa, 0xc0, 0x09, 0x17, 0x27, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c,
0x1d, 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x5d, 0x95, 0xb8, 0x7b, 0x9b, 0x8d, 0xd3,
0x5e, 0xf5, 0x95, 0x61, 0xab, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xde, 0x66, 0x03, 0x33, 0xe6,
0xb3, 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x0d, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23,
0xea, 0x56, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x86, 0x62, 0xf6, 0x3c, 0xec, 0x16,
0xf1, 0x03, 0xa3, 0x25, 0x76, 0x45, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18,
0x1e, 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x78, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53,
0xd8, 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0xef, 0xc3, 0xbc, 0x02, 0x0e, 0xce, 0xd8, 0x7c, 0xdb,
0x93, 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x05, 0x12, 0x90, 0x05, 0x79,
0x47, 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x9e, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31,
0x9c, 0x21, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0xc7, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xf5,
0x4f, 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab,
0x42, 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca,
0x5c, 0xfb, 0x57, 0x52, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa,
0xb1, 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x43, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xce, 0xdb, 0x08, 0xa1,
0x38, 0x46, 0x81, 0xbe, 0x0a, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0xbb, 0xd5, 0x69, 0xfe, 0x82,
0x42, 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5,
0xde, 0x5d, 0x4f, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f,
0x0d, 0x4a, 0xff, 0x65, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x53, 0x06,
0xa6, 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0xeb, 0x60,
0x95, 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x93, 0x3d, 0x6b, 0x00, 0xac, 0xcf,
0xa0, 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0xae, 0x7d, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e,
0x75, 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d,
0x00, 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3,
0x43, 0xc0, 0xa3, 0x81, 0x11, 0xe3, 0xf5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33,
0xf4, 0xbf, 0x69, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc2, 0x88, 0xf3, 0x28, 0x39, 0xe2, 0xbc, 0x99,
0x72, 0xf3, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0x95,
0x06, 0x93, 0xfc, 0xd7, 0x28, 0x5b, 0xdb, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0xbf,
0x15, 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x4b, 0xac, 0x75, 0xdf, 0xd7, 0x20, 0xb9, 0x2f, 0x45, 0x37,
0x45, 0xfc, 0x6a, 0xe1, 0x42, 0x73, 0xc4, 0xd8, 0x7d, 0x6b, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77,
0x77, 0x15, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e,
0x0d, 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd4, 0xe0, 0xd2, 0xd0, 0x4d, 0x3a, 0x4b, 0x01,
0x66, 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0xd6, 0xef,
0xfd, 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xca, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e,
0xff, 0x3f, 0x8e, 0xd8, 0xcb, 0x90, 0xf7, 0xb9, 0x1e, 0x69, 0x5e, 0x58, 0x63, 0x85, 0x76, 0x2c,
0xb1, 0x7c, 0x1a, 0x21, 0xbe, 0x6f, 0x34, 0x55, 0xc6, 0x8a, 0xa6, 0x11, 0x01, 0xc6, 0x0a, 0x8f,
0xde, 0x80, 0x3c, 0x25, 0x86, 0x1f, 0x0e, 0x66, 0x0b, 0x4a, 0x24, 0xe6, 0xd0, 0xd3, 0x5e, 0x75,
0x52, 0x0a, 0xe7, 0xdf, 0x58, 0x52, 0xa3, 0x07, 0x30, 0x61, 0x91, 0xc0, 0xb0, 0x1d, 0x31, 0x8f,
0xa5, 0x5e, 0xdc, 0x0b, 0x61, 0x0d, 0xc1, 0x5a, 0x2f, 0x31, 0x9b, 0xe4, 0x07, 0x56, 0x02, 0x59,
0xb6, 0x35, 0x3d, 0x4b, 0x8c, 0x13, 0xb9, 0x28, 0xdb, 0xae, 0x79, 0x16, 0xc1, 0x1c, 0xa3, 0x3f,
0xd1, 0xa0, 0x24, 0x24, 0xad, 0x19, 0x1d, 0x9f, 0xa0, 0x95, 0xd0, 0x0b, 0x71, 0xdd, 0xaa, 0x93,
0x1b, 0x67, 0x03, 0xc7, 0x69, 0xaf, 0x5a, 0xe4, 0x64, 0x7c, 0x12, 0x51, 0x0e, 0xc4, 0xce, 0x28,
0x73, 0xce, 0x19, 0xbd, 0x0a, 0x39, 0xfe, 0x7a, 0xe4, 0x61, 0x86, 0x6f, 0x9d, 0x3f, 0x30, 0x2c,
0x70, 0xfa, 0xc7, 0x19, 0x28, 0x27, 0x9c, 0x4b, 0x31, 0x0b, 0x84, 0x0b, 0xc5, 0x4c, 0x8a, 0x25,
0xf5, 0xf0, 0x3f, 0x2b, 0x65, 0xed, 0xc9, 0xbf, 0x4c, 0xed, 0xf9, 0x2e, 0xe4, 0x4d, 0x76, 0x46,
0xea, 0xbf, 0xef, 0x95, 0x51, 0xae, 0x93, 0x9f, 0x6e, 0x14, 0x8d, 0xfc, 0xd3, 0xc7, 0x52, 0x20,
0xba, 0x05, 0xb3, 0x94, 0x04, 0xb4, 0xbb, 0xba, 0x1f, 0x10, 0x1a, 0x1f, 0xe2, 0x73, 0x51, 0xc7,
0x8d, 0xfb, 0x09, 0xf0, 0x20, 0x8f, 0xbe, 0x07, 0x93, 0x77, 0x8d, 0x3d, 0x27, 0xfc, 0x2b, 0x0a,
0x43, 0xd9, 0x76, 0x4d, 0xa7, 0x63, 0x11, 0x91, 0x8d, 0x55, 0xf6, 0x52, 0x8f, 0x76, 0x33, 0x8e,
0x3c, 0xed, 0x55, 0xe7, 0x12, 0x00, 0xf1, 0xdf, 0x0b, 0x4e, 0x8a, 0xd0, 0x1d, 0x18, 0xff, 0x14,
0xa7, 0xc7, 0xef, 0x41, 0x31, 0xea, 0xef, 0x3f, 0x61, 0x95, 0xfa, 0x23, 0x28, 0xb0, 0x88, 0x57,
0x73, 0xe9, 0x39, 0x2d, 0x4e, 0xb2, 0x71, 0xca, 0xa4, 0x69, 0x9c, 0xf4, 0x16, 0x94, 0xef, 0xb5,
0xad, 0x97, 0xfc, 0x33, 0x32, 0x93, 0xba, 0x6a, 0x5d, 0x03, 0xf1, 0xb7, 0x3a, 0x2b, 0x10, 0xa2,
0x72, 0xc7, 0x0a, 0x44, 0xbc, 0xf0, 0xc6, 0x76, 0xe5, 0x3f, 0xd5, 0x00, 0xf8, 0x52, 0x6a, 0xfd,
0x88, 0xb8, 0x01, 0x3b, 0x07, 0x16, 0xf8, 0xfd, 0xe7, 0xc0, 0x33, 0x03, 0xc7, 0xa0, 0x7b, 0x90,
0xf7, 0x44, 0x34, 0x89, 0x3f, 0x24, 0x47, 0xdc, 0x7c, 0x86, 0x8f, 0x40, 0xc4, 0x13, 0x96, 0xc2,
0xea, 0x4b, 0xcf, 0x9e, 0x2f, 0x8c, 0x7d, 0xf8, 0x7c, 0x61, 0xec, 0xa3, 0xe7, 0x0b, 0x63, 0xef,
0x9d, 0x2c, 0x68, 0xcf, 0x4e, 0x16, 0xb4, 0x0f, 0x4f, 0x16, 0xb4, 0x8f, 0x4e, 0x16, 0xb4, 0x8f,
0x4f, 0x16, 0xb4, 0x27, 0xff, 0x58, 0x18, 0x7b, 0x90, 0x39, 0x5a, 0xf9, 0x4f, 0x00, 0x00, 0x00,
0xff, 0xff, 0x89, 0x39, 0x24, 0x64, 0xcc, 0x24, 0x00, 0x00,
}
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
@ -4244,16 +4244,6 @@ func (this *ExportOptions) String() string {
}, "")
return s
}
func (this *FieldsV1) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&FieldsV1{`,
`Raw:` + valueToStringGenerated(this.Raw) + `,`,
`}`,
}, "")
return s
}
func (this *GetOptions) String() string {
if this == nil {
return "nil"
@ -4369,7 +4359,7 @@ func (this *ManagedFieldsEntry) String() string {
`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
`Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`,
`FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`,
`FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`,
`FieldsV1:` + strings.Replace(fmt.Sprintf("%v", this.FieldsV1), "FieldsV1", "FieldsV1", 1) + `,`,
`}`,
}, "")
return s

View File

@ -224,6 +224,7 @@ message ExportOptions {
// If a key maps to an empty Fields value, the field that key represents is part of the set.
//
// The exact format is defined in sigs.k8s.io/structured-merge-diff
// +protobuf.options.(gogoproto.goproto_stringer)=false
message FieldsV1 {
// Raw is the underlying serialization of this object.
optional bytes Raw = 1;
@ -546,7 +547,7 @@ message ObjectMeta {
// +optional
optional string generateName = 2;
// Namespace defines the space within each name must be unique. An empty namespace is
// Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.

View File

@ -135,7 +135,7 @@ type ObjectMeta struct {
// +optional
GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
// Namespace defines the space within each name must be unique. An empty namespace is
// Namespace defines the space within which each name must be unique. An empty namespace is
// equivalent to the "default" namespace, but "default" is the canonical representation.
// Not all objects are required to be scoped to a namespace - the value of this field for
// those objects will be empty.
@ -1145,11 +1145,16 @@ const (
// If a key maps to an empty Fields value, the field that key represents is part of the set.
//
// The exact format is defined in sigs.k8s.io/structured-merge-diff
// +protobuf.options.(gogoproto.goproto_stringer)=false
type FieldsV1 struct {
// Raw is the underlying serialization of this object.
Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"`
}
func (f FieldsV1) String() string {
return string(f.Raw)
}
// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf
// generation to support a meta type that can accept any valid JSON. This can be introduced
// in a v1 because clients a) receive an error if they try to access proto today, and b)
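As an illustration of the new stringer: the hunk above disables the generated gogo stringer for FieldsV1 and substitutes a hand-written String() that returns the raw bytes, so %v formatting (as used by ManagedFieldsEntry.String earlier in this diff) prints the underlying JSON instead of a Go byte-slice dump. A minimal sketch, assuming the v1.19-era metav1.FieldsV1 type from this snapshot:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	f := metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{".":{}}}}`)}
	// The value-receiver String() returns string(f.Raw), so %v prints the JSON as-is.
	fmt.Printf("%v\n", f) // {"f:metadata":{"f:labels":{".":{}}}}
}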

View File

@ -223,7 +223,7 @@ var map_ObjectMeta = map[string]string{
"": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
"name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
"generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
"namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
"namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
"selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
"uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
"resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",

View File

@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// NestedFieldCopy returns a deep copy of the value of a nested field.

View File

@ -1,27 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import "k8s.io/apimachinery/pkg/conversion"
// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value
func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
if len(*input) > 0 {
*out = IncludeObjectPolicy((*input)[0])
}
return nil
}

View File

@ -1,17 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1

View File

@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
// +groupName=meta.k8s.io
package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1"

View File

@ -1,415 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
package v1beta1
import (
fmt "fmt"
io "io"
proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
func (*PartialObjectMetadataList) ProtoMessage() {}
func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
return fileDescriptor_90ec10f86b91f9a8, []int{0}
}
func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
}
func (m *PartialObjectMetadataList) XXX_Size() int {
return m.Size()
}
func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
}
var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
func init() {
proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList")
}
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8)
}
var fileDescriptor_90ec10f86b91f9a8 = []byte{
// 317 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30,
0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0,
0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xbf, 0x5d, 0xac,
0x69, 0x4a, 0xf2, 0xef, 0xc0, 0x9b, 0x1f, 0xc1, 0x8f, 0xb5, 0xe3, 0x8e, 0x03, 0x61, 0xb8, 0xf8,
0x45, 0x24, 0x5d, 0x15, 0x19, 0x0a, 0xbb, 0xf5, 0x79, 0xca, 0xef, 0x97, 0x27, 0x24, 0x1c, 0xa7,
0x67, 0x96, 0x49, 0xcd, 0xd3, 0x22, 0x02, 0x93, 0x01, 0x82, 0xe5, 0x33, 0xc8, 0x26, 0xda, 0xf0,
0xea, 0x87, 0xc8, 0xa5, 0x12, 0xf1, 0x54, 0x66, 0x60, 0x9e, 0x79, 0x9e, 0x26, 0xbe, 0xb0, 0x5c,
0x01, 0x0a, 0x3e, 0x1b, 0x44, 0x80, 0x62, 0xc0, 0x13, 0xc8, 0xc0, 0x08, 0x84, 0x09, 0xcb, 0x8d,
0x46, 0xdd, 0x3c, 0xde, 0xa0, 0xec, 0x27, 0xca, 0xf2, 0x34, 0xf1, 0x85, 0x65, 0x1e, 0x65, 0x15,
0xda, 0xee, 0x27, 0x12, 0xa7, 0x45, 0xc4, 0x62, 0xad, 0x78, 0xa2, 0x13, 0xcd, 0x4b, 0x43, 0x54,
0x3c, 0x94, 0xa9, 0x0c, 0xe5, 0xd7, 0xc6, 0xdc, 0x3e, 0xd9, 0x65, 0xd4, 0xf6, 0x9e, 0xf6, 0xe9,
0x5f, 0x94, 0x29, 0x32, 0x94, 0x0a, 0xb8, 0x8d, 0xa7, 0xa0, 0xc4, 0x36, 0x77, 0xf4, 0x46, 0xc2,
0xc3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0xeb, 0xe8, 0x11, 0x62, 0xbc, 0x02, 0x14, 0x13, 0x81, 0xe2,
0x52, 0x5a, 0x6c, 0xde, 0x85, 0x75, 0x55, 0xe5, 0xd6, 0xbf, 0x2e, 0xe9, 0x35, 0x86, 0x8c, 0xed,
0x72, 0x71, 0xe6, 0x69, 0x6f, 0x1a, 0x1d, 0xcc, 0x57, 0x9d, 0xc0, 0xad, 0x3a, 0xf5, 0xaf, 0x66,
0xfc, 0x6d, 0x6c, 0xde, 0x87, 0x35, 0x89, 0xa0, 0x6c, 0x8b, 0x74, 0xff, 0xf7, 0x1a, 0xc3, 0xf3,
0xdd, 0xd4, 0xbf, 0xae, 0x1d, 0xed, 0x57, 0xe7, 0xd4, 0x2e, 0xbc, 0x71, 0xbc, 0x11, 0x8f, 0xfa,
0xf3, 0x35, 0x0d, 0x16, 0x6b, 0x1a, 0x2c, 0xd7, 0x34, 0x78, 0x71, 0x94, 0xcc, 0x1d, 0x25, 0x0b,
0x47, 0xc9, 0xd2, 0x51, 0xf2, 0xee, 0x28, 0x79, 0xfd, 0xa0, 0xc1, 0xed, 0x5e, 0xf5, 0x52, 0x9f,
0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x82, 0x5b, 0x80, 0x29, 0x02, 0x00, 0x00,
}
func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Items) > 0 {
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *PartialObjectMetadataList) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Items) > 0 {
for _, e := range m.Items {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
l = m.ListMeta.Size()
n += 1 + l + sovGenerated(uint64(l))
return n
}
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *PartialObjectMetadataList) String() string {
if this == nil {
return "nil"
}
repeatedStringForItems := "[]PartialObjectMetadata{"
for _, f := range this.Items {
repeatedStringForItems += fmt.Sprintf("%v", f) + ","
}
repeatedStringForItems += "}"
s := strings.Join([]string{`&PartialObjectMetadataList{`,
`Items:` + repeatedStringForItems + `,`,
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Items = append(m.Items, v1.PartialObjectMetadata{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenerated
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenerated
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)

View File

@ -1,41 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.apimachinery.pkg.apis.meta.v1beta1;
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1beta1";
// PartialObjectMetadataList contains a list of objects containing only their metadata.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message PartialObjectMetadataList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2;
// items contains each of the included items.
repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1;
}

View File

@ -1,45 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "meta.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// AddMetaToScheme registers base meta types into schemas.
func AddMetaToScheme(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Table{},
&TableOptions{},
&PartialObjectMetadata{},
&PartialObjectMetadataList{},
)
return nil
}

View File

@ -1,84 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// package v1beta1 is alpha objects from meta that will be introduced.
package v1beta1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Table is a tabular representation of a set of API resources. The server transforms the
// object into a set of preferred columns for quickly reviewing the objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +protobuf=false
type Table = v1.Table
// TableColumnDefinition contains information about a column returned in the Table.
// +protobuf=false
type TableColumnDefinition = v1.TableColumnDefinition
// TableRow is an individual row in a table.
// +protobuf=false
type TableRow = v1.TableRow
// TableRowCondition allows a row to be marked with additional information.
// +protobuf=false
type TableRowCondition = v1.TableRowCondition
type RowConditionType = v1.RowConditionType
type ConditionStatus = v1.ConditionStatus
type IncludeObjectPolicy = v1.IncludeObjectPolicy
// TableOptions are used when a Table is requested by the caller.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TableOptions = v1.TableOptions
// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
// to get access to a particular ObjectMeta schema without knowing the details of the version.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PartialObjectMetadata = v1.PartialObjectMetadata
// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than
// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must
// remain independent of v1.PartialObjectMetadataList to preserve mappings.
// PartialObjectMetadataList contains a list of objects containing only their metadata.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PartialObjectMetadataList struct {
v1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"`
// items contains each of the included items.
Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"`
}
const (
RowCompleted = v1.RowCompleted
ConditionTrue = v1.ConditionTrue
ConditionFalse = v1.ConditionFalse
ConditionUnknown = v1.ConditionUnknown
IncludeNone = v1.IncludeNone
IncludeMetadata = v1.IncludeMetadata
IncludeObject = v1.IncludeObject
)

View File

@ -1,40 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_PartialObjectMetadataList = map[string]string{
"": "PartialObjectMetadataList contains a list of objects containing only their metadata.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "items contains each of the included items.",
}
func (PartialObjectMetadataList) SwaggerDoc() map[string]string {
return map_PartialObjectMetadataList
}
// AUTO-GENERATED FUNCTIONS END HERE

View File

@ -1,59 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]v1.PartialObjectMetadata, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
if in == nil {
return nil
}
out := new(PartialObjectMetadataList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

View File

@ -1,32 +0,0 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}

View File

@ -442,20 +442,20 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags
if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok {
return fn(src, dest, scope)
}
// TODO: consider everything past this point deprecated - we want to support only point to point top level
// conversions
dv, err := EnforcePtr(dest)
if err != nil {
return err
}
if !dv.CanAddr() && !dv.CanSet() {
return fmt.Errorf("can't write to dest")
}
sv, err := EnforcePtr(src)
if err != nil {
return err
}
return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type())
// TODO: Everything past this point is deprecated.
// Remove in 1.20 once we're sure it didn't break anything.
// Leave something on the stack, so that calls to struct tag getters never fail.
scope.srcStack.push(scopeStackElem{})
scope.destStack.push(scopeStackElem{})

View File

@ -57,14 +57,22 @@ func (ls Set) Get(label string) string {
return ls[label]
}
// AsSelector converts labels into a selectors.
// AsSelector converts labels into a selectors. It does not
// perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func (ls Set) AsSelector() Selector {
return SelectorFromSet(ls)
}
// AsValidatedSelector converts labels into a selectors.
// The Set is validated client-side, which allows to catch errors early.
func (ls Set) AsValidatedSelector() (Selector, error) {
return ValidatedSelectorFromSet(ls)
}
// AsSelectorPreValidated converts labels into a selector, but
// assumes that labels are already validated and thus don't
// preform any validation.
// assumes that labels are already validated and thus doesn't
// perform any validation.
// According to our measurements this is significantly faster
// in codepaths that matter at high scale.
func (ls Set) AsSelectorPreValidated() Selector {
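The new AsValidatedSelector lets callers validate a label Set client-side instead of only finding out when the apiserver rejects the request. A minimal sketch of the two paths, assuming the labels package from this vendored snapshot:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	ls := labels.Set{"app": "nginx"}

	// AsSelector performs no validation; invalid values only fail server-side.
	fmt.Println(ls.AsSelector()) // app=nginx

	// AsValidatedSelector validates keys and values before building the selector.
	sel, err := ls.AsValidatedSelector()
	if err != nil {
		fmt.Println("invalid label set:", err)
		return
	}
	fmt.Println(sel.Matches(labels.Set{"app": "nginx"})) // true
}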

View File

@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// Requirements is AND of all requirements.
@ -221,7 +221,7 @@ func (r *Requirement) Matches(ls Labels) bool {
return false
}
// There should be only one strValue in r.strValues, and can be converted to a integer.
// There should be only one strValue in r.strValues, and can be converted to an integer.
if len(r.strValues) != 1 {
klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
return false
@ -869,23 +869,30 @@ func validateLabelValue(k, v string) error {
// SelectorFromSet returns a Selector which will match exactly the given Set. A
// nil and empty Sets are considered equivalent to Everything().
// It does not perform any validation, which means the server will reject
// the request if the Set contains invalid values.
func SelectorFromSet(ls Set) Selector {
return SelectorFromValidatedSet(ls)
}
// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set. A
// nil and empty Sets are considered equivalent to Everything().
// The Set is validated client-side, which allows to catch errors early.
func ValidatedSelectorFromSet(ls Set) (Selector, error) {
if ls == nil || len(ls) == 0 {
return internalSelector{}
return internalSelector{}, nil
}
requirements := make([]Requirement, 0, len(ls))
for label, value := range ls {
r, err := NewRequirement(label, selection.Equals, []string{value})
if err == nil {
requirements = append(requirements, *r)
} else {
//TODO: double check errors when input comes from serialization?
return internalSelector{}
if err != nil {
return nil, err
}
requirements = append(requirements, *r)
}
// sort to have deterministic string representation
sort.Sort(ByKey(requirements))
return internalSelector(requirements)
return internalSelector(requirements), nil
}
// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
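ValidatedSelectorFromSet also changes the failure mode: the old SelectorFromSet swallowed the NewRequirement error and returned an empty selector (which matches everything), while the validated variant surfaces it. A short sketch, assuming a label value that exceeds the 63-character limit:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// 64 characters is one past the allowed label-value length, so validation fails.
	bad := labels.Set{"app": strings.Repeat("x", 64)}
	if _, err := labels.ValidatedSelectorFromSet(bad); err != nil {
		fmt.Println("rejected client-side:", err)
	}
}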

View File

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/conversion/queryparams"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// codec binds an encoder and decoder.

View File

@ -21,6 +21,7 @@ import (
"reflect"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/json"
)
// CheckCodec makes sure that the codec can encode objects like internalType,
@ -32,7 +33,14 @@ func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersi
return fmt.Errorf("Internal type not encodable: %v", err)
}
for _, et := range externalTypes {
exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String()))
typeMeta := TypeMeta{
Kind: et.Kind,
APIVersion: et.GroupVersion().String(),
}
exBytes, err := json.Marshal(&typeMeta)
if err != nil {
return err
}
obj, err := Decode(c, exBytes)
if err != nil {
return fmt.Errorf("external type %s not interpretable: %v", et, err)

View File

@ -33,7 +33,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/structured-merge-diff/v3/value"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// UnstructuredConverter is an interface for converting between interface{}

View File

@ -176,15 +176,6 @@ func (gv GroupVersion) Empty() bool {
// String puts "group" and "version" into a single "group/version" string. For the legacy v1
// it returns "v1".
func (gv GroupVersion) String() string {
// special case the internal apiVersion for the legacy kube types
if gv.Empty() {
return ""
}
// special case of "v1" for backward compatibility
if len(gv.Group) == 0 && gv.Version == "v1" {
return gv.Version
}
if len(gv.Group) > 0 {
return gv.Group + "/" + gv.Version
}
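With the special cases removed, the remaining branch plus its fallback (not shown in the hunk) should still yield the same strings for the common cases, assuming the function ends by returning gv.Version when the group is empty. A quick sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	fmt.Println(schema.GroupVersion{Group: "apps", Version: "v1"}.String()) // apps/v1
	fmt.Println(schema.GroupVersion{Version: "v1"}.String())                // v1
	fmt.Println(schema.GroupVersion{}.String() == "")                       // true
}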

View File

@ -211,6 +211,19 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
}
}
s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
// if the type implements DeepCopyInto(<obj>), register a self-conversion
if m := reflect.ValueOf(obj).MethodByName("DeepCopyInto"); m.IsValid() && m.Type().NumIn() == 1 && m.Type().NumOut() == 0 && m.Type().In(0) == reflect.TypeOf(obj) {
if err := s.AddGeneratedConversionFunc(obj, obj, func(a, b interface{}, scope conversion.Scope) error {
// copy a to b
reflect.ValueOf(a).MethodByName("DeepCopyInto").Call([]reflect.Value{reflect.ValueOf(b)})
// clear TypeMeta to match legacy reflective conversion
b.(Object).GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
return nil
}); err != nil {
panic(err)
}
}
}
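The registration above invokes DeepCopyInto through reflection. A standalone sketch of that pattern outside the Scheme machinery, using a hypothetical widget type (the type and field names are illustrative only):

package main

import (
	"fmt"
	"reflect"
)

type widget struct {
	Name string
}

// DeepCopyInto copies the receiver into out, matching the generated deep-copy shape.
func (in *widget) DeepCopyInto(out *widget) {
	*out = *in
}

func main() {
	src := &widget{Name: "a"}
	dst := &widget{}

	// Look up DeepCopyInto on the concrete type, as the hunk above does.
	m := reflect.ValueOf(src).MethodByName("DeepCopyInto")
	if m.IsValid() && m.Type().NumIn() == 1 && m.Type().NumOut() == 0 && m.Type().In(0) == reflect.TypeOf(src) {
		// Invoke src.DeepCopyInto(dst) reflectively.
		m.Call([]reflect.Value{reflect.ValueOf(dst)})
	}
	fmt.Println(dst.Name) // "a"
}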
// KnownTypes returns the types known for the given version.

View File

@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apimachinery/pkg/util/framer"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer

View File

@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.

View File

@ -114,6 +114,18 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
return ""
}
func commaSeparatedHeaderValues(header []string) []string {
var parsedClientProtocols []string
for i := range header {
for _, clientProtocol := range strings.Split(header[i], ",") {
if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
parsedClientProtocols = append(parsedClientProtocols, proto)
}
}
}
return parsedClientProtocols
}
// Handshake performs a subprotocol negotiation. If the client did request a
// subprotocol, Handshake will select the first common value found in
// serverProtocols. If a match is found, Handshake adds a response header
@ -121,7 +133,7 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
// returned, along with a response header containing the list of protocols the
// server can accept.
func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
if len(clientProtocols) == 0 {
return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
}
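The new commaSeparatedHeaderValues helper lets Handshake accept the protocol list either as repeated headers or as one comma-separated value. A small standalone sketch of the parsing (standard library only; the header name is the one used by the httpstream package):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// splitProtocolHeader mirrors commaSeparatedHeaderValues from the hunk above.
func splitProtocolHeader(values []string) []string {
	var out []string
	for _, v := range values {
		for _, p := range strings.Split(v, ",") {
			if p = strings.Trim(p, " "); p != "" {
				out = append(out, p)
			}
		}
	}
	return out
}

func main() {
	h := http.Header{}
	// A client may send one comma-separated value...
	h.Add("X-Stream-Protocol-Version", "v4.channel.k8s.io, v3.channel.k8s.io")
	// ...or repeat the header.
	h.Add("X-Stream-Protocol-Version", "v2.channel.k8s.io")

	fmt.Println(splitProtocolHeader(h[http.CanonicalHeaderKey("X-Stream-Protocol-Version")]))
	// [v4.channel.k8s.io v3.channel.k8s.io v2.channel.k8s.io]
}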

View File

@ -24,7 +24,7 @@ import (
"github.com/docker/spdystream"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// connection maintains state about a spdystream.Connection and its associated

View File

@ -76,19 +76,20 @@ var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
var _ utilnet.Dialer = &SpdyRoundTripper{}
// NewRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
// tlsConfig.
func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
return NewRoundTripperWithProxy(tlsConfig, followRedirects, requireSameHostRedirects, utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment))
}
// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
// the specified tlsConfig. This function is mostly meant for unit tests.
func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
// specified tlsConfig and proxy func.
func NewRoundTripperWithProxy(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
return &SpdyRoundTripper{
tlsConfig: tlsConfig,
followRedirects: followRedirects,
requireSameHostRedirects: requireSameHostRedirects,
proxier: proxier,
}
}
@ -116,11 +117,7 @@ func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
// dial dials the host specified by req, using TLS if appropriate, optionally
// using a proxy server if one is configured via environment variables.
func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
proxier := s.proxier
if proxier == nil {
proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
proxyURL, err := proxier(req)
proxyURL, err := s.proxier(req)
if err != nil {
return nil, err
}
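With the proxier now injected at construction time, a caller that needs a fixed proxy can pass one explicitly. A sketch assuming the vendored spdy package and the NewRoundTripperWithProxy signature shown above (the proxy URL is illustrative only):

package main

import (
	"crypto/tls"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	proxy, _ := url.Parse("http://proxy.example.com:3128") // hypothetical proxy

	// Default behaviour: proxy settings come from the environment.
	_ = spdy.NewRoundTripper(&tls.Config{}, true, true)

	// Explicit proxier: every request is routed through the fixed proxy.
	rt := spdy.NewRoundTripperWithProxy(&tls.Config{}, true, true, http.ProxyURL(proxy))
	_ = rt
}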

View File

@ -26,7 +26,7 @@ import (
"strings"
"github.com/google/gofuzz"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// IntOrString is a type that can hold an int32 or a string. When used in

View File

@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error {
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v, 0)
case *interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertInterfaceNumbers(v, 0)
default:
return json.Unmarshal(data, v)
}
}
func convertInterfaceNumbers(v *interface{}, depth int) error {
var err error
switch v2 := (*v).(type) {
case json.Number:
*v, err = convertNumber(v2)
case map[string]interface{}:
err = convertMapNumbers(v2, depth+1)
case []interface{}:
err = convertSliceNumbers(v2, depth+1)
}
return err
}
// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}, depth int) error {
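The new *interface{} case preserves integer precision by decoding with UseNumber and then converting json.Number values. A standalone sketch of the same post-processing idea (the helper name here is illustrative):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// convertNumbers walks a decoded value and turns json.Number into int64 when
// possible, falling back to float64, mirroring the conversion above.
func convertNumbers(v interface{}) interface{} {
	switch t := v.(type) {
	case json.Number:
		if i, err := t.Int64(); err == nil {
			return i
		}
		f, _ := t.Float64()
		return f
	case map[string]interface{}:
		for k, e := range t {
			t[k] = convertNumbers(e)
		}
	case []interface{}:
		for i, e := range t {
			t[i] = convertNumbers(e)
		}
	}
	return v
}

func main() {
	data := []byte(`{"replicas": 3, "ratio": 0.5}`)

	var v interface{}
	dec := json.NewDecoder(bytes.NewBuffer(data))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	v = convertNumbers(v)

	m := v.(map[string]interface{})
	fmt.Printf("%T %T\n", m["replicas"], m["ratio"]) // int64 float64
}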

View File

@ -32,7 +32,7 @@ import (
"strings"
"golang.org/x/net/http2"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,

View File

@ -26,7 +26,7 @@ import (
"strings"
"k8s.io/klog"
"k8s.io/klog/v2"
)
type AddressFamily uint

View File

@ -23,7 +23,7 @@ import (
"sync"
"time"
"k8s.io/klog"
"k8s.io/klog/v2"
)
var (

View File

@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance
}
// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff()
// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained.
// an interface to return a timer for backoff, and the caller should back off until Timer.C() fires. If Backoff()
// is called again before the timer from the previous call has fired, the previous timer will NOT be drained, resulting in
// undefined behavior.
// The BackoffManager is supposed to be called in a single-threaded environment.
type BackoffManager interface {
Backoff() clock.Timer
@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du
Steps: math.MaxInt32,
Cap: maxBackoff,
},
backoffTimer: c.NewTimer(0),
backoffTimer: nil,
initialBackoff: initBackoff,
lastBackoffStart: c.Now(),
backoffResetDuration: resetDuration,
@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
return b.backoff.Step()
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff.
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
// The returned timer must be drained before calling Backoff() the second time
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
b.backoffTimer.Reset(b.getNextBackoff())
if b.backoffTimer == nil {
b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
} else {
b.backoffTimer.Reset(b.getNextBackoff())
}
return b.backoffTimer
}
@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C
clock: c,
duration: duration,
jitter: jitter,
backoffTimer: c.NewTimer(0),
backoffTimer: nil,
}
}
@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
return jitteredPeriod
}
// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
// The returned timer must be drained before calling Backoff() the second time
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
j.backoffTimer.Reset(j.getNextBackoff())
backoff := j.getNextBackoff()
if j.backoffTimer == nil {
j.backoffTimer = j.clock.NewTimer(backoff)
} else {
j.backoffTimer.Reset(backoff)
}
return j.backoffTimer
}
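The change above creates the backoff timer lazily on the first Backoff() call instead of starting a zero-duration timer in the constructor, so an unread initial tick can no longer sit in the timer channel. A minimal standalone sketch of the lazy-Reset pattern (standard library only; the type is illustrative):

package main

import (
	"fmt"
	"time"
)

type lazyBackoff struct {
	timer *time.Timer
	step  time.Duration
}

// Backoff returns a timer for the next wait; the caller must drain timer.C
// before calling Backoff again, as the comments in the hunk above require.
func (b *lazyBackoff) Backoff() *time.Timer {
	if b.timer == nil {
		// First call: create the timer with the first backoff, rather than
		// constructing a 0-duration timer up front that fires immediately.
		b.timer = time.NewTimer(b.step)
	} else {
		b.timer.Reset(b.step)
	}
	b.step *= 2
	return b.timer
}

func main() {
	b := &lazyBackoff{step: 10 * time.Millisecond}
	for i := 0; i < 3; i++ {
		start := time.Now()
		<-b.Backoff().C // drain before the next Backoff call
		fmt.Println("waited", time.Since(start).Round(time.Millisecond))
	}
}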

View File

@ -26,7 +26,7 @@ import (
"strings"
"unicode"
"k8s.io/klog"
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
)
@ -92,6 +92,10 @@ type YAMLDecoder struct {
// the caller in framing the chunk.
func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
scanner := bufio.NewScanner(r)
// initial buffer allocation: 4 KiB
buf := make([]byte, 4*1024)
// maximum size used to buffer a single token: 5 MiB
scanner.Buffer(buf, 5*1024*1024)
scanner.Split(splitYAMLDocument)
return &YAMLDecoder{
r: r,
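The two scanner.Buffer lines above raise bufio.Scanner's default 64 KiB token limit so that large YAML documents do not fail with bufio.ErrTooLong. The same pattern in isolation (standard library only):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	input := strings.NewReader("doc-1\n---\ndoc-2\n")

	scanner := bufio.NewScanner(input)
	// Start with a 4 KiB buffer but allow tokens up to 5 MiB, matching the
	// sizes chosen in the hunk above.
	scanner.Buffer(make([]byte, 4*1024), 5*1024*1024)

	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}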

View File

@ -21,7 +21,7 @@ import (
"io"
"sync"
"k8s.io/klog"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/net"

View File

@ -20,7 +20,7 @@ import (
"fmt"
"sync"
"k8s.io/klog"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
)
@ -32,8 +32,8 @@ type Interface interface {
Stop()
// Returns a chan which will receive all the events. If an error occurs
// or Stop() is called, this channel will be closed, in which case the
// watch should be completely cleaned up.
// or Stop() is called, the implementation will close this channel and
// release any resources used by the watch.
ResultChan() <-chan Event
}
@ -46,7 +46,9 @@ const (
Deleted EventType = "DELETED"
Bookmark EventType = "BOOKMARK"
Error EventType = "ERROR"
)
var (
DefaultChanSize int32 = 100
)
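The reworded ResultChan comment above makes the ownership explicit: the implementation closes the channel, so a consumer can simply range over it. A short sketch assuming the vendored watch and metav1 packages (FakeWatcher is used only for illustration):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	w := watch.NewFake()

	go func() {
		w.Add(&metav1.Status{Message: "created"})    // Added event
		w.Delete(&metav1.Status{Message: "removed"}) // Deleted event
		w.Stop()                                     // closes ResultChan
	}()

	// The loop ends when Stop() closes the channel; no extra cleanup is needed here.
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type, ev.Object.(*metav1.Status).Message)
	}
}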

View File

@ -16,7 +16,7 @@ import (
// that type.
type Equalities map[reflect.Type]reflect.Value
// For convenience, panics on errrors
// For convenience, panics on errors
func EqualitiesOrDie(funcs ...interface{}) Equalities {
e := Equalities{}
if err := e.AddFuncs(funcs...); err != nil {
@ -229,7 +229,7 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool,
//
// An empty slice *is* equal to a nil slice for our purposes; same for maps.
//
// Unexported field members cannot be compared and will cause an imformative panic; you must add an Equality
// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
// function for these types.
func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
if a1 == nil || a2 == nil {
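The Equalities type above lets callers register per-type comparison functions and then use DeepEqual with the nil-vs-empty slice semantics described in the comment. A small sketch assuming the vendored conversion package wrapper around this forked type (the quantity type is illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
)

type quantity struct {
	raw string
}

func main() {
	// Register a custom equality for quantity; EqualitiesOrDie panics on a
	// malformed function signature, as its comment above notes.
	eq := conversion.EqualitiesOrDie(func(a, b quantity) bool {
		return a.raw == b.raw
	})

	fmt.Println(eq.DeepEqual(quantity{"1Gi"}, quantity{"1Gi"})) // true

	// Per the DeepEqual comment above, a nil slice equals an empty slice.
	var nilSlice []string
	fmt.Println(eq.DeepEqual(nilSlice, []string{})) // true
}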

45
vendor/k8s.io/apiserver/go.mod generated vendored
View File

@ -5,52 +5,51 @@ module k8s.io/apiserver
go 1.13
require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/coreos/go-oidc v2.1.0+incompatible
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
github.com/davecgh/go-spew v1.1.1
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible
github.com/evanphx/json-patch v4.2.0+incompatible
github.com/go-openapi/jsonreference v0.19.3 // indirect
github.com/go-openapi/spec v0.19.3
github.com/gogo/protobuf v1.3.1
github.com/google/go-cmp v0.3.0
github.com/google/go-cmp v0.4.0
github.com/google/gofuzz v1.1.0
github.com/google/uuid v1.1.1
github.com/googleapis/gnostic v0.1.0
github.com/googleapis/gnostic v0.4.1
github.com/gorilla/websocket v1.4.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/hashicorp/golang-lru v0.5.1
github.com/mailru/easyjson v0.7.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/pkg/errors v0.8.1
github.com/pkg/errors v0.9.1
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 // indirect
github.com/prometheus/client_model v0.2.0
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.4.0
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
go.etcd.io/etcd v0.5.0-alpha.5.0.20200401174654-e694b7bb0875
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba // indirect
google.golang.org/grpc v1.26.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/square/go-jose.v2 v2.2.2
gopkg.in/yaml.v2 v2.2.8
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/client-go v0.18.2
k8s.io/component-base v0.18.2
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7
k8s.io/api v0.19.0-beta.2
k8s.io/apimachinery v0.19.0-beta.2
k8s.io/client-go v0.19.0-beta.2
k8s.io/component-base v0.19.0-beta.2
k8s.io/klog/v2 v2.1.0
k8s.io/kube-openapi v0.0.0-20200427153329-656914f816f9
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9-0.20200513220823-33b997865007
sigs.k8s.io/structured-merge-diff/v3 v3.0.0
sigs.k8s.io/yaml v1.2.0
)
@ -58,8 +57,8 @@ require (
replace (
golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
k8s.io/api => k8s.io/api v0.18.2
k8s.io/apimachinery => k8s.io/apimachinery v0.18.2
k8s.io/client-go => k8s.io/client-go v0.18.2
k8s.io/component-base => k8s.io/component-base v0.18.2
k8s.io/api => k8s.io/api v0.19.0-beta.2
k8s.io/apimachinery => k8s.io/apimachinery v0.19.0-beta.2
k8s.io/client-go => k8s.io/client-go v0.19.0-beta.2
k8s.io/component-base => k8s.io/component-base v0.19.0-beta.2
)

View File

@ -25,7 +25,7 @@ import (
"runtime"
"time"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// StacktracePred returns true if a stacktrace should be logged for this status.
@ -156,7 +156,7 @@ func (rl *respLogger) Addf(format string, data ...interface{}) {
// Log is intended to be called once at the end of your request handler, via defer
func (rl *respLogger) Log() {
latency := time.Since(rl.startTime)
if klog.V(3) {
if klog.V(3).Enabled() {
if !rl.hijacked {
klog.InfoDepth(1, fmt.Sprintf("verb=%q URI=%q latency=%v resp=%v UserAgent=%q srcIP=%q: %v%v",
rl.req.Method, rl.req.RequestURI,
@ -194,7 +194,7 @@ func (rl *respLogger) Write(b []byte) (int, error) {
func (rl *respLogger) Flush() {
if flusher, ok := rl.w.(http.Flusher); ok {
flusher.Flush()
} else if klog.V(2) {
} else if klog.V(2).Enabled() {
klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w))
}
}
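In klog/v2, klog.V(n) returns a Verbose value rather than a bool, which is why the guards above gain .Enabled(). A small sketch assuming the k8s.io/klog/v2 API:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Set("v", "3")
	flag.Parse()
	defer klog.Flush()

	// klog/v2: V returns a Verbose, so boolean guards must call Enabled().
	if klog.V(3).Enabled() {
		klog.InfoDepth(1, "verbose details that are only computed when needed")
	}

	// The short form logs only when the verbosity threshold is met.
	klog.V(3).Infof("request handled in %dms", 42)
}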

Some files were not shown because too many files have changed in this diff Show More