Merge pull request #125759 from dims/bump-prometheus/common-v0.55.0
Bump `prometheus/common` to v0.55.0
vendor/github.com/prometheus/common/expfmt/decode.go (6 changes, generated, vendored)
@@ -75,14 +75,14 @@ func ResponseFormat(h http.Header) Format {
func NewDecoder(r io.Reader, format Format) Decoder {
switch format.FormatType() {
case TypeProtoDelim:
return &protoDecoder{r: r}
return &protoDecoder{r: bufio.NewReader(r)}
}
return &textDecoder{r: r}
}

// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
r io.Reader
r protodelim.Reader
}

// Decode implements the Decoder interface.
@@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
opts := protodelim.UnmarshalOptions{
MaxSize: -1,
}
if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
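Not part of the PR, but for context: a minimal sketch of how a caller decodes protodelim-framed MetricFamily messages through this API after the change; the helper name decodeAll and the reader r are illustrative assumptions.

package main

import (
	"errors"
	"fmt"
	"io"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// decodeAll is a hypothetical helper; it assumes r carries length-delimited
// protobuf MetricFamily messages.
func decodeAll(r io.Reader) error {
	// After this change the protoDecoder buffers r itself, so callers no
	// longer need to wrap r in a bufio.Reader before handing it over.
	dec := expfmt.NewDecoder(r, expfmt.NewFormat(expfmt.TypeProtoDelim))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if errors.Is(err, io.EOF) {
				return nil
			}
			return err
		}
		fmt.Println(mf.GetName())
	}
}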
vendor/github.com/prometheus/common/expfmt/encode.go (13 changes, generated, vendored)
@@ -21,9 +21,10 @@ import (
"google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"

"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"github.com/prometheus/common/model"

"github.com/munnerz/goautoneg"

dto "github.com/prometheus/client_model/go"
)

@@ -139,7 +140,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
// interface is kept for backwards compatibility.
// In cases where the Format does not allow for UTF-8 names, the global
// NameEscapingScheme will be applied.
func NewEncoder(w io.Writer, format Format) Encoder {
//
// NewEncoder can be called with additional options to customize the OpenMetrics text output.
// For example:
// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
//
// Extra options are ignored for all other formats.
func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
escapingScheme := format.ToEscapingScheme()

switch format.FormatType() {
@@ -178,7 +185,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme))
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
return err
},
close: func() error {
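A hedged usage sketch (not from the diff) of the extended NewEncoder signature; the helper writeOpenMetrics is hypothetical, and w and mf are assumed to be supplied by the caller.

package main

import (
	"io"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// writeOpenMetrics is a hypothetical helper showing the new variadic options.
func writeOpenMetrics(w io.Writer, mf *dto.MetricFamily) error {
	format, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_1_0_0)
	if err != nil {
		return err
	}
	// The options only affect OpenMetrics output; they are ignored for the
	// other formats, as stated in the NewEncoder documentation above.
	enc := expfmt.NewEncoder(w, format, expfmt.WithCreatedLines(), expfmt.WithUnit())
	if err := enc.Encode(mf); err != nil {
		return err
	}
	// The OpenMetrics encoder also implements expfmt.Closer; Close finalizes
	// the exposition (it writes the trailing `# EOF` line).
	if closer, ok := enc.(expfmt.Closer); ok {
		return closer.Close()
	}
	return nil
}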
vendor/github.com/prometheus/common/expfmt/expfmt.go (22 changes, generated, vendored)
@@ -15,6 +15,7 @@
package expfmt

import (
"fmt"
"strings"

"github.com/prometheus/common/model"
@@ -63,7 +64,7 @@ const (
type FormatType int

const (
TypeUnknown = iota
TypeUnknown FormatType = iota
TypeProtoCompact
TypeProtoDelim
TypeProtoText
@@ -73,7 +74,8 @@ const (

// NewFormat generates a new Format from the type provided. Mostly used for
// tests, most Formats should be generated as part of content negotiation in
// encode.go.
// encode.go. If a type has more than one version, the latest version will be
// returned.
func NewFormat(t FormatType) Format {
switch t {
case TypeProtoCompact:
@@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format {
}
}

// NewOpenMetricsFormat generates a new OpenMetrics format matching the
// specified version number.
func NewOpenMetricsFormat(version string) (Format, error) {
if version == OpenMetricsVersion_0_0_1 {
return fmtOpenMetrics_0_0_1, nil
}
if version == OpenMetricsVersion_1_0_0 {
return fmtOpenMetrics_1_0_0, nil
}
return fmtUnknown, fmt.Errorf("unknown open metrics version string")
}

// FormatType deduces an overall FormatType for the given format.
func (f Format) FormatType() FormatType {
toks := strings.Split(string(f), ";")
if len(toks) < 2 {
return TypeUnknown
}

params := make(map[string]string)
for i, t := range toks {
if i == 0 {
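For orientation (not part of the diff), the new constructors can be exercised as below; the version string "2.0.0" is just an arbitrary example of an unsupported value.

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// NewFormat returns the latest version of a format type.
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics)
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true

	// NewOpenMetricsFormat pins an explicit OpenMetrics version instead.
	if _, err := expfmt.NewOpenMetricsFormat("2.0.0"); err != nil {
		fmt.Println(err) // unknown open metrics version string
	}
}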
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go (202 changes, generated, vendored)
@@ -22,11 +22,47 @@ import (
"strconv"
"strings"

"google.golang.org/protobuf/types/known/timestamppb"

"github.com/prometheus/common/model"

dto "github.com/prometheus/client_model/go"
)

type encoderOption struct {
withCreatedLines bool
withUnit bool
}

type EncoderOption func(*encoderOption)

// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
// to include _created lines (See
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
// Created timestamps can improve the accuracy of series reset detection, but
// come with a bandwidth cost.
//
// At the time of writing, created timestamp ingestion is still experimental in
// Prometheus and need to be enabled with the feature-flag
// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
// still possible. Therefore, it is recommended to use this feature with caution.
func WithCreatedLines() EncoderOption {
return func(t *encoderOption) {
t.withCreatedLines = true
}
}

// WithUnit is an EncoderOption enabling a set unit to be written to the output
// and to be added to the metric name, if it's not there already, as a suffix.
// Without opting in this way, the unit will not be added to the metric name and,
// on top of that, the unit will not be passed onto the output, even if it
// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil.
func WithUnit() EncoderOption {
return func(t *encoderOption) {
t.withUnit = true
}
}

// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
@@ -59,20 +95,34 @@ import (
// Prometheus to OpenMetrics or vice versa:
//
// - Counters are expected to have the `_total` suffix in their metric name. In
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
// line. A counter with a missing `_total` suffix is not an error. However,
// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
// lines. A counter with a missing `_total` suffix is not an error. However,
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
// output.
//
// - No support for the following (optional) features: `# UNIT` line, `_created`
// line, info type, stateset type, gaugehistogram type.
// - According to the OM specs, the `# UNIT` line is optional, but if populated,
// the unit has to be present in the metric name as its suffix:
// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
// However, in order to accommodate any potential scenario where such a change in the
// metric name is not desirable, the users are here given the choice of either explicitly
// opt in, in case they wish for the unit to be included in the output AND in the metric name
// as a suffix (see the description of the WithUnit function above),
// or not to opt in, in case they don't want for any of that to happen.
//
// - No support for the following (optional) features: info type,
// stateset type, gaugehistogram type.
//
// - The size of exemplar labels is not checked (i.e. it's possible to create
// exemplars that are larger than allowed by the OpenMetrics specification).
//
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
toOM := encoderOption{}
for _, option := range options {
option(&toOM)
}

name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -95,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}

var (
n int
metricType = in.GetType()
shortName = name
n int
metricType = in.GetType()
compliantName = name
)
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
shortName = name[:len(name)-6]
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
compliantName = name[:len(name)-6]
}
if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
}

// Comments, first HELP, then TYPE.
@@ -110,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
n, err = writeName(w, shortName)
n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -136,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
n, err = writeName(w, shortName)
n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -163,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
if toOM.withUnit && in.Unit != nil {
n, err = w.WriteString("# UNIT ")
written += n
if err != nil {
return
}
n, err = writeName(w, compliantName)
written += n
if err != nil {
return
}

err = w.WriteByte(' ')
written++
if err != nil {
return
}
n, err = writeEscapedString(w, *in.Unit, true)
written += n
if err != nil {
return
}
err = w.WriteByte('\n')
written++
if err != nil {
return
}
}

var createdTsBytesWritten int

// Finally the samples, one line for each.
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
compliantName = compliantName + "_total"
}
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
"expected counter in metric %s %s", name, metric,
"expected counter in metric %s %s", compliantName, metric,
)
}
// Note that we have ensured above that either the name
// ends on `_total` or that the rendered type is
// `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
w, compliantName, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
n += createdTsBytesWritten
}
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
"expected gauge in metric %s %s", name, metric,
"expected gauge in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
w, compliantName, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
"expected untyped in metric %s %s", name, metric,
"expected untyped in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
w, name, "", metric, "", 0,
w, compliantName, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
"expected summary in metric %s %s", name, metric,
"expected summary in metric %s %s", compliantName, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
w, name, "", metric,
w, compliantName, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
@@ -222,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
w, compliantName, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
@@ -231,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
w, compliantName, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
n += createdTsBytesWritten
}
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
"expected histogram in metric %s %s", name, metric,
"expected histogram in metric %s %s", compliantName, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
w, compliantName, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
@@ -259,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
if !infSeen {
n, err = writeOpenMetricsSample(
w, name, "_bucket", metric,
w, compliantName, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
@@ -270,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
w, name, "_sum", metric, "", 0,
w, compliantName, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
@@ -279,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
w, name, "_count", metric, "", 0,
w, compliantName, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
n += createdTsBytesWritten
}
default:
return written, fmt.Errorf(
"unexpected type in metric %s %s", name, metric,
"unexpected type in metric %s %s", compliantName, metric,
)
}
written += n
@@ -350,7 +445,7 @@ func writeOpenMetricsSample(
return written, err
}
}
if exemplar != nil {
if exemplar != nil && len(exemplar.Label) > 0 {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
@@ -473,6 +568,49 @@ func writeOpenMetricsNameAndLabelPairs(
return written, nil
}

// writeOpenMetricsCreated writes the created timestamp for a single time series
// following OpenMetrics text format to w, given the metric name, the metric proto
// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
// an additional label name with a float64 value (use empty string as label name if
// not required) and the timestamp that represents the created timestamp.
// The function returns the number of bytes written and any error encountered.
func writeOpenMetricsCreated(w enhancedWriter,
name, suffixToTrim string, metric *dto.Metric,
additionalLabelName string, additionalLabelValue float64,
createdTimestamp *timestamppb.Timestamp,
) (int, error) {
written := 0
n, err := writeOpenMetricsNameAndLabelPairs(
w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}

err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}

// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
written += n
if err != nil {
return written, err
}

err = w.WriteByte('\n')
written++
if err != nil {
return written, err
}
return written, nil
}

// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
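A hedged sketch (not in the PR) of calling MetricFamilyToOpenMetrics directly with the new options; the gauge name and its unit below are made-up illustration values.

package main

import (
	"log"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Hypothetical metric family with a declared unit.
	mf := &dto.MetricFamily{
		Name: proto.String("disk_usage"),
		Type: dto.MetricType_GAUGE.Enum(),
		Unit: proto.String("bytes"),
		Metric: []*dto.Metric{
			{Gauge: &dto.Gauge{Value: proto.Float64(42)}},
		},
	}
	// With WithUnit the encoder emits a `# UNIT` line and suffixes the name
	// to disk_usage_bytes; without the option the unit is omitted entirely.
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf, expfmt.WithUnit()); err != nil {
		log.Fatal(err)
	}
}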
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt (67 changes, generated, vendored)
@@ -1,67 +0,0 @@
PACKAGE

package goautoneg
import "bitbucket.org/ww/goautoneg"

HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


FUNCTIONS

func Negotiate(header string, alternatives []string) (content_type string)
Negotiate the most appropriate content_type given the accept header
and a list of alternatives.

func ParseAccept(header string) (accept []Accept)
Parse an Accept Header string returning a sorted list
of clauses


TYPES

type Accept struct {
Type, SubType string
Q float32
Params map[string]string
}
Structure to represent a clause in an HTTP Accept Header


SUBDIRECTORIES

.hg
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go (160 changes, generated, vendored)
@@ -1,160 +0,0 @@
/*
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package goautoneg

import (
"sort"
"strconv"
"strings"
)

// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
Type, SubType string
Q float64
Params map[string]string
}

// For internal use, so that we can use the sort interface
type accept_slice []Accept

func (accept accept_slice) Len() int {
slice := []Accept(accept)
return len(slice)
}

func (accept accept_slice) Less(i, j int) bool {
slice := []Accept(accept)
ai, aj := slice[i], slice[j]
if ai.Q > aj.Q {
return true
}
if ai.Type != "*" && aj.Type == "*" {
return true
}
if ai.SubType != "*" && aj.SubType == "*" {
return true
}
return false
}

func (accept accept_slice) Swap(i, j int) {
slice := []Accept(accept)
slice[i], slice[j] = slice[j], slice[i]
}

// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
parts := strings.Split(header, ",")
accept = make([]Accept, 0, len(parts))
for _, part := range parts {
part := strings.Trim(part, " ")

a := Accept{}
a.Params = make(map[string]string)
a.Q = 1.0

mrp := strings.Split(part, ";")

media_range := mrp[0]
sp := strings.Split(media_range, "/")
a.Type = strings.Trim(sp[0], " ")

switch {
case len(sp) == 1 && a.Type == "*":
a.SubType = "*"
case len(sp) == 2:
a.SubType = strings.Trim(sp[1], " ")
default:
continue
}

if len(mrp) == 1 {
accept = append(accept, a)
continue
}

for _, param := range mrp[1:] {
sp := strings.SplitN(param, "=", 2)
if len(sp) != 2 {
continue
}
token := strings.Trim(sp[0], " ")
if token == "q" {
a.Q, _ = strconv.ParseFloat(sp[1], 32)
} else {
a.Params[token] = strings.Trim(sp[1], " ")
}
}

accept = append(accept, a)
}

slice := accept_slice(accept)
sort.Sort(slice)

return
}

// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
asp := make([][]string, 0, len(alternatives))
for _, ctype := range alternatives {
asp = append(asp, strings.SplitN(ctype, "/", 2))
}
for _, clause := range ParseAccept(header) {
for i, ctsp := range asp {
if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
content_type = alternatives[i]
return
}
if clause.Type == ctsp[0] && clause.SubType == "*" {
content_type = alternatives[i]
return
}
if clause.Type == "*" && clause.SubType == "*" {
content_type = alternatives[i]
return
}
}
}
return
}
vendor/github.com/prometheus/common/model/alert.go (27 changes, generated, vendored)
@@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool {

// Status returns the status of the alert.
func (a *Alert) Status() AlertStatus {
if a.Resolved() {
return a.StatusAt(time.Now())
}

// StatusAt returns the status of the alert at the given timestamp.
func (a *Alert) StatusAt(ts time.Time) AlertStatus {
if a.ResolvedAt(ts) {
return AlertResolved
}
return AlertFiring
@@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool {
return false
}

// HasFiringAt returns true iff one of the alerts is not resolved
// at the time ts.
func (as Alerts) HasFiringAt(ts time.Time) bool {
for _, a := range as {
if !a.ResolvedAt(ts) {
return true
}
}
return false
}

// Status returns StatusFiring iff at least one of the alerts is firing.
func (as Alerts) Status() AlertStatus {
if as.HasFiring() {
@@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus {
}
return AlertResolved
}

// StatusAt returns StatusFiring iff at least one of the alerts is firing
// at the time ts.
func (as Alerts) StatusAt(ts time.Time) AlertStatus {
if as.HasFiringAt(ts) {
return AlertFiring
}
return AlertResolved
}
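Not part of the diff: a small sketch of the new timestamp-aware helpers; the alert below is a made-up example.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical alert that fired two hours ago and resolved an hour ago.
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency"},
		StartsAt: time.Now().Add(-2 * time.Hour),
		EndsAt:   time.Now().Add(-1 * time.Hour),
	}
	fmt.Println(a.Status())                                    // "resolved" relative to now
	fmt.Println(a.StatusAt(time.Now().Add(-90 * time.Minute))) // "firing" at that instant
}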
vendor/github.com/prometheus/common/model/labelset.go (11 changes, generated, vendored)
@@ -17,7 +17,6 @@ import (
"encoding/json"
"fmt"
"sort"
"strings"
)

// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
@@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
return result
}

func (l LabelSet) String() string {
lstrs := make([]string, 0, len(l))
for l, v := range l {
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
}

sort.Strings(lstrs)
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
}

// Fingerprint returns the LabelSet's fingerprint.
func (ls LabelSet) Fingerprint() Fingerprint {
return labelSetToFingerprint(ls)
vendor/github.com/prometheus/common/model/labelset_string.go (45 changes, generated, vendored, new file)
@@ -0,0 +1,45 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.21

package model

import (
"bytes"
"slices"
"strconv"
)

// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
func (l LabelSet) String() string {
var lna [32]string // On stack to avoid memory allocation for sorting names.
labelNames := lna[:0]
for name := range l {
labelNames = append(labelNames, string(name))
}
slices.Sort(labelNames)
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
b := bytes.NewBuffer(bytea[:0])
b.WriteByte('{')
for i, name := range labelNames {
if i > 0 {
b.WriteString(", ")
}
b.WriteString(name)
b.WriteByte('=')
b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
}
b.WriteByte('}')
return b.String()
}
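The doc comment above pins down the output shape; a trivial check (not from the diff):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{"more": "less", "foo": "bar"}
	// Names are sorted, so the rendering is deterministic regardless of map order.
	fmt.Println(ls) // {foo="bar", more="less"}
}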
vendor/github.com/prometheus/common/model/labelset_string_go120.go (39 changes, generated, vendored, new file)
@@ -0,0 +1,39 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.21

package model

import (
"fmt"
"sort"
"strings"
)

// String was optimized using functions not available for go 1.20
// or lower. We keep the old implementation for compatibility with client_golang.
// Once client golang drops support for go 1.20 (scheduled for August 2024), this
// file can be removed.
func (l LabelSet) String() string {
labelNames := make([]string, 0, len(l))
for name := range l {
labelNames = append(labelNames, string(name))
}
sort.Strings(labelNames)
lstrs := make([]string, 0, len(l))
for _, name := range labelNames {
lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
}
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
}
vendor/github.com/prometheus/common/model/metric.go (1 change, generated, vendored)
@@ -204,6 +204,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
out := &dto.MetricFamily{
Help: v.Help,
Type: v.Type,
Unit: v.Unit,
}

// If the name is nil, copy as-is, don't try to escape.
vendor/github.com/prometheus/procfs/.golangci.yml (7 changes, generated, vendored)
@@ -1,9 +1,16 @@
---
linters:
enable:
- errcheck
- godot
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- testifylint
- unused

linter-settings:
godot:
vendor/github.com/prometheus/procfs/MAINTAINERS.md (3 changes, generated, vendored)
@@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
* Paul Gier <pgier@redhat.com> @pgier
* Paul Gier <paulgier@gmail.com> @pgier
* Ben Kochie <superq@gmail.com> @SuperQ
vendor/github.com/prometheus/procfs/Makefile.common (26 changes, generated, vendored)
@@ -49,23 +49,23 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell command -v gotestsum > /dev/null),)
ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif

PROMU_VERSION ?= 0.15.0
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.54.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
GOLANGCI_LINT_VERSION ?= v1.59.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
ifneq (,$(SKIP_GOLANGCI_LINT))
@@ -169,16 +169,20 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif

.PHONY: common-lint-fix
common-lint-fix: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint fix"
$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
endif

.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell command -v yamllint > /dev/null))
ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -204,6 +208,10 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

.PHONY: common-docker-repo-name
common-docker-repo-name:
@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"

.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
vendor/github.com/prometheus/procfs/arp.go (6 changes, generated, vendored)
@@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}

return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}

}
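The %s-to-%w changes in this and the following procfs files make the sentinel errors matchable by callers; a hedged sketch of what that enables:

package main

import (
	"errors"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fs.GatherARPEntries(); err != nil {
		// With %w wrapping, errors.Is can now detect the ErrFileRead /
		// ErrFileParse sentinels buried inside the formatted error.
		if errors.Is(err, procfs.ErrFileRead) || errors.Is(err, procfs.ErrFileParse) {
			log.Println("procfs sentinel error:", err)
		}
	}
}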
vendor/github.com/prometheus/procfs/buddyinfo.go (6 changes, generated, vendored)
@@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}

node := strings.TrimRight(parts[1], ",")
zone := strings.TrimRight(parts[3], ",")
node := strings.TrimSuffix(parts[1], ",")
zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])

if bucketCount == -1 {
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
vendor/github.com/prometheus/procfs/cpuinfo.go (4 changes, generated, vendored)
@@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)

}
field := strings.SplitN(firstLine, ": ", 2)
@@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
vendor/github.com/prometheus/procfs/crypto.go (6 changes, generated, vendored)
@@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)

}

crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}

return crypto, nil
@@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {

kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}

k := strings.TrimSpace(kv[0])
vendor/github.com/prometheus/procfs/fscache.go (4 changes, generated, vendored)
@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {

m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
}

return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}

for i := range setFields {
vendor/github.com/prometheus/procfs/ipvs.go (6 changes, generated, vendored)
@@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}

portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
vendor/github.com/prometheus/procfs/loadavg.go (2 changes, generated, vendored)
@@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{
vendor/github.com/prometheus/procfs/mdstat.go (60 changes, generated, vendored)
@@ -23,7 +23,7 @@ import (

var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@@ -50,6 +50,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// Number of blocks on the device that need to be synced.
BlocksToBeSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
@@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {

deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
@@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])

if err != nil {
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}

syncLineIdx := i + 2
@@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {

// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
blocksSynced := size
blocksToBeSynced := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
@@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
blocksSynced = 0
} else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
BlocksSynced: blocksSynced,
BlocksToBeSynced: blocksToBeSynced,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
@@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}

sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}

if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in

matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}

total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}

active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))

return active, total, down, size, nil
}

func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}

syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
blocks := strings.Split(matches[1], "/")
blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
}

blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
if err != nil {
return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
}

// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}

// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}

// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}

return syncedBlocks, pct, finish, speed, nil
return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}

func evalComponentDevices(deviceFields []string) []string {
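Not part of the diff: a short sketch of reading the new BlocksToBeSynced field; "/proc" is the usual mount point assumed here.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range stats {
		// During a resync, BlocksSynced/BlocksToBeSynced mirror the
		// "(synced/total)" pair parsed from the recovery line.
		fmt.Printf("%s: %d/%d blocks synced\n", md.Name, md.BlocksSynced, md.BlocksToBeSynced)
	}
}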
vendor/github.com/prometheus/procfs/meminfo.go (220 changes, generated, vendored)
@@ -126,6 +126,7 @@ type Meminfo struct {
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@@ -140,6 +141,55 @@ type Meminfo struct {
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64

// The struct fields below are the byte-normalized counterparts to the
// existing struct fields. Values are normalized using the optional
// unit field in the meminfo line.
MemTotalBytes *uint64
MemFreeBytes *uint64
MemAvailableBytes *uint64
BuffersBytes *uint64
CachedBytes *uint64
SwapCachedBytes *uint64
ActiveBytes *uint64
InactiveBytes *uint64
ActiveAnonBytes *uint64
InactiveAnonBytes *uint64
ActiveFileBytes *uint64
InactiveFileBytes *uint64
UnevictableBytes *uint64
MlockedBytes *uint64
SwapTotalBytes *uint64
SwapFreeBytes *uint64
DirtyBytes *uint64
WritebackBytes *uint64
AnonPagesBytes *uint64
MappedBytes *uint64
ShmemBytes *uint64
SlabBytes *uint64
SReclaimableBytes *uint64
SUnreclaimBytes *uint64
KernelStackBytes *uint64
PageTablesBytes *uint64
NFSUnstableBytes *uint64
BounceBytes *uint64
WritebackTmpBytes *uint64
CommitLimitBytes *uint64
CommittedASBytes *uint64
VmallocTotalBytes *uint64
VmallocUsedBytes *uint64
VmallocChunkBytes *uint64
PercpuBytes *uint64
HardwareCorruptedBytes *uint64
AnonHugePagesBytes *uint64
ShmemHugePagesBytes *uint64
ShmemPmdMappedBytes *uint64
CmaTotalBytes *uint64
CmaFreeBytes *uint64
HugepagesizeBytes *uint64
DirectMap4kBytes *uint64
DirectMap2MBytes *uint64
DirectMap1GBytes *uint64
}

// Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {

m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}

return *m, nil
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
// Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}
var val, valBytes uint64

v, err := strconv.ParseUint(fields[1], 0, 64)
val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}

switch len(fields) {
case 2:
// No unit present, use the parsed the value as bytes directly.
valBytes = val
case 3:
// Unit present in optional 3rd field, convert it to
// bytes. The only unit supported within the Linux
// kernel is `kB`.
if fields[2] != "kB" {
return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
}

valBytes = 1024 * val

default:
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}

switch fields[0] {
case "MemTotal:":
m.MemTotal = &v
m.MemTotal = &val
m.MemTotalBytes = &valBytes
case "MemFree:":
m.MemFree = &v
m.MemFree = &val
m.MemFreeBytes = &valBytes
case "MemAvailable:":
m.MemAvailable = &v
m.MemAvailable = &val
m.MemAvailableBytes = &valBytes
case "Buffers:":
m.Buffers = &v
m.Buffers = &val
m.BuffersBytes = &valBytes
case "Cached:":
m.Cached = &v
m.Cached = &val
m.CachedBytes = &valBytes
case "SwapCached:":
m.SwapCached = &v
m.SwapCached = &val
m.SwapCachedBytes = &valBytes
case "Active:":
m.Active = &v
m.Active = &val
m.ActiveBytes = &valBytes
case "Inactive:":
m.Inactive = &v
m.Inactive = &val
m.InactiveBytes = &valBytes
case "Active(anon):":
m.ActiveAnon = &v
m.ActiveAnon = &val
m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
m.InactiveAnon = &v
m.InactiveAnon = &val
m.InactiveAnonBytes = &valBytes
case "Active(file):":
m.ActiveFile = &v
m.ActiveFile = &val
m.ActiveFileBytes = &valBytes
case "Inactive(file):":
m.InactiveFile = &v
m.InactiveFile = &val
m.InactiveFileBytes = &valBytes
case "Unevictable:":
m.Unevictable = &v
m.Unevictable = &val
m.UnevictableBytes = &valBytes
|
||||
case "Mlocked:":
|
||||
m.Mlocked = &v
|
||||
m.Mlocked = &val
|
||||
m.MlockedBytes = &valBytes
|
||||
case "SwapTotal:":
|
||||
m.SwapTotal = &v
|
||||
m.SwapTotal = &val
|
||||
m.SwapTotalBytes = &valBytes
|
||||
case "SwapFree:":
|
||||
m.SwapFree = &v
|
||||
m.SwapFree = &val
|
||||
m.SwapFreeBytes = &valBytes
|
||||
case "Dirty:":
|
||||
m.Dirty = &v
|
||||
m.Dirty = &val
|
||||
m.DirtyBytes = &valBytes
|
||||
case "Writeback:":
|
||||
m.Writeback = &v
|
||||
m.Writeback = &val
|
||||
m.WritebackBytes = &valBytes
|
||||
case "AnonPages:":
|
||||
m.AnonPages = &v
|
||||
m.AnonPages = &val
|
||||
m.AnonPagesBytes = &valBytes
|
||||
case "Mapped:":
|
||||
m.Mapped = &v
|
||||
m.Mapped = &val
|
||||
m.MappedBytes = &valBytes
|
||||
case "Shmem:":
|
||||
m.Shmem = &v
|
||||
m.Shmem = &val
|
||||
m.ShmemBytes = &valBytes
|
||||
case "Slab:":
|
||||
m.Slab = &v
|
||||
m.Slab = &val
|
||||
m.SlabBytes = &valBytes
|
||||
case "SReclaimable:":
|
||||
m.SReclaimable = &v
|
||||
m.SReclaimable = &val
|
||||
m.SReclaimableBytes = &valBytes
|
||||
case "SUnreclaim:":
|
||||
m.SUnreclaim = &v
|
||||
m.SUnreclaim = &val
|
||||
m.SUnreclaimBytes = &valBytes
|
||||
case "KernelStack:":
|
||||
m.KernelStack = &v
|
||||
m.KernelStack = &val
|
||||
m.KernelStackBytes = &valBytes
|
||||
case "PageTables:":
|
||||
m.PageTables = &v
|
||||
m.PageTables = &val
|
||||
m.PageTablesBytes = &valBytes
|
||||
case "NFS_Unstable:":
|
||||
m.NFSUnstable = &v
|
||||
m.NFSUnstable = &val
|
||||
m.NFSUnstableBytes = &valBytes
|
||||
case "Bounce:":
|
||||
m.Bounce = &v
|
||||
m.Bounce = &val
|
||||
m.BounceBytes = &valBytes
|
||||
case "WritebackTmp:":
|
||||
m.WritebackTmp = &v
|
||||
m.WritebackTmp = &val
|
||||
m.WritebackTmpBytes = &valBytes
|
||||
case "CommitLimit:":
|
||||
m.CommitLimit = &v
|
||||
m.CommitLimit = &val
|
||||
m.CommitLimitBytes = &valBytes
|
||||
case "Committed_AS:":
|
||||
m.CommittedAS = &v
|
||||
m.CommittedAS = &val
|
||||
m.CommittedASBytes = &valBytes
|
||||
case "VmallocTotal:":
|
||||
m.VmallocTotal = &v
|
||||
m.VmallocTotal = &val
|
||||
m.VmallocTotalBytes = &valBytes
|
||||
case "VmallocUsed:":
|
||||
m.VmallocUsed = &v
|
||||
m.VmallocUsed = &val
|
||||
m.VmallocUsedBytes = &valBytes
|
||||
case "VmallocChunk:":
|
||||
m.VmallocChunk = &v
|
||||
m.VmallocChunk = &val
|
||||
m.VmallocChunkBytes = &valBytes
|
||||
case "Percpu:":
|
||||
m.Percpu = &val
|
||||
m.PercpuBytes = &valBytes
|
||||
case "HardwareCorrupted:":
|
||||
m.HardwareCorrupted = &v
|
||||
m.HardwareCorrupted = &val
|
||||
m.HardwareCorruptedBytes = &valBytes
|
||||
case "AnonHugePages:":
|
||||
m.AnonHugePages = &v
|
||||
m.AnonHugePages = &val
|
||||
m.AnonHugePagesBytes = &valBytes
|
||||
case "ShmemHugePages:":
|
||||
m.ShmemHugePages = &v
|
||||
m.ShmemHugePages = &val
|
||||
m.ShmemHugePagesBytes = &valBytes
|
||||
case "ShmemPmdMapped:":
|
||||
m.ShmemPmdMapped = &v
|
||||
m.ShmemPmdMapped = &val
|
||||
m.ShmemPmdMappedBytes = &valBytes
|
||||
case "CmaTotal:":
|
||||
m.CmaTotal = &v
|
||||
m.CmaTotal = &val
|
||||
m.CmaTotalBytes = &valBytes
|
||||
case "CmaFree:":
|
||||
m.CmaFree = &v
|
||||
m.CmaFree = &val
|
||||
m.CmaFreeBytes = &valBytes
|
||||
case "HugePages_Total:":
|
||||
m.HugePagesTotal = &v
|
||||
m.HugePagesTotal = &val
|
||||
case "HugePages_Free:":
|
||||
m.HugePagesFree = &v
|
||||
m.HugePagesFree = &val
|
||||
case "HugePages_Rsvd:":
|
||||
m.HugePagesRsvd = &v
|
||||
m.HugePagesRsvd = &val
|
||||
case "HugePages_Surp:":
|
||||
m.HugePagesSurp = &v
|
||||
m.HugePagesSurp = &val
|
||||
case "Hugepagesize:":
|
||||
m.Hugepagesize = &v
|
||||
m.Hugepagesize = &val
|
||||
m.HugepagesizeBytes = &valBytes
|
||||
case "DirectMap4k:":
|
||||
m.DirectMap4k = &v
|
||||
m.DirectMap4k = &val
|
||||
m.DirectMap4kBytes = &valBytes
|
||||
case "DirectMap2M:":
|
||||
m.DirectMap2M = &v
|
||||
m.DirectMap2M = &val
|
||||
m.DirectMap2MBytes = &valBytes
|
||||
case "DirectMap1G:":
|
||||
m.DirectMap1G = &v
|
||||
m.DirectMap1G = &val
|
||||
m.DirectMap1GBytes = &valBytes
|
||||
}
|
||||
}
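A short sketch of reading the new byte-normalized fields, assuming the default /proc mount point; only MemTotalBytes is shown, the other *Bytes fields behave the same way:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    mi, err := fs.Meminfo()
    if err != nil {
        log.Fatal(err)
    }
    // MemTotal keeps the raw value from /proc/meminfo (usually in kB);
    // MemTotalBytes is already normalized using the optional unit field.
    if mi.MemTotal != nil && mi.MemTotalBytes != nil {
        fmt.Printf("MemTotal: raw=%d bytes=%d\n", *mi.MemTotal, *mi.MemTotalBytes)
    }
}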
|
||||
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/mountinfo.go
generated
vendored
@@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
|
||||
if mountInfo[6] != "" {
|
||||
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
|
||||
}
|
||||
}
|
||||
return mount, nil
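The %s-to-%w changes throughout this bump make the sentinel errors matchable with errors.Is; a hedged illustration of the caller-side effect, using procfs.GetMounts as an assumed entry point to this parser:

package main

import (
    "errors"
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    if _, err := procfs.GetMounts(); err != nil {
        // With %w wrapping, ErrFileParse is now part of the error chain.
        if errors.Is(err, procfs.ErrFileParse) {
            fmt.Println("mountinfo present but malformed:", err)
            return
        }
        log.Fatal(err)
    }
    fmt.Println("mountinfo parsed cleanly")
}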
|
||||
|
||||
11
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@@ -88,7 +88,7 @@ type MountStatsNFS struct {
|
||||
// Statistics broken down by filesystem operation.
|
||||
Operations []NFSOperationStats
|
||||
// Statistics about the NFS RPC transport.
|
||||
Transport NFSTransportStats
|
||||
Transport []NFSTransportStats
|
||||
}
|
||||
|
||||
// mountStats implements MountStats.
|
||||
@@ -194,8 +194,6 @@ type NFSOperationStats struct {
|
||||
CumulativeTotalResponseMilliseconds uint64
|
||||
// Duration from when a request was enqueued to when it was completely handled.
|
||||
CumulativeTotalRequestMilliseconds uint64
|
||||
// The average time from the point the client sends RPC requests until it receives the response.
|
||||
AverageRTTMilliseconds float64
|
||||
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
||||
Errors uint64
|
||||
}
|
||||
@@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Transport = *tstats
|
||||
stats.Transport = append(stats.Transport, *tstats)
|
||||
}
|
||||
|
||||
// When encountering "per-operation statistics", we must break this
|
||||
@@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
CumulativeTotalResponseMilliseconds: ns[6],
|
||||
CumulativeTotalRequestMilliseconds: ns[7],
|
||||
}
|
||||
if ns[0] != 0 {
|
||||
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
|
||||
}
|
||||
|
||||
if len(ns) > 8 {
|
||||
opStats.Errors = ns[8]
|
||||
@@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
||||
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
|
||||
return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
|
||||
}
|
||||
|
||||
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
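Because Transport is now a slice (one entry per transport line), callers that previously read a single struct need to iterate; a minimal sketch, with the mount-stats retrieval and the Port/Bind field names assumed from the existing NFSTransportStats type:

// Assumes: import ("fmt"; "github.com/prometheus/procfs")

// printNFSTransports prints per-transport counters for every NFS mount of a process.
func printNFSTransports(p procfs.Proc) error {
    mounts, err := p.MountStats()
    if err != nil {
        return err
    }
    for _, m := range mounts {
        nfs, ok := m.Stats.(*procfs.MountStatsNFS)
        if !ok {
            continue // not an NFS mount
        }
        // Transport is now a slice; iterate instead of reading a single struct.
        for _, t := range nfs.Transport {
            fmt.Printf("%s: port=%d bind=%d\n", m.Device, t.Port, t.Bind)
        }
    }
    return nil
}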
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
@@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
||||
|
||||
stat, err := parseConntrackStat(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
|
||||
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
@@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
||||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||
entries, err := util.ParseHexUint64s(fields)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
|
||||
}
|
||||
numEntries := len(entries)
|
||||
if numEntries < 16 || numEntries > 17 {
|
||||
|
||||
46
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
@@ -50,10 +50,13 @@ type (
|
||||
// UsedSockets shows the total number of parsed lines representing the
|
||||
// number of used sockets.
|
||||
UsedSockets uint64
|
||||
// Drops shows the total number of dropped packets of all UPD sockets.
|
||||
Drops *uint64
|
||||
}
|
||||
|
||||
// netIPSocketLine represents the fields parsed from a single line
|
||||
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
|
||||
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
|
||||
// For the proc file format details, see https://linux.die.net/man/5/proc.
|
||||
netIPSocketLine struct {
|
||||
Sl uint64
|
||||
@@ -66,6 +69,7 @@ type (
|
||||
RxQueue uint64
|
||||
UID uint64
|
||||
Inode uint64
|
||||
Drops *uint64
|
||||
}
|
||||
)
|
||||
|
||||
@@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
|
||||
defer f.Close()
|
||||
|
||||
var netIPSocket NetIPSocket
|
||||
isUDP := strings.Contains(file, "udp")
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
|
||||
defer f.Close()
|
||||
|
||||
var netIPSocketSummary NetIPSocketSummary
|
||||
var udpPacketDrops uint64
|
||||
isUDP := strings.Contains(file, "udp")
|
||||
|
||||
lr := io.LimitReader(f, readLimit)
|
||||
s := bufio.NewScanner(lr)
|
||||
s.Scan() // skip first line with headers
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
line, err := parseNetIPSocketLine(fields)
|
||||
line, err := parseNetIPSocketLine(fields, isUDP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
netIPSocketSummary.TxQueueLength += line.TxQueue
|
||||
netIPSocketSummary.RxQueueLength += line.RxQueue
|
||||
netIPSocketSummary.UsedSockets++
|
||||
if isUDP {
|
||||
udpPacketDrops += *line.Drops
|
||||
netIPSocketSummary.Drops = &udpPacketDrops
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
@@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
|
||||
var byteIP []byte
|
||||
byteIP, err := hex.DecodeString(hexIP)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
|
||||
}
|
||||
switch len(byteIP) {
|
||||
case 4:
|
||||
@@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) {
|
||||
}
|
||||
return i, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
|
||||
return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// parseNetIPSocketLine parses a single line, represented by a list of fields.
|
||||
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
|
||||
line := &netIPSocketLine{}
|
||||
if len(fields) < 10 {
|
||||
return nil, fmt.Errorf(
|
||||
@@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
}
|
||||
|
||||
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
|
||||
return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
|
||||
}
|
||||
// local_address
|
||||
l := strings.Split(fields[1], ":")
|
||||
@@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
return nil, err
|
||||
}
|
||||
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
|
||||
return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
|
||||
}
|
||||
|
||||
// remote_address
|
||||
@@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
return nil, err
|
||||
}
|
||||
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
|
||||
}
|
||||
|
||||
// st
|
||||
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
|
||||
}
|
||||
|
||||
// tx_queue and rx_queue
|
||||
@@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
|
||||
)
|
||||
}
|
||||
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
|
||||
}
|
||||
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
|
||||
}
|
||||
|
||||
// uid
|
||||
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
|
||||
}
|
||||
|
||||
// inode
|
||||
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
|
||||
}
|
||||
|
||||
// drops
|
||||
if isUDP {
|
||||
drops, err := strconv.ParseUint(fields[12], 0, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
|
||||
}
|
||||
line.Drops = &drops
|
||||
}
|
||||
|
||||
return line, nil
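The new Drops field is only populated for UDP sockets; a small sketch of reading it through the summary API, with NetUDPSummary assumed to be the public wrapper over newNetIPSocketSummary:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewDefaultFS()
    if err != nil {
        log.Fatal(err)
    }
    sum, err := fs.NetUDPSummary() // backed by newNetIPSocketSummary("net/udp")
    if err != nil {
        log.Fatal(err)
    }
    // Drops stays nil for TCP; for UDP it aggregates the per-line drops column.
    if sum.Drops != nil {
        fmt.Printf("udp sockets: %d, dropped packets: %d\n", sum.UsedSockets, *sum.Drops)
    }
}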
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/net_sockstat.go
generated
vendored
@@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
|
||||
|
||||
stat, err := parseSockstat(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
|
||||
return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
@@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
|
||||
// The remaining fields are key/value pairs.
|
||||
kvs, err := parseSockstatKVs(fields[1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
|
||||
return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
|
||||
}
|
||||
|
||||
// The first field is the protocol. We must trim its colon suffix.
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
@@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
|
||||
|
||||
entries, err := parseSoftnet(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
|
||||
119
vendor/github.com/prometheus/procfs/net_tls_stat.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2023 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TLSStat struct represents data in /proc/net/tls_stat.
|
||||
// See https://docs.kernel.org/networking/tls.html#statistics
|
||||
type TLSStat struct {
|
||||
// number of TX sessions currently installed where host handles cryptography
|
||||
TLSCurrTxSw int
|
||||
// number of RX sessions currently installed where host handles cryptography
|
||||
TLSCurrRxSw int
|
||||
// number of TX sessions currently installed where NIC handles cryptography
|
||||
TLSCurrTxDevice int
|
||||
// number of RX sessions currently installed where NIC handles cryptography
|
||||
TLSCurrRxDevice int
|
||||
//number of TX sessions opened with host cryptography
|
||||
TLSTxSw int
|
||||
//number of RX sessions opened with host cryptography
|
||||
TLSRxSw int
|
||||
// number of TX sessions opened with NIC cryptography
|
||||
TLSTxDevice int
|
||||
// number of RX sessions opened with NIC cryptography
|
||||
TLSRxDevice int
|
||||
// record decryption failed (e.g. due to incorrect authentication tag)
|
||||
TLSDecryptError int
|
||||
// number of RX resyncs sent to NICs handling cryptography
|
||||
TLSRxDeviceResync int
|
||||
// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
|
||||
TLSDecryptRetry int
|
||||
// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
|
||||
TLSRxNoPadViolation int
|
||||
}
|
||||
|
||||
// NewTLSStat reads the tls_stat statistics.
|
||||
func NewTLSStat() (TLSStat, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
|
||||
return fs.NewTLSStat()
|
||||
}
|
||||
|
||||
// NewTLSStat reads the tls_stat statistics.
|
||||
func (fs FS) NewTLSStat() (TLSStat, error) {
|
||||
file, err := os.Open(fs.proc.Path("net/tls_stat"))
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
tlsstat = TLSStat{}
|
||||
s = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
|
||||
if len(fields) != 2 {
|
||||
return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
|
||||
}
|
||||
|
||||
name := fields[0]
|
||||
value, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return TLSStat{}, err
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "TlsCurrTxSw":
|
||||
tlsstat.TLSCurrTxSw = value
|
||||
case "TlsCurrRxSw":
|
||||
tlsstat.TLSCurrRxSw = value
|
||||
case "TlsCurrTxDevice":
|
||||
tlsstat.TLSCurrTxDevice = value
|
||||
case "TlsCurrRxDevice":
|
||||
tlsstat.TLSCurrRxDevice = value
|
||||
case "TlsTxSw":
|
||||
tlsstat.TLSTxSw = value
|
||||
case "TlsRxSw":
|
||||
tlsstat.TLSRxSw = value
|
||||
case "TlsTxDevice":
|
||||
tlsstat.TLSTxDevice = value
|
||||
case "TlsRxDevice":
|
||||
tlsstat.TLSRxDevice = value
|
||||
case "TlsDecryptError":
|
||||
tlsstat.TLSDecryptError = value
|
||||
case "TlsRxDeviceResync":
|
||||
tlsstat.TLSRxDeviceResync = value
|
||||
case "TlsDecryptRetry":
|
||||
tlsstat.TLSDecryptRetry = value
|
||||
case "TlsRxNoPadViolation":
|
||||
tlsstat.TLSRxNoPadViolation = value
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return tlsstat, s.Err()
|
||||
}
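Usage of the new file is straightforward; a minimal example built only on the API added above:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // NewTLSStat reads /proc/net/tls_stat via the default mount point.
    stat, err := procfs.NewTLSStat()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("kTLS TX sessions (sw/device): %d/%d, decrypt errors: %d\n",
        stat.TLSTxSw, stat.TLSTxDevice, stat.TLSDecryptError)
}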
|
||||
14
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
|
||||
line := s.Text()
|
||||
item, err := nu.parseLine(line, hasInode, minFields)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
|
||||
return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
|
||||
}
|
||||
|
||||
nu.Rows = append(nu.Rows, item)
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return &nu, nil
|
||||
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
|
||||
|
||||
users, err := u.parseUsers(fields[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
|
||||
return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
|
||||
}
|
||||
|
||||
flags, err := u.parseFlags(fields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
|
||||
return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
|
||||
}
|
||||
|
||||
typ, err := u.parseType(fields[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
|
||||
return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
|
||||
}
|
||||
|
||||
state, err := u.parseState(fields[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
|
||||
return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
|
||||
}
|
||||
|
||||
var inode uint64
|
||||
if hasInode {
|
||||
inode, err = u.parseInode(fields[6])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
|
||||
return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
22
vendor/github.com/prometheus/procfs/net_wireless.go
generated
vendored
@@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
|
||||
|
||||
m, err := parseWireless(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
|
||||
return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
@@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
|
||||
|
||||
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
|
||||
return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
|
||||
}
|
||||
|
||||
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
|
||||
return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
|
||||
}
|
||||
|
||||
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
|
||||
return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
|
||||
}
|
||||
|
||||
dnwid, err := strconv.Atoi(stats[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
|
||||
}
|
||||
|
||||
dcrypt, err := strconv.Atoi(stats[5])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
|
||||
}
|
||||
|
||||
dfrag, err := strconv.Atoi(stats[6])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
|
||||
}
|
||||
|
||||
dretry, err := strconv.Atoi(stats[7])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
|
||||
}
|
||||
|
||||
dmisc, err := strconv.Atoi(stats[8])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
|
||||
return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
|
||||
}
|
||||
|
||||
mbeacon, err := strconv.Atoi(stats[9])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
|
||||
return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
|
||||
}
|
||||
|
||||
w := &Wireless{
|
||||
@@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
|
||||
return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
|
||||
}
|
||||
|
||||
return interfaces, nil
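For completeness, a sketch of the caller side; the Wireless field names (Name, QualityLink, QualityLevel) are assumed from the existing struct, which this hunk does not show:

// Assumes: import ("fmt"; "github.com/prometheus/procfs")

// printWireless lists signal quality per interface from /proc/net/wireless.
func printWireless(fs procfs.FS) error {
    ifaces, err := fs.Wireless()
    if err != nil {
        return err
    }
    for _, w := range ifaces {
        fmt.Printf("%s: link=%d level=%d\n", w.Name, w.QualityLink, w.QualityLevel)
    }
    return nil
}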
|
||||
|
||||
8
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
}
|
||||
|
||||
p := Procs{}
|
||||
@@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
||||
return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
|
||||
}
|
||||
|
||||
// Wchan returns the wchan (wait channel) of a process.
|
||||
@@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
|
||||
for i, n := range names {
|
||||
fd, err := strconv.ParseInt(n, 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
|
||||
return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
|
||||
}
|
||||
fds[i] = uintptr(fd)
|
||||
}
|
||||
@@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
|
||||
}
|
||||
|
||||
return names, nil
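The CmdLine change above is purely cosmetic (string(byte(0)) replaced by the "\x00" literal); behaviour is unchanged, as this small example of the API suggests:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/prometheus/procfs"
)

func main() {
    p, err := procfs.Self()
    if err != nil {
        log.Fatal(err)
    }
    // CmdLine splits the NUL-separated /proc/self/cmdline into arguments.
    args, err := p.CmdLine()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(strings.Join(args, " "))
}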
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
@@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
|
||||
}
|
||||
i, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
|
||||
return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/proc_ns.go
generated
vendored
@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
|
||||
return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
|
||||
}
|
||||
|
||||
ns := make(Namespaces, len(names))
|
||||
@@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
|
||||
typ := fields[0]
|
||||
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
|
||||
return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
|
||||
}
|
||||
|
||||
ns[name] = Namespace{typ, uint32(inode)}
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/proc_psi.go
generated
vendored
@@ -61,7 +61,7 @@ type PSIStats struct {
|
||||
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
|
||||
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
|
||||
if err != nil {
|
||||
return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
|
||||
return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
|
||||
}
|
||||
|
||||
return parsePSIStats(bytes.NewReader(data))
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
@@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
|
||||
}
|
||||
|
||||
v := strings.TrimSpace(kv[1])
|
||||
v = strings.TrimRight(v, " kB")
|
||||
v = strings.TrimSuffix(v, " kB")
|
||||
|
||||
vKBytes, err := strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
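The TrimRight-to-TrimSuffix switch above matters because TrimRight treats " kB" as a set of characters rather than a literal suffix; plain digit values happen to be unaffected, but the intent is a suffix strip. A standalone illustration (not part of the patch):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // TrimRight strips any trailing run of ' ', 'k' or 'B', which can eat into the value.
    fmt.Println(strings.TrimRight("10k kB", " kB")) // "10"
    // TrimSuffix removes only the exact " kB" suffix.
    fmt.Println(strings.TrimSuffix("10k kB", " kB")) // "10k"
}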
|
||||
|
||||
7
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@@ -110,6 +110,11 @@ type ProcStat struct {
|
||||
Policy uint
|
||||
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
||||
DelayAcctBlkIOTicks uint64
|
||||
// Guest time of the process (time spent running a virtual CPU for a guest
|
||||
// operating system), measured in clock ticks.
|
||||
GuestTime int
|
||||
// Guest time of the process's children, measured in clock ticks.
|
||||
CGuestTime int
|
||||
|
||||
proc FS
|
||||
}
|
||||
@@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
|
||||
&s.RTPriority,
|
||||
&s.Policy,
|
||||
&s.DelayAcctBlkIOTicks,
|
||||
&s.GuestTime,
|
||||
&s.CGuestTime,
|
||||
)
|
||||
if err != nil {
|
||||
return ProcStat{}, err
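A brief sketch of the two new fields as seen by callers; clock-tick conversion is left out:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    p, err := procfs.Self()
    if err != nil {
        log.Fatal(err)
    }
    st, err := p.Stat()
    if err != nil {
        log.Fatal(err)
    }
    // GuestTime and CGuestTime are the new fields scanned above.
    fmt.Printf("guest time: %d ticks, children guest time: %d ticks\n",
        st.GuestTime, st.CGuestTime)
}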
|
||||
|
||||
29
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
@@ -15,6 +15,7 @@ package procfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/bits"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -76,9 +77,9 @@ type ProcStatus struct {
|
||||
NonVoluntaryCtxtSwitches uint64
|
||||
|
||||
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
|
||||
UIDs [4]string
|
||||
UIDs [4]uint64
|
||||
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
||||
GIDs [4]string
|
||||
GIDs [4]uint64
|
||||
|
||||
// CpusAllowedList: List of cpu cores processes are allowed to run on.
|
||||
CpusAllowedList []uint64
|
||||
@@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
|
||||
// convert kB to B
|
||||
vBytes := vKBytes * 1024
|
||||
|
||||
s.fillStatus(k, v, vKBytes, vBytes)
|
||||
err = s.fillStatus(k, v, vKBytes, vBytes)
|
||||
if err != nil {
|
||||
return ProcStatus{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
|
||||
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
|
||||
switch k {
|
||||
case "Tgid":
|
||||
s.TGID = int(vUint)
|
||||
case "Name":
|
||||
s.Name = vString
|
||||
case "Uid":
|
||||
copy(s.UIDs[:], strings.Split(vString, "\t"))
|
||||
var err error
|
||||
for i, v := range strings.Split(vString, "\t") {
|
||||
s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case "Gid":
|
||||
copy(s.GIDs[:], strings.Split(vString, "\t"))
|
||||
var err error
|
||||
for i, v := range strings.Split(vString, "\t") {
|
||||
s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case "NSpid":
|
||||
s.NSpids = calcNSPidsList(vString)
|
||||
case "VmPeak":
|
||||
@@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
|
||||
s.CpusAllowedList = calcCpusAllowedList(vString)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TotalCtxtSwitches returns the total context switch.
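UIDs and GIDs switching from [4]string to [4]uint64 is a breaking change for callers of ProcStatus; a sketch of the numeric comparison this now allows:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    p, err := procfs.Self()
    if err != nil {
        log.Fatal(err)
    }
    status, err := p.NewStatus()
    if err != nil {
        log.Fatal(err)
    }
    // Order is real, effective, saved set, filesystem.
    realUID, effUID := status.UIDs[0], status.UIDs[1]
    if realUID != effUID {
        fmt.Printf("setuid process: real=%d effective=%d\n", realUID, effUID)
    } else {
        fmt.Printf("uid=%d gid=%d\n", realUID, status.GIDs[0])
    }
}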
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/proc_sys.go
generated
vendored
@@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
|
||||
vp := util.NewValueParser(f)
|
||||
values[i] = vp.Int()
|
||||
if err := vp.Err(); err != nil {
|
||||
return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
|
||||
return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
|
||||
22
vendor/github.com/prometheus/procfs/softirqs.go
generated
vendored
@@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.Hi = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "TIMER:":
|
||||
@@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.Timer = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "NET_TX:":
|
||||
@@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.NetTx = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "NET_RX:":
|
||||
@@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.NetRx = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "BLOCK:":
|
||||
@@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.Block = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "IRQ_POLL:":
|
||||
@@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.IRQPoll = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "TASKLET:":
|
||||
@@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.Tasklet = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "SCHED:":
|
||||
@@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.Sched = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "HRTIMER:":
|
||||
@@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.HRTimer = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "RCU:":
|
||||
@@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
|
||||
softirqs.RCU = make([]uint64, len(perCPU))
|
||||
for i, count := range perCPU {
|
||||
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
|
||||
return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
|
||||
}
|
||||
|
||||
return softirqs, scanner.Err()
|
||||
|
||||
22
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
@@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
|
||||
&cpuStat.Guest, &cpuStat.GuestNice)
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
|
||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
|
||||
}
|
||||
if count == 0 {
|
||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
|
||||
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
|
||||
|
||||
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
||||
if err != nil {
|
||||
return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
|
||||
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
|
||||
}
|
||||
|
||||
return cpuStat, cpuID, nil
|
||||
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
|
||||
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
||||
|
||||
if err != nil {
|
||||
return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
|
||||
return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
|
||||
}
|
||||
|
||||
return softIRQStat, total, nil
|
||||
@@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
|
||||
switch {
|
||||
case parts[0] == "btime":
|
||||
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "intr":
|
||||
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
numberedIRQs := parts[2:]
|
||||
stat.IRQ = make([]uint64, len(numberedIRQs))
|
||||
for i, count := range numberedIRQs {
|
||||
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "ctxt":
|
||||
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "processes":
|
||||
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_running":
|
||||
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_blocked":
|
||||
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
|
||||
}
|
||||
case parts[0] == "softirq":
|
||||
softIRQStats, total, err := parseSoftIRQStat(line)
|
||||
@@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
|
||||
return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
|
||||
6
vendor/github.com/prometheus/procfs/swaps.go
generated
vendored
@@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
|
||||
|
||||
swap.Size, err = strconv.Atoi(swapFields[2])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
|
||||
return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
|
||||
}
|
||||
swap.Used, err = strconv.Atoi(swapFields[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
|
||||
return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
|
||||
}
|
||||
swap.Priority, err = strconv.Atoi(swapFields[4])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
|
||||
return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
|
||||
}
|
||||
|
||||
return swap, nil
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/thread.go
generated
vendored
@@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
|
||||
return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
|
||||
}
|
||||
|
||||
t := Procs{}
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/zoneinfo.go
generated
vendored
@@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
|
||||
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
|
||||
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
|
||||
return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
|
||||
}
|
||||
zoneinfo, err := parseZoneinfo(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
|
||||
return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
|
||||
}
|
||||
return zoneinfo, nil
|
||||
}
|
||||
|
||||
8
vendor/github.com/stretchr/objx/README.md
generated
vendored
@@ -4,20 +4,20 @@
|
||||
[](https://codeclimate.com/github/stretchr/objx/maintainability)
|
||||
[](https://codeclimate.com/github/stretchr/objx/test_coverage)
|
||||
[](https://sourcegraph.com/github.com/stretchr/objx)
|
||||
[](https://godoc.org/github.com/stretchr/objx)
|
||||
[](https://pkg.go.dev/github.com/stretchr/objx)
|
||||
|
||||
Objx - Go package for dealing with maps, slices, JSON and other data.
|
||||
|
||||
Get started:
|
||||
|
||||
- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date)
|
||||
- Check out the API Documentation http://godoc.org/github.com/stretchr/objx
|
||||
- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx
|
||||
|
||||
## Overview
|
||||
Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc.
|
||||
|
||||
### Pattern
|
||||
Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going:
|
||||
Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going:
|
||||
|
||||
m, err := objx.FromJSON(json)
|
||||
|
||||
@@ -74,7 +74,7 @@ To update Objx to the latest version, run:
|
||||
go get -u github.com/stretchr/objx
|
||||
|
||||
### Supported go versions
|
||||
We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment.
|
||||
We currently support the three recent major Go versions.
|
||||
|
||||
## Contributing
|
||||
Please feel free to submit issues, fork the repository and send pull requests!
|
||||
|
||||
5
vendor/github.com/stretchr/objx/Taskfile.yml
generated
vendored
@@ -1,7 +1,4 @@
|
||||
version: '2'
|
||||
|
||||
env:
|
||||
GOFLAGS: -mod=vendor
|
||||
version: '3'
|
||||
|
||||
tasks:
|
||||
default:
|
||||
|
||||
24
vendor/github.com/stretchr/objx/accessors.go
generated
vendored
@@ -14,17 +14,17 @@ const (
|
||||
// For example, `location.address.city`
|
||||
PathSeparator string = "."
|
||||
|
||||
// arrayAccesRegexString is the regex used to extract the array number
|
||||
// arrayAccessRegexString is the regex used to extract the array number
|
||||
// from the access path
|
||||
arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
|
||||
arrayAccessRegexString = `^(.+)\[([0-9]+)\]$`
|
||||
|
||||
// mapAccessRegexString is the regex used to extract the map key
|
||||
// from the access path
|
||||
mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$`
|
||||
)
|
||||
|
||||
// arrayAccesRegex is the compiled arrayAccesRegexString
|
||||
var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
|
||||
// arrayAccessRegex is the compiled arrayAccessRegexString
|
||||
var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString)
|
||||
|
||||
// mapAccessRegex is the compiled mapAccessRegexString
|
||||
var mapAccessRegex = regexp.MustCompile(mapAccessRegexString)
|
||||
@@ -37,11 +37,11 @@ var mapAccessRegex = regexp.MustCompile(mapAccessRegexString)
|
||||
//
|
||||
// Get can only operate directly on map[string]interface{} and []interface.
|
||||
//
|
||||
// Example
|
||||
// # Example
|
||||
//
|
||||
// To access the title of the third chapter of the second book, do:
|
||||
//
|
||||
// o.Get("books[1].chapters[2].title")
|
||||
// o.Get("books[1].chapters[2].title")
|
||||
func (m Map) Get(selector string) *Value {
|
||||
rawObj := access(m, selector, nil, false)
|
||||
return &Value{data: rawObj}
|
||||
@@ -52,26 +52,26 @@ func (m Map) Get(selector string) *Value {
|
||||
//
|
||||
// Set can only operate directly on map[string]interface{} and []interface
|
||||
//
|
||||
// Example
|
||||
// # Example
|
||||
//
|
||||
// To set the title of the third chapter of the second book, do:
|
||||
//
|
||||
// o.Set("books[1].chapters[2].title","Time to Go")
|
||||
// o.Set("books[1].chapters[2].title","Time to Go")
|
||||
func (m Map) Set(selector string, value interface{}) Map {
|
||||
access(m, selector, value, true)
|
||||
return m
|
||||
}
|
||||
|
||||
// getIndex returns the index, which is hold in s by two braches.
|
||||
// It also returns s withour the index part, e.g. name[1] will return (1, name).
|
||||
// getIndex returns the index, which is hold in s by two branches.
|
||||
// It also returns s without the index part, e.g. name[1] will return (1, name).
|
||||
// If no index is found, -1 is returned
|
||||
func getIndex(s string) (int, string) {
|
||||
arrayMatches := arrayAccesRegex.FindStringSubmatch(s)
|
||||
arrayMatches := arrayAccessRegex.FindStringSubmatch(s)
|
||||
if len(arrayMatches) > 0 {
|
||||
// Get the key into the map
|
||||
selector := arrayMatches[1]
|
||||
// Get the index into the array at the key
|
||||
// We know this cannt fail because arrayMatches[2] is an int for sure
|
||||
// We know this can't fail because arrayMatches[2] is an int for sure
|
||||
index, _ := strconv.Atoi(arrayMatches[2])
|
||||
return index, selector
|
||||
}
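The accessor rename above is internal only; Get and Set keep the selector syntax described in their doc comments. A small example matching that documented form:

package main

import (
    "fmt"

    "github.com/stretchr/objx"
)

func main() {
    m := objx.MustFromJSON(`{"books": [
        {"title": "A"},
        {"title": "B", "chapters": [{"title": "c0"}, {"title": "c1"}, {"title": "c2"}]}
    ]}`)

    // Read with dot and array notation, as in the Get doc comment.
    fmt.Println(m.Get("books[1].chapters[2].title").Str())

    // Set uses the same selector form.
    m.Set("books[1].chapters[2].title", "Time to Go")
    fmt.Println(m.Get("books[1].chapters[2].title").Str())
}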
|
||||
|
||||
4
vendor/github.com/stretchr/objx/conversions.go
generated
vendored
@@ -15,7 +15,7 @@ import (
|
||||
const SignatureSeparator = "_"
|
||||
|
||||
// URLValuesSliceKeySuffix is the character that is used to
|
||||
// specify a suffic for slices parsed by URLValues.
|
||||
// specify a suffix for slices parsed by URLValues.
|
||||
// If the suffix is set to "[i]", then the index of the slice
|
||||
// is used in place of i
|
||||
// Ex: Suffix "[]" would have the form a[]=b&a[]=c
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
)
|
||||
|
||||
// SetURLValuesSliceKeySuffix sets the character that is used to
|
||||
// specify a suffic for slices parsed by URLValues.
|
||||
// specify a suffix for slices parsed by URLValues.
|
||||
// If the suffix is set to "[i]", then the index of the slice
|
||||
// is used in place of i
|
||||
// Ex: Suffix "[]" would have the form a[]=b&a[]=c
|
||||
|
||||
44
vendor/github.com/stretchr/objx/doc.go
generated
vendored
44
vendor/github.com/stretchr/objx/doc.go
generated
vendored
@@ -1,19 +1,19 @@
|
||||
/*
|
||||
Objx - Go package for dealing with maps, slices, JSON and other data.
|
||||
Package objx provides utilities for dealing with maps, slices, JSON and other data.
|
||||
|
||||
Overview
|
||||
# Overview
|
||||
|
||||
Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
|
||||
a powerful `Get` method (among others) that allows you to easily and quickly get
|
||||
access to data within the map, without having to worry too much about type assertions,
|
||||
missing data, default values etc.
|
||||
|
||||
Pattern
|
||||
# Pattern
|
||||
|
||||
Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy.
|
||||
Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy.
|
||||
Call one of the `objx.` functions to create your `objx.Map` to get going:
|
||||
|
||||
m, err := objx.FromJSON(json)
|
||||
m, err := objx.FromJSON(json)
|
||||
|
||||
NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
|
||||
the rest will be optimistic and try to figure things out without panicking.
|
||||
@@ -21,46 +21,46 @@ the rest will be optimistic and try to figure things out without panicking.
|
||||
Use `Get` to access the value you're interested in. You can use dot and array
|
||||
notation too:
|
||||
|
||||
m.Get("places[0].latlng")
|
||||
m.Get("places[0].latlng")
|
||||
|
||||
Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type.
|
||||
|
||||
if m.Get("code").IsStr() { // Your code... }
|
||||
if m.Get("code").IsStr() { // Your code... }
|
||||
|
||||
Or you can just assume the type, and use one of the strong type methods to extract the real value:
|
||||
|
||||
m.Get("code").Int()
|
||||
m.Get("code").Int()
|
||||
|
||||
If there's no value there (or if it's the wrong type) then a default value will be returned,
|
||||
or you can be explicit about the default value.
|
||||
|
||||
Get("code").Int(-1)
|
||||
Get("code").Int(-1)
|
||||
|
||||
If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating,
|
||||
manipulating and selecting that data. You can find out more by exploring the index below.
|
||||
|
||||
Reading data
|
||||
# Reading data
|
||||
|
||||
A simple example of how to use Objx:
|
||||
|
||||
// Use MustFromJSON to make an objx.Map from some JSON
|
||||
m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
|
||||
// Use MustFromJSON to make an objx.Map from some JSON
|
||||
m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
|
||||
|
||||
// Get the details
|
||||
name := m.Get("name").Str()
|
||||
age := m.Get("age").Int()
|
||||
// Get the details
|
||||
name := m.Get("name").Str()
|
||||
age := m.Get("age").Int()
|
||||
|
||||
// Get their nickname (or use their name if they don't have one)
|
||||
nickname := m.Get("nickname").Str(name)
|
||||
// Get their nickname (or use their name if they don't have one)
|
||||
nickname := m.Get("nickname").Str(name)
|
||||
|
||||
Ranging
|
||||
# Ranging
|
||||
|
||||
Since `objx.Map` is a `map[string]interface{}` you can treat it as such.
|
||||
For example, to `range` the data, do what you would expect:
|
||||
|
||||
m := objx.MustFromJSON(json)
|
||||
for key, value := range m {
|
||||
// Your code...
|
||||
}
|
||||
m := objx.MustFromJSON(json)
|
||||
for key, value := range m {
|
||||
// Your code...
|
||||
}
|
||||
*/
|
||||
package objx
|
||||
|
||||
9
vendor/github.com/stretchr/objx/map.go
generated
vendored
9
vendor/github.com/stretchr/objx/map.go
generated
vendored
@@ -47,17 +47,16 @@ func New(data interface{}) Map {
|
||||
//
|
||||
// The arguments follow a key, value pattern.
|
||||
//
|
||||
//
|
||||
// Returns nil if any key argument is non-string or if there are an odd number of arguments.
|
||||
//
|
||||
// Example
|
||||
// # Example
|
||||
//
|
||||
// To easily create Maps:
|
||||
//
|
||||
// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
|
||||
// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
|
||||
//
|
||||
// // creates an Map equivalent to
|
||||
// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}}
|
||||
// // creates an Map equivalent to
|
||||
// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}}
|
||||
func MSI(keyAndValuePairs ...interface{}) Map {
|
||||
newMap := Map{}
|
||||
keyAndValuePairsLen := len(keyAndValuePairs)
|
||||
|
||||
28
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
28
vendor/github.com/stretchr/testify/assert/assertion_compare.go
generated
vendored
@@ -28,6 +28,8 @@ var (
|
||||
uint32Type = reflect.TypeOf(uint32(1))
|
||||
uint64Type = reflect.TypeOf(uint64(1))
|
||||
|
||||
uintptrType = reflect.TypeOf(uintptr(1))
|
||||
|
||||
float32Type = reflect.TypeOf(float32(1))
|
||||
float64Type = reflect.TypeOf(float64(1))
|
||||
|
||||
@@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
case reflect.Struct:
|
||||
{
|
||||
// All structs enter here. We're not interested in most types.
|
||||
if !canConvert(obj1Value, timeType) {
|
||||
if !obj1Value.CanConvert(timeType) {
|
||||
break
|
||||
}
|
||||
|
||||
// time.Time can compared!
|
||||
// time.Time can be compared!
|
||||
timeObj1, ok := obj1.(time.Time)
|
||||
if !ok {
|
||||
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
|
||||
@@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
case reflect.Slice:
|
||||
{
|
||||
// We only care about the []byte type.
|
||||
if !canConvert(obj1Value, bytesType) {
|
||||
if !obj1Value.CanConvert(bytesType) {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
|
||||
|
||||
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
|
||||
}
|
||||
case reflect.Uintptr:
|
||||
{
|
||||
uintptrObj1, ok := obj1.(uintptr)
|
||||
if !ok {
|
||||
uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
|
||||
}
|
||||
uintptrObj2, ok := obj2.(uintptr)
|
||||
if !ok {
|
||||
uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
|
||||
}
|
||||
if uintptrObj1 > uintptrObj2 {
|
||||
return compareGreater, true
|
||||
}
|
||||
if uintptrObj1 == uintptrObj2 {
|
||||
return compareEqual, true
|
||||
}
|
||||
if uintptrObj1 < uintptrObj2 {
|
||||
return compareLess, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return compareEqual, false
|
||||
|
||||
16
vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
generated
vendored
16
vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
generated
vendored
@@ -1,16 +0,0 @@
|
||||
//go:build go1.17
|
||||
// +build go1.17
|
||||
|
||||
// TODO: once support for Go 1.16 is dropped, this file can be
|
||||
// merged/removed with assertion_compare_go1.17_test.go and
|
||||
// assertion_compare_legacy.go
|
||||
|
||||
package assert
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Wrapper around reflect.Value.CanConvert, for compatibility
|
||||
// reasons.
|
||||
func canConvert(value reflect.Value, to reflect.Type) bool {
|
||||
return value.CanConvert(to)
|
||||
}
|
||||
16
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
generated
vendored
16
vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
generated
vendored
@@ -1,16 +0,0 @@
|
||||
//go:build !go1.17
|
||||
// +build !go1.17
|
||||
|
||||
// TODO: once support for Go 1.16 is dropped, this file can be
|
||||
// merged/removed with assertion_compare_go1.17_test.go and
|
||||
// assertion_compare_can_convert.go
|
||||
|
||||
package assert
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Older versions of Go does not have the reflect.Value.CanConvert
|
||||
// method.
|
||||
func canConvert(value reflect.Value, to reflect.Type) bool {
|
||||
return false
|
||||
}
|
||||
32
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
32
vendor/github.com/stretchr/testify/assert/assertion_format.go
generated
vendored
@@ -1,7 +1,4 @@
|
||||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
|
||||
|
||||
package assert
|
||||
|
||||
@@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
|
||||
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertable to the same types
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
|
||||
@@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
|
||||
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotImplementsf asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
|
||||
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotNilf asserts that the specified object is not nil.
|
||||
//
|
||||
// assert.NotNilf(t, err, "error message %s", "formatted")
|
||||
@@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
|
||||
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// NotSubsetf asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
|
||||
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
|
||||
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
|
||||
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
|
||||
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
|
||||
}
|
||||
|
||||
// Subsetf asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subsetf asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
|
||||
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
|
||||
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
|
||||
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
|
||||
59
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
59
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
@@ -1,7 +1,4 @@
|
||||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
|
||||
|
||||
package assert
|
||||
|
||||
@@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
|
||||
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValues(uint32(123), int32(123))
|
||||
@@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
|
||||
return EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertable to the same types
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
|
||||
@@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
|
||||
return NotErrorIsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// NotImplements asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// a.NotImplements((*MyInterface)(nil), new(MyObject))
|
||||
func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotImplementsf asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
|
||||
func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
return NotImplementsf(a.t, interfaceObject, object, msg, args...)
|
||||
}
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// a.NotNil(err)
|
||||
@@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
|
||||
return NotSamef(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// NotSubset asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubset asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
|
||||
// a.NotSubset([1, 3, 4], [1, 2])
|
||||
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
|
||||
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
|
||||
return NotSubset(a.t, list, subset, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotSubsetf asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
|
||||
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
|
||||
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
|
||||
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
|
||||
return Samef(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// Subset asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subset asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
|
||||
// a.Subset([1, 2, 3], [1, 2])
|
||||
// a.Subset({"x": 1, "y": 2}, {"x": 1})
|
||||
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
|
||||
return Subset(a.t, list, subset, msgAndArgs...)
|
||||
}
|
||||
|
||||
// Subsetf asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subsetf asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
|
||||
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
|
||||
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
|
||||
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
|
||||
207
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
207
vendor/github.com/stretchr/testify/assert/assertions.go
generated
vendored
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
|
||||
@@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} {
|
||||
return result.Interface()
|
||||
|
||||
case reflect.Array, reflect.Slice:
|
||||
result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
|
||||
var result reflect.Value
|
||||
if expectedKind == reflect.Array {
|
||||
result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
|
||||
} else {
|
||||
result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
|
||||
}
|
||||
for i := 0; i < expectedValue.Len(); i++ {
|
||||
index := expectedValue.Index(i)
|
||||
if isNil(index) {
|
||||
@@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} {
|
||||
// structures.
|
||||
//
|
||||
// This function does no assertion of any kind.
|
||||
//
|
||||
// Deprecated: Use [EqualExportedValues] instead.
|
||||
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
|
||||
expectedCleaned := copyExportedFields(expected)
|
||||
actualCleaned := copyExportedFields(actual)
|
||||
@@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
actualType := reflect.TypeOf(actual)
|
||||
if actualType == nil {
|
||||
expectedValue := reflect.ValueOf(expected)
|
||||
actualValue := reflect.ValueOf(actual)
|
||||
if !expectedValue.IsValid() || !actualValue.IsValid() {
|
||||
return false
|
||||
}
|
||||
expectedValue := reflect.ValueOf(expected)
|
||||
if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
|
||||
// Attempt comparison after type conversion
|
||||
return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
|
||||
|
||||
expectedType := expectedValue.Type()
|
||||
actualType := actualValue.Type()
|
||||
if !expectedType.ConvertibleTo(actualType) {
|
||||
return false
|
||||
}
|
||||
|
||||
return false
|
||||
if !isNumericType(expectedType) || !isNumericType(actualType) {
|
||||
// Attempt comparison after type conversion
|
||||
return reflect.DeepEqual(
|
||||
expectedValue.Convert(actualType).Interface(), actual,
|
||||
)
|
||||
}
|
||||
|
||||
// If BOTH values are numeric, there are chances of false positives due
|
||||
// to overflow or underflow. So, we need to make sure to always convert
|
||||
// the smaller type to a larger type before comparing.
|
||||
if expectedType.Size() >= actualType.Size() {
|
||||
return actualValue.Convert(expectedType).Interface() == expected
|
||||
}
|
||||
|
||||
return expectedValue.Convert(actualType).Interface() == actual
|
||||
}
|
||||
|
||||
// isNumericType returns true if the type is one of:
|
||||
// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
|
||||
// float32, float64, complex64, complex128
|
||||
func isNumericType(t reflect.Type) bool {
|
||||
return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
|
||||
}
|
||||
|
||||
/* CallerInfo is necessary because the assert functions use the testing object
|
||||
@@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
|
||||
|
||||
// Aligns the provided message so that all lines after the first line start at the same location as the first line.
|
||||
// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
|
||||
// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
|
||||
// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
|
||||
// basis on which the alignment occurs).
|
||||
func indentMessageLines(message string, longestLabelLen int) string {
|
||||
outBuf := new(bytes.Buffer)
|
||||
@@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
|
||||
return true
|
||||
}
|
||||
|
||||
// NotImplements asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
|
||||
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
interfaceType := reflect.TypeOf(interfaceObject).Elem()
|
||||
|
||||
if object == nil {
|
||||
return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
|
||||
}
|
||||
if reflect.TypeOf(object).Implements(interfaceType) {
|
||||
return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// IsType asserts that the specified objects are of the same type.
|
||||
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
@@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool {
|
||||
// representations appropriate to be presented to the user.
|
||||
//
|
||||
// If the values are not of like type, the returned strings will be prefixed
|
||||
// with the type name, and the value will be enclosed in parenthesis similar
|
||||
// with the type name, and the value will be enclosed in parentheses similar
|
||||
// to a type conversion in the Go grammar.
|
||||
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
|
||||
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
|
||||
@@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string {
|
||||
return value
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// assert.EqualValues(t, uint32(123), int32(123))
|
||||
@@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
|
||||
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
|
||||
}
|
||||
|
||||
if aType.Kind() == reflect.Ptr {
|
||||
aType = aType.Elem()
|
||||
}
|
||||
if bType.Kind() == reflect.Ptr {
|
||||
bType = bType.Elem()
|
||||
}
|
||||
|
||||
if aType.Kind() != reflect.Struct {
|
||||
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
|
||||
}
|
||||
|
||||
if bType.Kind() != reflect.Struct {
|
||||
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
|
||||
}
|
||||
|
||||
expected = copyExportedFields(expected)
|
||||
@@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
|
||||
}
|
||||
|
||||
// containsKind checks if a specified kind in the slice of kinds.
|
||||
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
|
||||
for i := 0; i < len(kinds); i++ {
|
||||
if kind == kinds[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// isNil checks if a specified object is nil or not, without Failing.
|
||||
func isNil(object interface{}) bool {
|
||||
if object == nil {
|
||||
@@ -638,16 +683,13 @@ func isNil(object interface{}) bool {
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(object)
|
||||
kind := value.Kind()
|
||||
isNilableKind := containsKind(
|
||||
[]reflect.Kind{
|
||||
reflect.Chan, reflect.Func,
|
||||
reflect.Interface, reflect.Map,
|
||||
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
|
||||
kind)
|
||||
switch value.Kind() {
|
||||
case
|
||||
reflect.Chan, reflect.Func,
|
||||
reflect.Interface, reflect.Map,
|
||||
reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
|
||||
|
||||
if isNilableKind && value.IsNil() {
|
||||
return true
|
||||
return value.IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
@@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
|
||||
|
||||
}
|
||||
|
||||
// getLen try to get length of object.
|
||||
// return (false, 0) if impossible.
|
||||
func getLen(x interface{}) (ok bool, length int) {
|
||||
// getLen tries to get the length of an object.
|
||||
// It returns (0, false) if impossible.
|
||||
func getLen(x interface{}) (length int, ok bool) {
|
||||
v := reflect.ValueOf(x)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
ok = false
|
||||
}
|
||||
ok = recover() == nil
|
||||
}()
|
||||
return true, v.Len()
|
||||
return v.Len(), true
|
||||
}
|
||||
|
||||
// Len asserts that the specified object has specific length.
|
||||
@@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
ok, l := getLen(object)
|
||||
l, ok := getLen(object)
|
||||
if !ok {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
|
||||
}
|
||||
|
||||
if l != length {
|
||||
return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
|
||||
return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
|
||||
}
|
||||
return true
|
||||
}
|
||||
@@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
|
||||
|
||||
}
|
||||
|
||||
// Subset asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subset asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
|
||||
// assert.Subset(t, [1, 2, 3], [1, 2])
|
||||
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
|
||||
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
|
||||
return true
|
||||
}
|
||||
|
||||
// NotSubset asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubset asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
|
||||
// assert.NotSubset(t, [1, 3, 4], [1, 2])
|
||||
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
|
||||
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
|
||||
h.Helper()
|
||||
}
|
||||
if math.IsNaN(epsilon) {
|
||||
return Fail(t, "epsilon must not be NaN")
|
||||
return Fail(t, "epsilon must not be NaN", msgAndArgs...)
|
||||
}
|
||||
actualEpsilon, err := calcRelativeError(expected, actual)
|
||||
if err != nil {
|
||||
@@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if expected == nil || actual == nil ||
|
||||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
|
||||
reflect.TypeOf(expected).Kind() != reflect.Slice {
|
||||
|
||||
if expected == nil || actual == nil {
|
||||
return Fail(t, "Parameters must be slice", msgAndArgs...)
|
||||
}
|
||||
|
||||
actualSlice := reflect.ValueOf(actual)
|
||||
expectedSlice := reflect.ValueOf(expected)
|
||||
actualSlice := reflect.ValueOf(actual)
|
||||
|
||||
for i := 0; i < actualSlice.Len(); i++ {
|
||||
result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
|
||||
if !result {
|
||||
return result
|
||||
if expectedSlice.Type().Kind() != reflect.Slice {
|
||||
return Fail(t, "Expected value must be slice", msgAndArgs...)
|
||||
}
|
||||
|
||||
expectedLen := expectedSlice.Len()
|
||||
if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := 0; i < expectedLen; i++ {
|
||||
if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
|
||||
}
|
||||
|
||||
// FailNow panics.
|
||||
func (c *CollectT) FailNow() {
|
||||
func (*CollectT) FailNow() {
|
||||
panic("Assertion failed")
|
||||
}
|
||||
|
||||
// Reset clears the collected errors.
|
||||
func (c *CollectT) Reset() {
|
||||
c.errors = nil
|
||||
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
|
||||
func (*CollectT) Reset() {
|
||||
panic("Reset() is deprecated")
|
||||
}
|
||||
|
||||
// Copy copies the collected errors to the supplied t.
|
||||
func (c *CollectT) Copy(t TestingT) {
|
||||
if tt, ok := t.(tHelper); ok {
|
||||
tt.Helper()
|
||||
}
|
||||
for _, err := range c.errors {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
|
||||
func (*CollectT) Copy(TestingT) {
|
||||
panic("Copy() is deprecated")
|
||||
}
|
||||
|
||||
// EventuallyWithT asserts that given condition will be met in waitFor time,
|
||||
@@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
|
||||
h.Helper()
|
||||
}
|
||||
|
||||
collect := new(CollectT)
|
||||
ch := make(chan bool, 1)
|
||||
var lastFinishedTickErrs []error
|
||||
ch := make(chan []error, 1)
|
||||
|
||||
timer := time.NewTimer(waitFor)
|
||||
defer timer.Stop()
|
||||
@@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
|
||||
for tick := ticker.C; ; {
|
||||
select {
|
||||
case <-timer.C:
|
||||
collect.Copy(t)
|
||||
for _, err := range lastFinishedTickErrs {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
return Fail(t, "Condition never satisfied", msgAndArgs...)
|
||||
case <-tick:
|
||||
tick = nil
|
||||
collect.Reset()
|
||||
go func() {
|
||||
collect := new(CollectT)
|
||||
defer func() {
|
||||
ch <- collect.errors
|
||||
}()
|
||||
condition(collect)
|
||||
ch <- len(collect.errors) == 0
|
||||
}()
|
||||
case v := <-ch:
|
||||
if v {
|
||||
case errs := <-ch:
|
||||
if len(errs) == 0 {
|
||||
return true
|
||||
}
|
||||
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
|
||||
lastFinishedTickErrs = errs
|
||||
tick = ticker.C
|
||||
}
|
||||
}
|
||||
|
||||
27
vendor/github.com/stretchr/testify/assert/http_assertions.go
generated
vendored
27
vendor/github.com/stretchr/testify/assert/http_assertions.go
generated
vendored
@@ -12,7 +12,7 @@ import (
|
||||
// an error if building a new request fails.
|
||||
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
|
||||
w := httptest.NewRecorder()
|
||||
req, err := http.NewRequest(method, url, nil)
|
||||
req, err := http.NewRequest(method, url, http.NoBody)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
@@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
|
||||
}
|
||||
|
||||
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
|
||||
if !isSuccessCode {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
|
||||
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
|
||||
}
|
||||
|
||||
return isSuccessCode
|
||||
@@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
|
||||
}
|
||||
|
||||
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
|
||||
if !isRedirectCode {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
|
||||
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
|
||||
}
|
||||
|
||||
return isRedirectCode
|
||||
@@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
|
||||
}
|
||||
|
||||
isErrorCode := code >= http.StatusBadRequest
|
||||
if !isErrorCode {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
|
||||
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
|
||||
}
|
||||
|
||||
return isErrorCode
|
||||
@@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
|
||||
}
|
||||
code, err := httpCode(handler, method, url, values)
|
||||
if err != nil {
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
|
||||
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
|
||||
}
|
||||
|
||||
successful := code == statuscode
|
||||
if !successful {
|
||||
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
|
||||
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
|
||||
}
|
||||
|
||||
return successful
|
||||
@@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
|
||||
// empty string if building a new request fails.
|
||||
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
|
||||
w := httptest.NewRecorder()
|
||||
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
|
||||
if len(values) > 0 {
|
||||
url += "?" + values.Encode()
|
||||
}
|
||||
req, err := http.NewRequest(method, url, http.NoBody)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
@@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
|
||||
|
||||
contains := strings.Contains(body, fmt.Sprint(str))
|
||||
if !contains {
|
||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
|
||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
|
||||
}
|
||||
|
||||
return contains
|
||||
@@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
|
||||
|
||||
contains := strings.Contains(body, fmt.Sprint(str))
|
||||
if contains {
|
||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
|
||||
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
|
||||
}
|
||||
|
||||
return !contains
|
||||
|
||||
129
vendor/github.com/stretchr/testify/mock/mock.go
generated
vendored
129
vendor/github.com/stretchr/testify/mock/mock.go
generated
vendored
@@ -18,6 +18,9 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// regex for GCCGO functions
|
||||
var gccgoRE = regexp.MustCompile(`\.pN\d+_`)
|
||||
|
||||
// TestingT is an interface wrapper around *testing.T
|
||||
type TestingT interface {
|
||||
Logf(format string, args ...interface{})
|
||||
@@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call {
|
||||
return c
|
||||
}
|
||||
|
||||
// Panic specifies if the functon call should fail and the panic message
|
||||
// Panic specifies if the function call should fail and the panic message
|
||||
//
|
||||
// Mock.On("DoSomething").Panic("test panic")
|
||||
func (c *Call) Panic(msg string) *Call {
|
||||
@@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call {
|
||||
return c
|
||||
}
|
||||
|
||||
// Once indicates that that the mock should only return the value once.
|
||||
// Once indicates that the mock should only return the value once.
|
||||
//
|
||||
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
|
||||
func (c *Call) Once() *Call {
|
||||
return c.Times(1)
|
||||
}
|
||||
|
||||
// Twice indicates that that the mock should only return the value twice.
|
||||
// Twice indicates that the mock should only return the value twice.
|
||||
//
|
||||
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
|
||||
func (c *Call) Twice() *Call {
|
||||
return c.Times(2)
|
||||
}
|
||||
|
||||
// Times indicates that that the mock should only return the indicated number
|
||||
// Times indicates that the mock should only return the indicated number
|
||||
// of times.
|
||||
//
|
||||
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
|
||||
@@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments {
|
||||
// For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
|
||||
// uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree
|
||||
// With GCCGO we need to remove interface information starting from pN<dd>.
|
||||
re := regexp.MustCompile("\\.pN\\d+_")
|
||||
if re.MatchString(functionPath) {
|
||||
functionPath = re.Split(functionPath, -1)[0]
|
||||
if gccgoRE.MatchString(functionPath) {
|
||||
functionPath = gccgoRE.Split(functionPath, -1)[0]
|
||||
}
|
||||
parts := strings.Split(functionPath, ".")
|
||||
functionName := parts[len(parts)-1]
|
||||
@@ -474,7 +476,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
|
||||
found, call := m.findExpectedCall(methodName, arguments...)
|
||||
|
||||
if found < 0 {
|
||||
// expected call found but it has already been called with repeatable times
|
||||
// expected call found, but it has already been called with repeatable times
|
||||
if call != nil {
|
||||
m.mutex.Unlock()
|
||||
m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo())
|
||||
@@ -563,7 +565,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
|
||||
Assertions
|
||||
*/
|
||||
|
||||
type assertExpectationser interface {
|
||||
type assertExpectationiser interface {
|
||||
AssertExpectations(TestingT) bool
|
||||
}
|
||||
|
||||
@@ -580,7 +582,7 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
|
||||
t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)")
|
||||
obj = m
|
||||
}
|
||||
m := obj.(assertExpectationser)
|
||||
m := obj.(assertExpectationiser)
|
||||
if !m.AssertExpectations(t) {
|
||||
t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m))
|
||||
return false
|
||||
@@ -592,6 +594,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
|
||||
// AssertExpectations asserts that everything specified with On and Return was
|
||||
// in fact called as expected. Calls may have occurred in any order.
|
||||
func (m *Mock) AssertExpectations(t TestingT) bool {
|
||||
if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() {
|
||||
return true
|
||||
}
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
@@ -606,8 +611,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool {
|
||||
satisfied, reason := m.checkExpectation(expectedCall)
|
||||
if !satisfied {
|
||||
failedExpectations++
|
||||
t.Logf(reason)
|
||||
}
|
||||
t.Logf(reason)
|
||||
}
|
||||
|
||||
if failedExpectations != 0 {
|
||||
@@ -758,25 +763,33 @@ const (
|
||||
Anything = "mock.Anything"
|
||||
)
|
||||
|
||||
// AnythingOfTypeArgument is a string that contains the type of an argument
|
||||
// AnythingOfTypeArgument contains the type of an argument
|
||||
// for use when type checking. Used in Diff and Assert.
|
||||
type AnythingOfTypeArgument string
|
||||
//
|
||||
// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead.
|
||||
type AnythingOfTypeArgument = anythingOfTypeArgument
|
||||
|
||||
// AnythingOfType returns an AnythingOfTypeArgument object containing the
|
||||
// name of the type to check for. Used in Diff and Assert.
|
||||
// anythingOfTypeArgument is a string that contains the type of an argument
|
||||
// for use when type checking. Used in Diff and Assert.
|
||||
type anythingOfTypeArgument string
|
||||
|
||||
// AnythingOfType returns a special value containing the
|
||||
// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String].
|
||||
//
|
||||
// Used in Diff and Assert.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// Assert(t, AnythingOfType("string"), AnythingOfType("int"))
|
||||
func AnythingOfType(t string) AnythingOfTypeArgument {
|
||||
return AnythingOfTypeArgument(t)
|
||||
return anythingOfTypeArgument(t)
|
||||
}
|
||||
|
||||
// IsTypeArgument is a struct that contains the type of an argument
|
||||
// for use when type checking. This is an alternative to AnythingOfType.
|
||||
// Used in Diff and Assert.
|
||||
type IsTypeArgument struct {
|
||||
t interface{}
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// IsType returns an IsTypeArgument object containing the type to check for.
|
||||
@@ -786,7 +799,7 @@ type IsTypeArgument struct {
|
||||
// For example:
|
||||
// Assert(t, IsType(""), IsType(0))
|
||||
func IsType(t interface{}) *IsTypeArgument {
|
||||
return &IsTypeArgument{t: t}
|
||||
return &IsTypeArgument{t: reflect.TypeOf(t)}
|
||||
}
|
||||
|
||||
// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument
|
||||
@@ -950,53 +963,55 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher)
|
||||
}
|
||||
} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
|
||||
// type checking
|
||||
if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
|
||||
// not match
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt)
|
||||
}
|
||||
} else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) {
|
||||
t := expected.(*IsTypeArgument).t
|
||||
if reflect.TypeOf(t) != reflect.TypeOf(actual) {
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt)
|
||||
}
|
||||
} else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) {
|
||||
t := expected.(*FunctionalOptionsArgument).value
|
||||
} else {
|
||||
switch expected := expected.(type) {
|
||||
case anythingOfTypeArgument:
|
||||
// type checking
|
||||
if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) {
|
||||
// not match
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt)
|
||||
}
|
||||
case *IsTypeArgument:
|
||||
actualT := reflect.TypeOf(actual)
|
||||
if actualT != expected.t {
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt)
|
||||
}
|
||||
case *FunctionalOptionsArgument:
|
||||
t := expected.value
|
||||
|
||||
var name string
|
||||
tValue := reflect.ValueOf(t)
|
||||
if tValue.Len() > 0 {
|
||||
name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String()
|
||||
}
|
||||
var name string
|
||||
tValue := reflect.ValueOf(t)
|
||||
if tValue.Len() > 0 {
|
||||
name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String()
|
||||
}
|
||||
|
||||
tName := reflect.TypeOf(t).Name()
|
||||
if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 {
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt)
|
||||
} else {
|
||||
if ef, af := assertOpts(t, actual); ef == "" && af == "" {
|
||||
tName := reflect.TypeOf(t).Name()
|
||||
if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 {
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt)
|
||||
} else {
|
||||
if ef, af := assertOpts(t, actual); ef == "" && af == "" {
|
||||
// match
|
||||
output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName)
|
||||
} else {
|
||||
// not match
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
|
||||
// match
|
||||
output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName)
|
||||
output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt)
|
||||
} else {
|
||||
// not match
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef)
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// normal checking
|
||||
|
||||
if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
|
||||
// match
|
||||
output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt)
|
||||
} else {
|
||||
// not match
|
||||
differences++
|
||||
output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
65
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
65
vendor/github.com/stretchr/testify/require/require.go
generated
vendored
@@ -1,7 +1,4 @@
|
||||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
|
||||
|
||||
package require
|
||||
|
||||
@@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// assert.EqualValues(t, uint32(123), int32(123))
|
||||
@@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertable to the same types
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
|
||||
@@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotImplements asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
|
||||
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotImplementsf asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
|
||||
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
if assert.NotImplementsf(t, interfaceObject, object, msg, args...) {
|
||||
return
|
||||
}
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// assert.NotNil(t, err)
|
||||
@@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotSubset asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubset asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
|
||||
// assert.NotSubset(t, [1, 3, 4], [1, 2])
|
||||
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
|
||||
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// NotSubsetf asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
|
||||
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
|
||||
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
|
||||
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Subset asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subset asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
|
||||
// assert.Subset(t, [1, 2, 3], [1, 2])
|
||||
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
|
||||
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
// Subsetf asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subsetf asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
|
||||
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
|
||||
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
|
||||
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := t.(tHelper); ok {
|
||||
h.Helper()
|
||||
|
||||
59
vendor/github.com/stretchr/testify/require/require_forward.go
generated
vendored
@@ -1,7 +1,4 @@
|
||||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
|
||||
|
||||
package require
|
||||
|
||||
@@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
|
||||
EqualExportedValuesf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValues(uint32(123), int32(123))
|
||||
@@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
|
||||
EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertable to the same types
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
|
||||
@@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
|
||||
NotErrorIsf(a.t, err, target, msg, args...)
|
||||
}
|
||||
|
||||
// NotImplements asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// a.NotImplements((*MyInterface)(nil), new(MyObject))
|
||||
func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
NotImplements(a.t, interfaceObject, object, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotImplementsf asserts that an object does not implement the specified interface.
|
||||
//
|
||||
// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
|
||||
func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
}
|
||||
NotImplementsf(a.t, interfaceObject, object, msg, args...)
|
||||
}
|
||||
|
||||
// NotNil asserts that the specified object is not nil.
|
||||
//
|
||||
// a.NotNil(err)
|
||||
@@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
|
||||
NotSamef(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// NotSubset asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubset asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
|
||||
// a.NotSubset([1, 3, 4], [1, 2])
|
||||
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
|
||||
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
|
||||
NotSubset(a.t, list, subset, msgAndArgs...)
|
||||
}
|
||||
|
||||
// NotSubsetf asserts that the specified list(array, slice...) contains not all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
|
||||
// contain all elements given in the specified subset list(array, slice...) or
|
||||
// map.
|
||||
//
|
||||
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
|
||||
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
|
||||
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
|
||||
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
|
||||
Samef(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// Subset asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subset asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
|
||||
// a.Subset([1, 2, 3], [1, 2])
|
||||
// a.Subset({"x": 1, "y": 2}, {"x": 1})
|
||||
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
@@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
|
||||
Subset(a.t, list, subset, msgAndArgs...)
|
||||
}
|
||||
|
||||
// Subsetf asserts that the specified list(array, slice...) contains all
|
||||
// elements given in the specified subset(array, slice...).
|
||||
// Subsetf asserts that the specified list(array, slice...) or map contains all
|
||||
// elements given in the specified subset list(array, slice...) or map.
|
||||
//
|
||||
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
|
||||
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
|
||||
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
|
||||
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
|
||||
if h, ok := a.t.(tHelper); ok {
|
||||
h.Helper()
|
||||
|
||||
8
vendor/golang.org/x/crypto/ssh/keys.go
generated
vendored
@@ -904,6 +904,10 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
	return errors.New("ssh: signature did not verify")
}

func (k *skECDSAPublicKey) CryptoPublicKey() crypto.PublicKey {
	return &k.PublicKey
}

type skEd25519PublicKey struct {
	// application is a URL-like string, typically "ssh:" for SSH.
	// see openssh/PROTOCOL.u2f for details.
@@ -1000,6 +1004,10 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
	return nil
}

func (k *skEd25519PublicKey) CryptoPublicKey() crypto.PublicKey {
	return k.PublicKey
}
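These additions expose the underlying crypto.PublicKey for the security-key (sk-*) key types. A hedged sketch of how a caller might use that through a type assertion; the helper name cryptoKeyOf and the placeholder key material are illustrative only.

package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// cryptoKeyOf recovers the underlying crypto.PublicKey when the ssh.PublicKey
// implementation exposes one, which the sk-ecdsa and sk-ed25519 types now do.
func cryptoKeyOf(pub ssh.PublicKey) (crypto.PublicKey, bool) {
	ck, ok := pub.(interface{ CryptoPublicKey() crypto.PublicKey })
	if !ok {
		return nil, false
	}
	return ck.CryptoPublicKey(), true
}

func main() {
	// The key below is a placeholder, so parsing is expected to fail outside a
	// real setup; the point is the type assertion above.
	pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte("sk-ssh-ed25519@openssh.com AAAA... user@host"))
	if err != nil {
		fmt.Println("parse error (placeholder key):", err)
		return
	}
	if k, ok := cryptoKeyOf(pub); ok {
		fmt.Printf("underlying key type: %T\n", k)
	}
}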

// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
// *ecdsa.PrivateKey or any other crypto.Signer and returns a
// corresponding Signer instance. ECDSA keys must use P-256, P-384 or
30
vendor/golang.org/x/crypto/ssh/server.go
generated
vendored
@@ -462,6 +462,24 @@ func (p *PartialSuccessError) Error() string {
// It is returned in ServerAuthError.Errors from NewServerConn.
var ErrNoAuth = errors.New("ssh: no auth passed yet")

// BannerError is an error that can be returned by authentication handlers in
// ServerConfig to send a banner message to the client.
type BannerError struct {
	Err     error
	Message string
}

func (b *BannerError) Unwrap() error {
	return b.Err
}

func (b *BannerError) Error() string {
	if b.Err == nil {
		return b.Message
	}
	return b.Err.Error()
}
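A minimal sketch, assuming a hypothetical server setup, of how an authentication callback can return the new BannerError so the client sees a message before the failure is reported.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ServerConfig{
		// Hypothetical policy: reject password logins but explain why. The
		// transport sends Message as an SSH banner before reporting failure.
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
			return nil, &ssh.BannerError{
				Err:     errors.New("password authentication is disabled"),
				Message: "Password logins are disabled; use your hardware token.\n",
			}
		},
	}
	// A real server would also add a host key and hand cfg to ssh.NewServerConn.
	fmt.Println("callbacks configured:", cfg.PasswordCallback != nil)
}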

func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
	sessionID := s.transport.getSessionID()
	var cache pubKeyCache
@@ -734,6 +752,18 @@ userAuthLoop:
			config.AuthLogCallback(s, userAuthReq.Method, authErr)
		}

		var bannerErr *BannerError
		if errors.As(authErr, &bannerErr) {
			if bannerErr.Message != "" {
				bannerMsg := &userAuthBannerMsg{
					Message: bannerErr.Message,
				}
				if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
					return nil, err
				}
			}
		}

		if authErr == nil {
			break userAuthLoop
		}
19
vendor/golang.org/x/net/http2/http2.go
generated
vendored
@@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -26,6 +27,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
)
|
||||
@@ -210,12 +212,6 @@ type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// A gate lets two goroutines coordinate their activities.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) Done() { g <- struct{}{} }
|
||||
func (g gate) Wait() { <-g }
|
||||
|
||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||
type closeWaiter chan struct{}
|
||||
|
||||
@@ -383,3 +379,14 @@ func validPseudoPath(v string) bool {
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
|
||||
|
||||
// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
|
||||
// It's defined as an interface here to let us keep synctestGroup entirely test-only
|
||||
// and not a part of non-test builds.
|
||||
type synctestGroupInterface interface {
|
||||
Join()
|
||||
Now() time.Time
|
||||
NewTimer(d time.Duration) timer
|
||||
AfterFunc(d time.Duration, f func()) timer
|
||||
ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
|
||||
}
|
||||
|
||||
94
vendor/golang.org/x/net/http2/server.go
generated
vendored
@@ -154,6 +154,39 @@ type Server struct {
|
||||
// so that we don't embed a Mutex in this struct, which will make the
|
||||
// struct non-copyable, which might break some callers.
|
||||
state *serverInternalState
|
||||
|
||||
// Synchronization group used for testing.
|
||||
// Outside of tests, this is nil.
|
||||
group synctestGroupInterface
|
||||
}
|
||||
|
||||
func (s *Server) markNewGoroutine() {
|
||||
if s.group != nil {
|
||||
s.group.Join()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) now() time.Time {
|
||||
if s.group != nil {
|
||||
return s.group.Now()
|
||||
}
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// newTimer creates a new time.Timer, or a synthetic timer in tests.
|
||||
func (s *Server) newTimer(d time.Duration) timer {
|
||||
if s.group != nil {
|
||||
return s.group.NewTimer(d)
|
||||
}
|
||||
return timeTimer{time.NewTimer(d)}
|
||||
}
|
||||
|
||||
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
|
||||
func (s *Server) afterFunc(d time.Duration, f func()) timer {
|
||||
if s.group != nil {
|
||||
return s.group.AfterFunc(d, f)
|
||||
}
|
||||
return timeTimer{time.AfterFunc(d, f)}
|
||||
}
|
||||
|
||||
func (s *Server) initialConnRecvWindowSize() int32 {
|
||||
@@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler {
|
||||
//
|
||||
// The opts parameter is optional. If nil, default values are used.
|
||||
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||
s.serveConn(c, opts, nil)
|
||||
}
|
||||
|
||||
func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
|
||||
baseCtx, cancel := serverConnBaseContext(c, opts)
|
||||
defer cancel()
|
||||
|
||||
@@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
|
||||
pushEnabled: true,
|
||||
sawClientPreface: opts.SawClientPreface,
|
||||
}
|
||||
if newf != nil {
|
||||
newf(sc)
|
||||
}
|
||||
|
||||
s.state.registerConn(sc)
|
||||
defer s.state.unregisterConn(sc)
|
||||
@@ -599,8 +639,8 @@ type serverConn struct {
|
||||
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
||||
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
||||
goAwayCode ErrCode
|
||||
shutdownTimer *time.Timer // nil until used
|
||||
idleTimer *time.Timer // nil if unused
|
||||
shutdownTimer timer // nil until used
|
||||
idleTimer timer // nil if unused
|
||||
|
||||
// Owned by the writeFrameAsync goroutine:
|
||||
headerWriteBuf bytes.Buffer
|
||||
@@ -649,12 +689,12 @@ type stream struct {
|
||||
flow outflow // limits writing from Handler to client
|
||||
inflow inflow // what the client is allowed to POST/etc to us
|
||||
state streamState
|
||||
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
||||
gotTrailerHeader bool // HEADER frame for trailers was seen
|
||||
wroteHeaders bool // whether we wrote headers (not status 100)
|
||||
readDeadline *time.Timer // nil if unused
|
||||
writeDeadline *time.Timer // nil if unused
|
||||
closeErr error // set before cw is closed
|
||||
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
|
||||
gotTrailerHeader bool // HEADER frame for trailers was seen
|
||||
wroteHeaders bool // whether we wrote headers (not status 100)
|
||||
readDeadline timer // nil if unused
|
||||
writeDeadline timer // nil if unused
|
||||
closeErr error // set before cw is closed
|
||||
|
||||
trailer http.Header // accumulated trailers
|
||||
reqTrailer http.Header // handler's Request.Trailer
|
||||
@@ -811,8 +851,9 @@ type readFrameResult struct {
|
||||
// consumer is done with the frame.
|
||||
// It's run on its own goroutine.
|
||||
func (sc *serverConn) readFrames() {
|
||||
gate := make(gate)
|
||||
gateDone := gate.Done
|
||||
sc.srv.markNewGoroutine()
|
||||
gate := make(chan struct{})
|
||||
gateDone := func() { gate <- struct{}{} }
|
||||
for {
|
||||
f, err := sc.framer.ReadFrame()
|
||||
select {
|
||||
@@ -843,6 +884,7 @@ type frameWriteResult struct {
|
||||
// At most one goroutine can be running writeFrameAsync at a time per
|
||||
// serverConn.
|
||||
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
|
||||
sc.srv.markNewGoroutine()
|
||||
var err error
|
||||
if wd == nil {
|
||||
err = wr.write.writeFrame(sc)
|
||||
@@ -922,13 +964,13 @@ func (sc *serverConn) serve() {
|
||||
sc.setConnState(http.StateIdle)
|
||||
|
||||
if sc.srv.IdleTimeout > 0 {
|
||||
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
|
||||
sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
|
||||
defer sc.idleTimer.Stop()
|
||||
}
|
||||
|
||||
go sc.readFrames() // closed by defer sc.conn.Close above
|
||||
|
||||
settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
||||
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
||||
defer settingsTimer.Stop()
|
||||
|
||||
loopNum := 0
|
||||
@@ -1057,10 +1099,10 @@ func (sc *serverConn) readPreface() error {
|
||||
errc <- nil
|
||||
}
|
||||
}()
|
||||
timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
|
||||
timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-timer.C():
|
||||
return errPrefaceTimeout
|
||||
case err := <-errc:
|
||||
if err == nil {
|
||||
@@ -1425,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) {
|
||||
|
||||
func (sc *serverConn) shutDownIn(d time.Duration) {
|
||||
sc.serveG.check()
|
||||
sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
|
||||
sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
|
||||
}
|
||||
|
||||
func (sc *serverConn) resetStream(se StreamError) {
|
||||
@@ -1639,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||
delete(sc.streams, st.id)
|
||||
if len(sc.streams) == 0 {
|
||||
sc.setConnState(http.StateIdle)
|
||||
if sc.srv.IdleTimeout > 0 {
|
||||
if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
|
||||
sc.idleTimer.Reset(sc.srv.IdleTimeout)
|
||||
}
|
||||
if h1ServerKeepAlivesDisabled(sc.hs) {
|
||||
@@ -1661,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
|
||||
}
|
||||
}
|
||||
st.closeErr = err
|
||||
st.cancelCtx()
|
||||
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
|
||||
sc.writeSched.CloseStream(st.id)
|
||||
}
|
||||
@@ -2021,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
||||
// (in Go 1.8), though. That's a more sane option anyway.
|
||||
if sc.hs.ReadTimeout > 0 {
|
||||
sc.conn.SetReadDeadline(time.Time{})
|
||||
st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
||||
st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
|
||||
}
|
||||
|
||||
return sc.scheduleHandler(id, rw, req, handler)
|
||||
@@ -2119,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
||||
st.flow.add(sc.initialStreamSendWindowSize)
|
||||
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
|
||||
if sc.hs.WriteTimeout > 0 {
|
||||
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
||||
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
||||
}
|
||||
|
||||
sc.streams[id] = st
|
||||
@@ -2343,6 +2386,7 @@ func (sc *serverConn) handlerDone() {
|
||||
|
||||
// Run on its own goroutine.
|
||||
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
|
||||
sc.srv.markNewGoroutine()
|
||||
defer sc.sendServeMsg(handlerDoneMsg)
|
||||
didPanic := true
|
||||
defer func() {
|
||||
@@ -2639,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
|
||||
var date string
|
||||
if _, ok := rws.snapHeader["Date"]; !ok {
|
||||
// TODO(bradfitz): be faster here, like net/http? measure.
|
||||
date = time.Now().UTC().Format(http.TimeFormat)
|
||||
date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
|
||||
}
|
||||
|
||||
for _, v := range rws.snapHeader["Trailer"] {
|
||||
@@ -2761,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
|
||||
|
||||
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
||||
st := w.rws.stream
|
||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
||||
if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
|
||||
// If we're setting a deadline in the past, reset the stream immediately
|
||||
// so writes after SetWriteDeadline returns will fail.
|
||||
st.onReadTimeout()
|
||||
@@ -2777,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
||||
if deadline.IsZero() {
|
||||
st.readDeadline = nil
|
||||
} else if st.readDeadline == nil {
|
||||
st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
|
||||
st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
|
||||
} else {
|
||||
st.readDeadline.Reset(deadline.Sub(time.Now()))
|
||||
st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
|
||||
}
|
||||
})
|
||||
return nil
|
||||
@@ -2787,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
|
||||
|
||||
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
|
||||
st := w.rws.stream
|
||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
||||
if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
|
||||
// If we're setting a deadline in the past, reset the stream immediately
|
||||
// so writes after SetWriteDeadline returns will fail.
|
||||
st.onWriteTimeout()
|
||||
@@ -2803,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
|
||||
if deadline.IsZero() {
|
||||
st.writeDeadline = nil
|
||||
} else if st.writeDeadline == nil {
|
||||
st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
|
||||
st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
|
||||
} else {
|
||||
st.writeDeadline.Reset(deadline.Sub(time.Now()))
|
||||
st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
|
||||
}
|
||||
})
|
||||
return nil
|
||||
|
||||
331
vendor/golang.org/x/net/http2/testsync.go
generated
vendored
@@ -1,331 +0,0 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
package http2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// testSyncHooks coordinates goroutines in tests.
|
||||
//
|
||||
// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
|
||||
// - the goroutine running RoundTrip;
|
||||
// - the clientStream.doRequest goroutine, which writes the request; and
|
||||
// - the clientStream.readLoop goroutine, which reads the response.
|
||||
//
|
||||
// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
|
||||
// are blocked waiting for some condition such as reading the Request.Body or waiting for
|
||||
// flow control to become available.
|
||||
//
|
||||
// The testSyncHooks also manage timers and synthetic time in tests.
|
||||
// This permits us to, for example, start a request and cause it to time out waiting for
|
||||
// response headers without resorting to time.Sleep calls.
|
||||
type testSyncHooks struct {
|
||||
// active/inactive act as a mutex and condition variable.
|
||||
//
|
||||
// - neither chan contains a value: testSyncHooks is locked.
|
||||
// - active contains a value: unlocked, and at least one goroutine is not blocked
|
||||
// - inactive contains a value: unlocked, and all goroutines are blocked
|
||||
active chan struct{}
|
||||
inactive chan struct{}
|
||||
|
||||
// goroutine counts
|
||||
total int // total goroutines
|
||||
condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
|
||||
blocked []*testBlockedGoroutine // otherwise blocked
|
||||
|
||||
// fake time
|
||||
now time.Time
|
||||
timers []*fakeTimer
|
||||
|
||||
// Transport testing: Report various events.
|
||||
newclientconn func(*ClientConn)
|
||||
newstream func(*clientStream)
|
||||
}
|
||||
|
||||
// testBlockedGoroutine is a blocked goroutine.
|
||||
type testBlockedGoroutine struct {
|
||||
f func() bool // blocked until f returns true
|
||||
ch chan struct{} // closed when unblocked
|
||||
}
|
||||
|
||||
func newTestSyncHooks() *testSyncHooks {
|
||||
h := &testSyncHooks{
|
||||
active: make(chan struct{}, 1),
|
||||
inactive: make(chan struct{}, 1),
|
||||
condwait: map[*sync.Cond]int{},
|
||||
}
|
||||
h.inactive <- struct{}{}
|
||||
h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
return h
|
||||
}
|
||||
|
||||
// lock acquires the testSyncHooks mutex.
|
||||
func (h *testSyncHooks) lock() {
|
||||
select {
|
||||
case <-h.active:
|
||||
case <-h.inactive:
|
||||
}
|
||||
}
|
||||
|
||||
// waitInactive waits for all goroutines to become inactive.
|
||||
func (h *testSyncHooks) waitInactive() {
|
||||
for {
|
||||
<-h.inactive
|
||||
if !h.unlock() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unlock releases the testSyncHooks mutex.
|
||||
// It reports whether any goroutines are active.
|
||||
func (h *testSyncHooks) unlock() (active bool) {
|
||||
// Look for a blocked goroutine which can be unblocked.
|
||||
blocked := h.blocked[:0]
|
||||
unblocked := false
|
||||
for _, b := range h.blocked {
|
||||
if !unblocked && b.f() {
|
||||
unblocked = true
|
||||
close(b.ch)
|
||||
} else {
|
||||
blocked = append(blocked, b)
|
||||
}
|
||||
}
|
||||
h.blocked = blocked
|
||||
|
||||
// Count goroutines blocked on condition variables.
|
||||
condwait := 0
|
||||
for _, count := range h.condwait {
|
||||
condwait += count
|
||||
}
|
||||
|
||||
if h.total > condwait+len(blocked) {
|
||||
h.active <- struct{}{}
|
||||
return true
|
||||
} else {
|
||||
h.inactive <- struct{}{}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// goRun starts a new goroutine.
|
||||
func (h *testSyncHooks) goRun(f func()) {
|
||||
h.lock()
|
||||
h.total++
|
||||
h.unlock()
|
||||
go func() {
|
||||
defer func() {
|
||||
h.lock()
|
||||
h.total--
|
||||
h.unlock()
|
||||
}()
|
||||
f()
|
||||
}()
|
||||
}
|
||||
|
||||
// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
|
||||
// It waits until f returns true before proceeding.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// h.blockUntil(func() bool {
|
||||
// // Is the context done yet?
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// default:
|
||||
// return false
|
||||
// }
|
||||
// return true
|
||||
// })
|
||||
// // Wait for the context to become done.
|
||||
// <-ctx.Done()
|
||||
//
|
||||
// The function f passed to blockUntil must be non-blocking and idempotent.
|
||||
func (h *testSyncHooks) blockUntil(f func() bool) {
|
||||
if f() {
|
||||
return
|
||||
}
|
||||
ch := make(chan struct{})
|
||||
h.lock()
|
||||
h.blocked = append(h.blocked, &testBlockedGoroutine{
|
||||
f: f,
|
||||
ch: ch,
|
||||
})
|
||||
h.unlock()
|
||||
<-ch
|
||||
}
|
||||
|
||||
// broadcast is sync.Cond.Broadcast.
|
||||
func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
|
||||
h.lock()
|
||||
delete(h.condwait, cond)
|
||||
h.unlock()
|
||||
cond.Broadcast()
|
||||
}
|
||||
|
||||
// broadcast is sync.Cond.Wait.
|
||||
func (h *testSyncHooks) condWait(cond *sync.Cond) {
|
||||
h.lock()
|
||||
h.condwait[cond]++
|
||||
h.unlock()
|
||||
}
|
||||
|
||||
// newTimer creates a new fake timer.
|
||||
func (h *testSyncHooks) newTimer(d time.Duration) timer {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
t := &fakeTimer{
|
||||
hooks: h,
|
||||
when: h.now.Add(d),
|
||||
c: make(chan time.Time),
|
||||
}
|
||||
h.timers = append(h.timers, t)
|
||||
return t
|
||||
}
|
||||
|
||||
// afterFunc creates a new fake AfterFunc timer.
|
||||
func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
t := &fakeTimer{
|
||||
hooks: h,
|
||||
when: h.now.Add(d),
|
||||
f: f,
|
||||
}
|
||||
h.timers = append(h.timers, t)
|
||||
return t
|
||||
}
|
||||
|
||||
func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t := h.afterFunc(d, cancel)
|
||||
return ctx, func() {
|
||||
t.Stop()
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *testSyncHooks) timeUntilEvent() time.Duration {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
var next time.Time
|
||||
for _, t := range h.timers {
|
||||
if next.IsZero() || t.when.Before(next) {
|
||||
next = t.when
|
||||
}
|
||||
}
|
||||
if d := next.Sub(h.now); d > 0 {
|
||||
return d
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// advance advances time and causes synthetic timers to fire.
|
||||
func (h *testSyncHooks) advance(d time.Duration) {
|
||||
h.lock()
|
||||
defer h.unlock()
|
||||
h.now = h.now.Add(d)
|
||||
timers := h.timers[:0]
|
||||
for _, t := range h.timers {
|
||||
t := t // remove after go.mod depends on go1.22
|
||||
t.mu.Lock()
|
||||
switch {
|
||||
case t.when.After(h.now):
|
||||
timers = append(timers, t)
|
||||
case t.when.IsZero():
|
||||
// stopped timer
|
||||
default:
|
||||
t.when = time.Time{}
|
||||
if t.c != nil {
|
||||
close(t.c)
|
||||
}
|
||||
if t.f != nil {
|
||||
h.total++
|
||||
go func() {
|
||||
defer func() {
|
||||
h.lock()
|
||||
h.total--
|
||||
h.unlock()
|
||||
}()
|
||||
t.f()
|
||||
}()
|
||||
}
|
||||
}
|
||||
t.mu.Unlock()
|
||||
}
|
||||
h.timers = timers
|
||||
}
|
||||
|
||||
// A timer wraps a time.Timer, or a synthetic equivalent in tests.
|
||||
// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
|
||||
type timer interface {
|
||||
C() <-chan time.Time
|
||||
Stop() bool
|
||||
Reset(d time.Duration) bool
|
||||
}
|
||||
|
||||
// timeTimer implements timer using real time.
|
||||
type timeTimer struct {
|
||||
t *time.Timer
|
||||
c chan time.Time
|
||||
}
|
||||
|
||||
// newTimeTimer creates a new timer using real time.
|
||||
func newTimeTimer(d time.Duration) timer {
|
||||
ch := make(chan time.Time)
|
||||
t := time.AfterFunc(d, func() {
|
||||
close(ch)
|
||||
})
|
||||
return &timeTimer{t, ch}
|
||||
}
|
||||
|
||||
// newTimeAfterFunc creates an AfterFunc timer using real time.
|
||||
func newTimeAfterFunc(d time.Duration, f func()) timer {
|
||||
return &timeTimer{
|
||||
t: time.AfterFunc(d, f),
|
||||
}
|
||||
}
|
||||
|
||||
func (t timeTimer) C() <-chan time.Time { return t.c }
|
||||
func (t timeTimer) Stop() bool { return t.t.Stop() }
|
||||
func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
|
||||
|
||||
// fakeTimer implements timer using fake time.
|
||||
type fakeTimer struct {
|
||||
hooks *testSyncHooks
|
||||
|
||||
mu sync.Mutex
|
||||
when time.Time // when the timer will fire
|
||||
c chan time.Time // closed when the timer fires; mutually exclusive with f
|
||||
f func() // called when the timer fires; mutually exclusive with c
|
||||
}
|
||||
|
||||
func (t *fakeTimer) C() <-chan time.Time { return t.c }
|
||||
|
||||
func (t *fakeTimer) Stop() bool {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
stopped := t.when.IsZero()
|
||||
t.when = time.Time{}
|
||||
return stopped
|
||||
}
|
||||
|
||||
func (t *fakeTimer) Reset(d time.Duration) bool {
|
||||
if t.c != nil || t.f == nil {
|
||||
panic("fakeTimer only supports Reset on AfterFunc timers")
|
||||
}
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.hooks.lock()
|
||||
defer t.hooks.unlock()
|
||||
active := !t.when.IsZero()
|
||||
t.when = t.hooks.now.Add(d)
|
||||
if !active {
|
||||
t.hooks.timers = append(t.hooks.timers, t)
|
||||
}
|
||||
return active
|
||||
}
|
||||
20
vendor/golang.org/x/net/http2/timer.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2

import "time"

// A timer is a time.Timer, as an interface which can be replaced in tests.
type timer = interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// timeTimer adapts a time.Timer to the timer interface.
type timeTimer struct {
	*time.Timer
}

func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
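Because timer is an interface alias, tests can substitute any type with the same three methods. A self-contained sketch of such a stand-in; manualTimer is a hypothetical name and the local timer interface merely mirrors the one above so the example compiles on its own.

package main

import (
	"fmt"
	"time"
)

// timer mirrors the http2 interface above so this sketch is self-contained.
type timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

// manualTimer is a hypothetical test double: the test fires it explicitly
// instead of waiting for wall-clock time.
type manualTimer struct {
	ch      chan time.Time
	stopped bool
}

func newManualTimer() *manualTimer                { return &manualTimer{ch: make(chan time.Time, 1)} }
func (m *manualTimer) C() <-chan time.Time        { return m.ch }
func (m *manualTimer) Reset(d time.Duration) bool { return !m.stopped }
func (m *manualTimer) Stop() bool                 { m.stopped = true; return true }

// fire makes the timer appear to expire.
func (m *manualTimer) fire(now time.Time) { m.ch <- now }

func main() {
	var t timer = newManualTimer()
	t.(*manualTimer).fire(time.Now())
	fmt.Println("timer fired at", (<-t.C()).Round(time.Second))
}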
310
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@@ -185,7 +185,45 @@ type Transport struct {
|
||||
connPoolOnce sync.Once
|
||||
connPoolOrDef ClientConnPool // non-nil version of ConnPool
|
||||
|
||||
syncHooks *testSyncHooks
|
||||
*transportTestHooks
|
||||
}
|
||||
|
||||
// Hook points used for testing.
|
||||
// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations.
|
||||
// Inside tests, see the testSyncHooks function docs.
|
||||
|
||||
type transportTestHooks struct {
|
||||
newclientconn func(*ClientConn)
|
||||
group synctestGroupInterface
|
||||
}
|
||||
|
||||
func (t *Transport) markNewGoroutine() {
|
||||
if t != nil && t.transportTestHooks != nil {
|
||||
t.transportTestHooks.group.Join()
|
||||
}
|
||||
}
|
||||
|
||||
// newTimer creates a new time.Timer, or a synthetic timer in tests.
|
||||
func (t *Transport) newTimer(d time.Duration) timer {
|
||||
if t.transportTestHooks != nil {
|
||||
return t.transportTestHooks.group.NewTimer(d)
|
||||
}
|
||||
return timeTimer{time.NewTimer(d)}
|
||||
}
|
||||
|
||||
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
|
||||
func (t *Transport) afterFunc(d time.Duration, f func()) timer {
|
||||
if t.transportTestHooks != nil {
|
||||
return t.transportTestHooks.group.AfterFunc(d, f)
|
||||
}
|
||||
return timeTimer{time.AfterFunc(d, f)}
|
||||
}
|
||||
|
||||
func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
||||
if t.transportTestHooks != nil {
|
||||
return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
|
||||
}
|
||||
return context.WithTimeout(ctx, d)
|
||||
}
|
||||
|
||||
func (t *Transport) maxHeaderListSize() uint32 {
|
||||
@@ -352,60 +390,6 @@ type ClientConn struct {
|
||||
werr error // first write error that has occurred
|
||||
hbuf bytes.Buffer // HPACK encoder writes into this
|
||||
henc *hpack.Encoder
|
||||
|
||||
syncHooks *testSyncHooks // can be nil
|
||||
}
|
||||
|
||||
// Hook points used for testing.
|
||||
// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
|
||||
// Inside tests, see the testSyncHooks function docs.
|
||||
|
||||
// goRun starts a new goroutine.
|
||||
func (cc *ClientConn) goRun(f func()) {
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.goRun(f)
|
||||
return
|
||||
}
|
||||
go f()
|
||||
}
|
||||
|
||||
// condBroadcast is cc.cond.Broadcast.
|
||||
func (cc *ClientConn) condBroadcast() {
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.condBroadcast(cc.cond)
|
||||
}
|
||||
cc.cond.Broadcast()
|
||||
}
|
||||
|
||||
// condWait is cc.cond.Wait.
|
||||
func (cc *ClientConn) condWait() {
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.condWait(cc.cond)
|
||||
}
|
||||
cc.cond.Wait()
|
||||
}
|
||||
|
||||
// newTimer creates a new time.Timer, or a synthetic timer in tests.
|
||||
func (cc *ClientConn) newTimer(d time.Duration) timer {
|
||||
if cc.syncHooks != nil {
|
||||
return cc.syncHooks.newTimer(d)
|
||||
}
|
||||
return newTimeTimer(d)
|
||||
}
|
||||
|
||||
// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
|
||||
func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
|
||||
if cc.syncHooks != nil {
|
||||
return cc.syncHooks.afterFunc(d, f)
|
||||
}
|
||||
return newTimeAfterFunc(d, f)
|
||||
}
|
||||
|
||||
func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
|
||||
if cc.syncHooks != nil {
|
||||
return cc.syncHooks.contextWithTimeout(ctx, d)
|
||||
}
|
||||
return context.WithTimeout(ctx, d)
|
||||
}
|
||||
|
||||
// clientStream is the state for a single HTTP/2 stream. One of these
|
||||
@@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
|
||||
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
|
||||
if cs.cc.cond != nil {
|
||||
// Wake up writeRequestBody if it is waiting on flow control.
|
||||
cs.cc.condBroadcast()
|
||||
cs.cc.cond.Broadcast()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
|
||||
defer cc.mu.Unlock()
|
||||
if cs.reqBody != nil && cs.reqBodyClosed == nil {
|
||||
cs.closeReqBodyLocked()
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() {
|
||||
}
|
||||
cs.reqBodyClosed = make(chan struct{})
|
||||
reqBodyClosed := cs.reqBodyClosed
|
||||
cs.cc.goRun(func() {
|
||||
go func() {
|
||||
cs.cc.t.markNewGoroutine()
|
||||
cs.reqBody.Close()
|
||||
close(reqBodyClosed)
|
||||
})
|
||||
}()
|
||||
}
|
||||
|
||||
type stickyErrWriter struct {
|
||||
@@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
|
||||
backoff := float64(uint(1) << (uint(retry) - 1))
|
||||
backoff += backoff * (0.1 * mathrand.Float64())
|
||||
d := time.Second * time.Duration(backoff)
|
||||
var tm timer
|
||||
if t.syncHooks != nil {
|
||||
tm = t.syncHooks.newTimer(d)
|
||||
t.syncHooks.blockUntil(func() bool {
|
||||
select {
|
||||
case <-tm.C():
|
||||
case <-req.Context().Done():
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
} else {
|
||||
tm = newTimeTimer(d)
|
||||
}
|
||||
tm := t.newTimer(d)
|
||||
select {
|
||||
case <-tm.C():
|
||||
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
|
||||
@@ -725,8 +696,8 @@ func canRetryError(err error) bool {
|
||||
}
|
||||
|
||||
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
|
||||
if t.syncHooks != nil {
|
||||
return t.newClientConn(nil, singleUse, t.syncHooks)
|
||||
if t.transportTestHooks != nil {
|
||||
return t.newClientConn(nil, singleUse)
|
||||
}
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
@@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return t.newClientConn(tconn, singleUse, nil)
|
||||
return t.newClientConn(tconn, singleUse)
|
||||
}
|
||||
|
||||
func (t *Transport) newTLSConfig(host string) *tls.Config {
|
||||
@@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
|
||||
}
|
||||
|
||||
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
return t.newClientConn(c, t.disableKeepAlives(), nil)
|
||||
return t.newClientConn(c, t.disableKeepAlives())
|
||||
}
|
||||
|
||||
func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
|
||||
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
|
||||
cc := &ClientConn{
|
||||
t: t,
|
||||
tconn: c,
|
||||
@@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
|
||||
wantSettingsAck: true,
|
||||
pings: make(map[[8]byte]chan struct{}),
|
||||
reqHeaderMu: make(chan struct{}, 1),
|
||||
syncHooks: hooks,
|
||||
}
|
||||
if hooks != nil {
|
||||
hooks.newclientconn(cc)
|
||||
if t.transportTestHooks != nil {
|
||||
t.markNewGoroutine()
|
||||
t.transportTestHooks.newclientconn(cc)
|
||||
c = cc.tconn
|
||||
}
|
||||
if d := t.idleConnTimeout(); d != 0 {
|
||||
cc.idleTimeout = d
|
||||
cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
|
||||
}
|
||||
if VerboseLogs {
|
||||
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
|
||||
}
|
||||
@@ -893,7 +860,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
|
||||
return nil, cc.werr
|
||||
}
|
||||
|
||||
cc.goRun(cc.readLoop)
|
||||
// Start the idle timer after the connection is fully initialized.
|
||||
if d := t.idleConnTimeout(); d != 0 {
|
||||
cc.idleTimeout = d
|
||||
cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
|
||||
}
|
||||
|
||||
go cc.readLoop()
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
@@ -901,7 +874,7 @@ func (cc *ClientConn) healthCheck() {
|
||||
pingTimeout := cc.t.pingTimeout()
|
||||
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
|
||||
// trigger the healthCheck again if there is no frame received.
|
||||
ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
|
||||
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
|
||||
defer cancel()
|
||||
cc.vlogf("http2: Transport sending health check")
|
||||
err := cc.Ping(ctx)
|
||||
@@ -1144,7 +1117,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
|
||||
// Wait for all in-flight streams to complete or connection to close
|
||||
done := make(chan struct{})
|
||||
cancelled := false // guarded by cc.mu
|
||||
cc.goRun(func() {
|
||||
go func() {
|
||||
cc.t.markNewGoroutine()
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
for {
|
||||
@@ -1156,9 +1130,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
|
||||
if cancelled {
|
||||
break
|
||||
}
|
||||
cc.condWait()
|
||||
cc.cond.Wait()
|
||||
}
|
||||
})
|
||||
}()
|
||||
shutdownEnterWaitStateHook()
|
||||
select {
|
||||
case <-done:
|
||||
@@ -1168,7 +1142,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
|
||||
cc.mu.Lock()
|
||||
// Free the goroutine above
|
||||
cancelled = true
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
return ctx.Err()
|
||||
}
|
||||
@@ -1206,7 +1180,7 @@ func (cc *ClientConn) closeForError(err error) {
|
||||
for _, cs := range cc.streams {
|
||||
cs.abortStreamLocked(err)
|
||||
}
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
cc.closeConn()
|
||||
}
|
||||
@@ -1321,23 +1295,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
||||
respHeaderRecv: make(chan struct{}),
|
||||
donec: make(chan struct{}),
|
||||
}
|
||||
cc.goRun(func() {
|
||||
cs.doRequest(req)
|
||||
})
|
||||
|
||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||
if !cc.t.disableCompression() &&
|
||||
req.Header.Get("Accept-Encoding") == "" &&
|
||||
req.Header.Get("Range") == "" &&
|
||||
!cs.isHead {
|
||||
// Request gzip only, not deflate. Deflate is ambiguous and
|
||||
// not as universally supported anyway.
|
||||
// See: https://zlib.net/zlib_faq.html#faq39
|
||||
//
|
||||
// Note that we don't request this for HEAD requests,
|
||||
// due to a bug in nginx:
|
||||
// http://trac.nginx.org/nginx/ticket/358
|
||||
// https://golang.org/issue/5522
|
||||
//
|
||||
// We don't request gzip if the request is for a range, since
|
||||
// auto-decoding a portion of a gzipped document will just fail
|
||||
// anyway. See https://golang.org/issue/8923
|
||||
cs.requestedGzip = true
|
||||
}
|
||||
|
||||
go cs.doRequest(req, streamf)
|
||||
|
||||
waitDone := func() error {
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.blockUntil(func() bool {
|
||||
select {
|
||||
case <-cs.donec:
|
||||
case <-ctx.Done():
|
||||
case <-cs.reqCancel:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
select {
|
||||
case <-cs.donec:
|
||||
return nil
|
||||
@@ -1398,24 +1379,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
||||
return err
|
||||
}
|
||||
|
||||
if streamf != nil {
|
||||
streamf(cs)
|
||||
}
|
||||
|
||||
for {
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.blockUntil(func() bool {
|
||||
select {
|
||||
case <-cs.respHeaderRecv:
|
||||
case <-cs.abort:
|
||||
case <-ctx.Done():
|
||||
case <-cs.reqCancel:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
select {
|
||||
case <-cs.respHeaderRecv:
|
||||
return handleResponseHeaders()
|
||||
@@ -1445,8 +1409,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
|
||||
// doRequest runs for the duration of the request lifetime.
|
||||
//
|
||||
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
|
||||
func (cs *clientStream) doRequest(req *http.Request) {
|
||||
err := cs.writeRequest(req)
|
||||
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
|
||||
cs.cc.t.markNewGoroutine()
|
||||
err := cs.writeRequest(req, streamf)
|
||||
cs.cleanupWriteRequest(err)
|
||||
}
|
||||
|
||||
@@ -1457,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) {
|
||||
//
|
||||
// It returns non-nil if the request ends otherwise.
|
||||
// If the returned error is StreamError, the error Code may be used in resetting the stream.
|
||||
func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
||||
func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) {
|
||||
cc := cs.cc
|
||||
ctx := cs.ctx
|
||||
|
||||
@@ -1471,21 +1436,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
||||
if cc.reqHeaderMu == nil {
|
||||
panic("RoundTrip on uninitialized ClientConn") // for tests
|
||||
}
|
||||
var newStreamHook func(*clientStream)
|
||||
if cc.syncHooks != nil {
|
||||
newStreamHook = cc.syncHooks.newstream
|
||||
cc.syncHooks.blockUntil(func() bool {
|
||||
select {
|
||||
case cc.reqHeaderMu <- struct{}{}:
|
||||
<-cc.reqHeaderMu
|
||||
case <-cs.reqCancel:
|
||||
case <-ctx.Done():
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
select {
|
||||
case cc.reqHeaderMu <- struct{}{}:
|
||||
case <-cs.reqCancel:
|
||||
@@ -1510,28 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
||||
}
|
||||
cc.mu.Unlock()
|
||||
|
||||
if newStreamHook != nil {
|
||||
newStreamHook(cs)
|
||||
}
|
||||
|
||||
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
|
||||
if !cc.t.disableCompression() &&
|
||||
req.Header.Get("Accept-Encoding") == "" &&
|
||||
req.Header.Get("Range") == "" &&
|
||||
!cs.isHead {
|
||||
// Request gzip only, not deflate. Deflate is ambiguous and
|
||||
// not as universally supported anyway.
|
||||
// See: https://zlib.net/zlib_faq.html#faq39
|
||||
//
|
||||
// Note that we don't request this for HEAD requests,
|
||||
// due to a bug in nginx:
|
||||
// http://trac.nginx.org/nginx/ticket/358
|
||||
// https://golang.org/issue/5522
|
||||
//
|
||||
// We don't request gzip if the request is for a range, since
|
||||
// auto-decoding a portion of a gzipped document will just fail
|
||||
// anyway. See https://golang.org/issue/8923
|
||||
cs.requestedGzip = true
|
||||
if streamf != nil {
|
||||
streamf(cs)
|
||||
}
|
||||
|
||||
continueTimeout := cc.t.expectContinueTimeout()
|
||||
@@ -1594,7 +1524,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
||||
var respHeaderTimer <-chan time.Time
|
||||
var respHeaderRecv chan struct{}
|
||||
if d := cc.responseHeaderTimeout(); d != 0 {
|
||||
timer := cc.newTimer(d)
|
||||
timer := cc.t.newTimer(d)
|
||||
defer timer.Stop()
|
||||
respHeaderTimer = timer.C()
|
||||
respHeaderRecv = cs.respHeaderRecv
|
||||
@@ -1603,21 +1533,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
|
||||
// or until the request is aborted (via context, error, or otherwise),
|
||||
// whichever comes first.
|
||||
for {
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.blockUntil(func() bool {
|
||||
select {
|
||||
case <-cs.peerClosed:
|
||||
case <-respHeaderTimer:
|
||||
case <-respHeaderRecv:
|
||||
case <-cs.abort:
|
||||
case <-ctx.Done():
|
||||
case <-cs.reqCancel:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
select {
|
||||
case <-cs.peerClosed:
|
||||
return nil
|
||||
@@ -1766,7 +1681,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
|
||||
return nil
|
||||
}
|
||||
cc.pendingRequests++
|
||||
cc.condWait()
|
||||
cc.cond.Wait()
|
||||
cc.pendingRequests--
|
||||
select {
|
||||
case <-cs.abort:
|
||||
@@ -2028,7 +1943,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
|
||||
cs.flow.take(take)
|
||||
return take, nil
|
||||
}
|
||||
cc.condWait()
|
||||
cc.cond.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2311,7 +2226,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
|
||||
}
|
||||
// Wake up writeRequestBody via clientStream.awaitFlowControl and
|
||||
// wake up RoundTrip if there is a pending request.
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
|
||||
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
|
||||
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
|
||||
@@ -2333,6 +2248,7 @@ type clientConnReadLoop struct {
|
||||
|
||||
// readLoop runs in its own goroutine and reads and dispatches frames.
|
||||
func (cc *ClientConn) readLoop() {
|
||||
cc.t.markNewGoroutine()
|
||||
rl := &clientConnReadLoop{cc: cc}
|
||||
defer rl.cleanup()
|
||||
cc.readerErr = rl.run()
|
||||
@@ -2399,7 +2315,7 @@ func (rl *clientConnReadLoop) cleanup() {
|
||||
cs.abortStreamLocked(err)
|
||||
}
|
||||
}
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
}
|
||||
|
||||
@@ -2436,7 +2352,7 @@ func (rl *clientConnReadLoop) run() error {
|
||||
readIdleTimeout := cc.t.ReadIdleTimeout
|
||||
var t timer
|
||||
if readIdleTimeout != 0 {
|
||||
t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
|
||||
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
|
||||
}
|
||||
for {
|
||||
f, err := cc.fr.ReadFrame()
|
||||
@@ -3034,7 +2950,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
|
||||
for _, cs := range cc.streams {
|
||||
cs.flow.add(delta)
|
||||
}
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
|
||||
cc.initialWindowSize = s.Val
|
||||
case SettingHeaderTableSize:
|
||||
@@ -3089,7 +3005,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
|
||||
|
||||
return ConnectionError(ErrCodeFlowControl)
|
||||
}
|
||||
cc.condBroadcast()
|
||||
cc.cond.Broadcast()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3133,7 +3049,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
|
||||
}
|
||||
var pingError error
|
||||
errc := make(chan struct{})
|
||||
cc.goRun(func() {
|
||||
go func() {
|
||||
cc.t.markNewGoroutine()
|
||||
cc.wmu.Lock()
|
||||
defer cc.wmu.Unlock()
|
||||
if pingError = cc.fr.WritePing(false, p); pingError != nil {
|
||||
@@ -3144,20 +3061,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
|
||||
close(errc)
|
||||
return
|
||||
}
|
||||
})
|
||||
if cc.syncHooks != nil {
|
||||
cc.syncHooks.blockUntil(func() bool {
|
||||
select {
|
||||
case <-c:
|
||||
case <-errc:
|
||||
case <-ctx.Done():
|
||||
case <-cc.readerDone:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-c:
|
||||
return nil
|
||||
|
||||
4
vendor/golang.org/x/net/http2/writesched_priority.go
generated
vendored
@@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
}

func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
	for k := n.kids; k != nil; k = k.next {
		k.setParent(n.parent)
	for n.kids != nil {
		n.kids.setParent(n.parent)
	}
	n.setParent(nil)
	delete(ws.nodes, n.id)
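The removeNode change above stops iterating the kid list via k.next, because setParent rewrites those sibling links while re-parenting; looping while n.kids != nil detaches one child per iteration instead. A standalone sketch, with a hypothetical node type in place of priorityNode, illustrating why the old iteration pattern goes wrong.

package main

import "fmt"

// node stands in for priorityNode: children form a singly linked sibling list
// hanging off parent.kids.
type node struct {
	id     int
	parent *node
	kids   *node // head of child list
	next   *node // next sibling
}

// setParent unlinks n from its current parent's kid list and prepends it to
// the new parent's list, mirroring what priorityNode.setParent does.
func (n *node) setParent(p *node) {
	if n.parent != nil {
		cur := &n.parent.kids
		for *cur != n {
			cur = &(*cur).next
		}
		*cur = n.next // unlink from the old sibling chain
		n.next = nil
	}
	n.parent = p
	if p != nil {
		n.next = p.kids
		p.kids = n
	}
}

func main() {
	root := &node{id: 0}
	mid := &node{id: 1}
	mid.setParent(root)
	for i := 2; i <= 4; i++ {
		(&node{id: i}).setParent(mid)
	}
	// Re-parent mid's children the way the fixed loop does: always take the
	// current head. Walking with "k = k.next" instead would follow links that
	// setParent has already rewritten and miss siblings.
	for mid.kids != nil {
		mid.kids.setParent(root)
	}
	for k := root.kids; k != nil; k = k.next {
		fmt.Println("now a child of root:", k.id)
	}
}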
8
vendor/golang.org/x/net/proxy/per_host.go
generated
vendored
@@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) {
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
func (p *PerHost) AddZone(zone string) {
	if strings.HasSuffix(zone, ".") {
		zone = zone[:len(zone)-1]
	}
	zone = strings.TrimSuffix(zone, ".")
	if !strings.HasPrefix(zone, ".") {
		zone = "." + zone
	}
@@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) {

// AddHost specifies a host name that will use the bypass proxy.
func (p *PerHost) AddHost(host string) {
	if strings.HasSuffix(host, ".") {
		host = host[:len(host)-1]
	}
	host = strings.TrimSuffix(host, ".")
	p.bypassHosts = append(p.bypassHosts, host)
}
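Both helpers now rely on strings.TrimSuffix, which removes a single trailing dot when present and otherwise returns the input unchanged; a quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// TrimSuffix removes one trailing "." when present and is a no-op
	// otherwise, matching the HasSuffix/reslice code it replaces.
	for _, zone := range []string{"example.com.", "example.com"} {
		fmt.Printf("%q -> %q\n", zone, strings.TrimSuffix(zone, "."))
	}
}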
5
vendor/golang.org/x/net/websocket/hybi.go
generated
vendored
@@ -16,7 +16,6 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
@@ -279,7 +278,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er
|
||||
}
|
||||
}
|
||||
if header := frame.HeaderReader(); header != nil {
|
||||
io.Copy(ioutil.Discard, header)
|
||||
io.Copy(io.Discard, header)
|
||||
}
|
||||
switch frame.PayloadType() {
|
||||
case ContinuationFrame:
|
||||
@@ -294,7 +293,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return nil, err
|
||||
}
|
||||
io.Copy(ioutil.Discard, frame)
|
||||
io.Copy(io.Discard, frame)
|
||||
if frame.PayloadType() == PingFrame {
|
||||
if _, err := handler.WritePong(b[:n]); err != nil {
|
||||
return nil, err
|
||||
|
||||
7
vendor/golang.org/x/net/websocket/websocket.go
generated
vendored
@@ -17,7 +17,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -208,7 +207,7 @@ again:
|
||||
n, err = ws.frameReader.Read(msg)
|
||||
if err == io.EOF {
|
||||
if trailer := ws.frameReader.TrailerReader(); trailer != nil {
|
||||
io.Copy(ioutil.Discard, trailer)
|
||||
io.Copy(io.Discard, trailer)
|
||||
}
|
||||
ws.frameReader = nil
|
||||
goto again
|
||||
@@ -330,7 +329,7 @@ func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
|
||||
ws.rio.Lock()
|
||||
defer ws.rio.Unlock()
|
||||
if ws.frameReader != nil {
|
||||
_, err = io.Copy(ioutil.Discard, ws.frameReader)
|
||||
_, err = io.Copy(io.Discard, ws.frameReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -362,7 +361,7 @@ again:
		return ErrFrameTooLarge
	}
	payloadType := frame.PayloadType()
	data, err := ioutil.ReadAll(frame)
	data, err := io.ReadAll(frame)
	if err != nil {
		return err
	}
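These replacements track the Go 1.16 deprecation of io/ioutil: io.ReadAll and io.Discard behave the same as their ioutil counterparts. A tiny self-contained example:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.ReadAll (Go 1.16+) is the replacement for ioutil.ReadAll.
	data, err := io.ReadAll(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))

	// io.Discard likewise replaces ioutil.Discard for draining a reader.
	n, _ := io.Copy(io.Discard, strings.NewReader("drained"))
	fmt.Println("discarded", n, "bytes")
}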
2
vendor/golang.org/x/sys/unix/mkerrors.sh
generated
vendored
@@ -263,6 +263,7 @@ struct ltchars {
|
||||
#include <linux/sched.h>
|
||||
#include <linux/seccomp.h>
|
||||
#include <linux/serial.h>
|
||||
#include <linux/sock_diag.h>
|
||||
#include <linux/sockios.h>
|
||||
#include <linux/taskstats.h>
|
||||
#include <linux/tipc.h>
|
||||
@@ -549,6 +550,7 @@ ccflags="$@"
|
||||
$2 !~ "NLA_TYPE_MASK" &&
|
||||
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
|
||||
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
|
||||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
|
||||
$2 ~ /^FIORDCHK$/ ||
|
||||
$2 ~ /^SIOC/ ||
|
||||
$2 ~ /^TIOC/ ||
|
||||
|
||||
20
vendor/golang.org/x/sys/unix/zerrors_linux.go
generated
vendored
@@ -502,6 +502,7 @@ const (
|
||||
BPF_IMM = 0x0
|
||||
BPF_IND = 0x40
|
||||
BPF_JA = 0x0
|
||||
BPF_JCOND = 0xe0
|
||||
BPF_JEQ = 0x10
|
||||
BPF_JGE = 0x30
|
||||
BPF_JGT = 0x20
|
||||
@@ -657,6 +658,9 @@ const (
|
||||
CAN_NPROTO = 0x8
|
||||
CAN_RAW = 0x1
|
||||
CAN_RAW_FILTER_MAX = 0x200
|
||||
CAN_RAW_XL_VCID_RX_FILTER = 0x4
|
||||
CAN_RAW_XL_VCID_TX_PASS = 0x2
|
||||
CAN_RAW_XL_VCID_TX_SET = 0x1
|
||||
CAN_RTR_FLAG = 0x40000000
|
||||
CAN_SFF_ID_BITS = 0xb
|
||||
CAN_SFF_MASK = 0x7ff
|
||||
@@ -1339,6 +1343,7 @@ const (
|
||||
F_OFD_SETLK = 0x25
|
||||
F_OFD_SETLKW = 0x26
|
||||
F_OK = 0x0
|
||||
F_SEAL_EXEC = 0x20
|
||||
F_SEAL_FUTURE_WRITE = 0x10
|
||||
F_SEAL_GROW = 0x4
|
||||
F_SEAL_SEAL = 0x1
|
||||
@@ -1627,6 +1632,7 @@ const (
|
||||
IP_FREEBIND = 0xf
|
||||
IP_HDRINCL = 0x3
|
||||
IP_IPSEC_POLICY = 0x10
|
||||
IP_LOCAL_PORT_RANGE = 0x33
|
||||
IP_MAXPACKET = 0xffff
|
||||
IP_MAX_MEMBERSHIPS = 0x14
|
||||
IP_MF = 0x2000
|
||||
@@ -1653,6 +1659,7 @@ const (
|
||||
IP_PMTUDISC_OMIT = 0x5
|
||||
IP_PMTUDISC_PROBE = 0x3
|
||||
IP_PMTUDISC_WANT = 0x1
|
||||
IP_PROTOCOL = 0x34
|
||||
IP_RECVERR = 0xb
|
||||
IP_RECVERR_RFC4884 = 0x1a
|
||||
IP_RECVFRAGSIZE = 0x19
|
||||
@@ -2169,7 +2176,7 @@ const (
|
||||
NFT_SECMARK_CTX_MAXLEN = 0x100
|
||||
NFT_SET_MAXNAMELEN = 0x100
|
||||
NFT_SOCKET_MAX = 0x3
|
||||
NFT_TABLE_F_MASK = 0x3
|
||||
NFT_TABLE_F_MASK = 0x7
|
||||
NFT_TABLE_MAXNAMELEN = 0x100
|
||||
NFT_TRACETYPE_MAX = 0x3
|
||||
NFT_TUNNEL_F_MASK = 0x7
|
||||
@@ -2403,6 +2410,7 @@ const (
|
||||
PERF_RECORD_MISC_USER = 0x2
|
||||
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
|
||||
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
|
||||
PID_FS_MAGIC = 0x50494446
|
||||
PIPEFS_MAGIC = 0x50495045
|
||||
PPPIOCGNPMODE = 0xc008744c
|
||||
PPPIOCNEWUNIT = 0xc004743e
|
||||
@@ -2896,8 +2904,9 @@ const (
|
||||
RWF_APPEND = 0x10
|
||||
RWF_DSYNC = 0x2
|
||||
RWF_HIPRI = 0x1
|
||||
RWF_NOAPPEND = 0x20
|
||||
RWF_NOWAIT = 0x8
|
||||
RWF_SUPPORTED = 0x1f
|
||||
RWF_SUPPORTED = 0x3f
|
||||
RWF_SYNC = 0x4
|
||||
RWF_WRITE_LIFE_NOT_SET = 0x0
|
||||
SCHED_BATCH = 0x3
|
||||
@@ -2918,7 +2927,9 @@ const (
|
||||
SCHED_RESET_ON_FORK = 0x40000000
|
||||
SCHED_RR = 0x2
|
||||
SCM_CREDENTIALS = 0x2
|
||||
SCM_PIDFD = 0x4
|
||||
SCM_RIGHTS = 0x1
|
||||
SCM_SECURITY = 0x3
|
||||
SCM_TIMESTAMP = 0x1d
|
||||
SC_LOG_FLUSH = 0x100000
|
||||
SECCOMP_ADDFD_FLAG_SEND = 0x2
|
||||
@@ -3051,6 +3062,8 @@ const (
|
||||
SIOCSMIIREG = 0x8949
|
||||
SIOCSRARP = 0x8962
|
||||
SIOCWANDEV = 0x894a
|
||||
SK_DIAG_BPF_STORAGE_MAX = 0x3
|
||||
SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1
|
||||
SMACK_MAGIC = 0x43415d53
|
||||
SMART_AUTOSAVE = 0xd2
|
||||
SMART_AUTO_OFFLINE = 0xdb
|
||||
@@ -3071,6 +3084,8 @@ const (
|
||||
SOCKFS_MAGIC = 0x534f434b
|
||||
SOCK_BUF_LOCK_MASK = 0x3
|
||||
SOCK_DCCP = 0x6
|
||||
SOCK_DESTROY = 0x15
|
||||
SOCK_DIAG_BY_FAMILY = 0x14
|
||||
SOCK_IOC_TYPE = 0x89
|
||||
SOCK_PACKET = 0xa
|
||||
SOCK_RAW = 0x3
|
||||
@@ -3260,6 +3275,7 @@ const (
|
||||
TCP_MAX_WINSHIFT = 0xe
|
||||
TCP_MD5SIG = 0xe
|
||||
TCP_MD5SIG_EXT = 0x20
|
||||
TCP_MD5SIG_FLAG_IFINDEX = 0x2
|
||||
TCP_MD5SIG_FLAG_PREFIX = 0x1
|
||||
TCP_MD5SIG_MAXKEYLEN = 0x50
|
||||
TCP_MSS = 0x200
|
||||
|
||||
1
vendor/golang.org/x/sys/unix/zerrors_linux_386.go
generated
vendored
@@ -118,6 +118,7 @@ const (
IXOFF = 0x1000
IXON = 0x400
MAP_32BIT = 0x40
MAP_ABOVE4G = 0x80
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
1
vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
generated
vendored
@@ -118,6 +118,7 @@ const (
IXOFF = 0x1000
IXON = 0x400
MAP_32BIT = 0x40
MAP_ABOVE4G = 0x80
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
1
vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
generated
vendored
@@ -87,6 +87,7 @@ const (
FICLONE = 0x40049409
FICLONERANGE = 0x4020940d
FLUSHO = 0x1000
FPMR_MAGIC = 0x46504d52
FPSIMD_MAGIC = 0x46508001
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
37
vendor/golang.org/x/sys/unix/ztypes_linux.go
generated
vendored
@@ -4605,7 +4605,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
NL80211_ATTR_MAX = 0x149
NL80211_ATTR_MAX = 0x14a
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5209,7 +5209,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_MAX = 0x1f
NL80211_FREQUENCY_ATTR_MAX = 0x20
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
@@ -5703,7 +5703,7 @@ const (
NL80211_STA_FLAG_ASSOCIATED = 0x7
NL80211_STA_FLAG_AUTHENTICATED = 0x5
NL80211_STA_FLAG_AUTHORIZED = 0x1
NL80211_STA_FLAG_MAX = 0x7
NL80211_STA_FLAG_MAX = 0x8
NL80211_STA_FLAG_MAX_OLD_API = 0x6
NL80211_STA_FLAG_MFP = 0x4
NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2
@@ -6001,3 +6001,34 @@ type CachestatRange struct {
Off uint64
Len uint64
}

const (
SK_MEMINFO_RMEM_ALLOC = 0x0
SK_MEMINFO_RCVBUF = 0x1
SK_MEMINFO_WMEM_ALLOC = 0x2
SK_MEMINFO_SNDBUF = 0x3
SK_MEMINFO_FWD_ALLOC = 0x4
SK_MEMINFO_WMEM_QUEUED = 0x5
SK_MEMINFO_OPTMEM = 0x6
SK_MEMINFO_BACKLOG = 0x7
SK_MEMINFO_DROPS = 0x8
SK_MEMINFO_VARS = 0x9
SKNLGRP_NONE = 0x0
SKNLGRP_INET_TCP_DESTROY = 0x1
SKNLGRP_INET_UDP_DESTROY = 0x2
SKNLGRP_INET6_TCP_DESTROY = 0x3
SKNLGRP_INET6_UDP_DESTROY = 0x4
SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0
SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1
SK_DIAG_BPF_STORAGE_REP_NONE = 0x0
SK_DIAG_BPF_STORAGE = 0x1
SK_DIAG_BPF_STORAGE_NONE = 0x0
SK_DIAG_BPF_STORAGE_PAD = 0x1
SK_DIAG_BPF_STORAGE_MAP_ID = 0x2
SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3
)

type SockDiagReq struct {
Family uint8
Protocol uint8
}
1
vendor/golang.org/x/sys/windows/security_windows.go
generated
vendored
@@ -68,6 +68,7 @@ type UserInfo10 struct {
//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation
//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum

const (
// do not reorder
9
vendor/golang.org/x/sys/windows/zsyscall_windows.go
generated
vendored
@@ -401,6 +401,7 @@ var (
procTransmitFile = modmswsock.NewProc("TransmitFile")
procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree")
procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation")
procNetUserEnum = modnetapi32.NewProc("NetUserEnum")
procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo")
procNtCreateFile = modntdll.NewProc("NtCreateFile")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
@@ -3486,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
return
}

func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
if r0 != 0 {
neterr = syscall.Errno(r0)
}
return
}

func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
if r0 != 0 {
21
vendor/golang.org/x/text/message/message.go
generated
vendored
@@ -138,21 +138,20 @@ func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) {

func lookupAndFormat(p *printer, r Reference, a []interface{}) {
p.fmt.Reset(a)
var id, msg string
switch v := r.(type) {
case string:
id, msg = v, v
case key:
id, msg = v.id, v.fallback
default:
panic("key argument is not a Reference")
}

if p.catContext.Execute(id) == catalog.ErrNotFound {
if p.catContext.Execute(msg) == catalog.ErrNotFound {
p.Render(msg)
if p.catContext.Execute(v) == catalog.ErrNotFound {
p.Render(v)
return
}
case key:
if p.catContext.Execute(v.id) == catalog.ErrNotFound &&
p.catContext.Execute(v.fallback) == catalog.ErrNotFound {
p.Render(v.fallback)
return
}
default:
panic("key argument is not a Reference")
}
}
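For context (and not part of the vendored diff): the refactored lookupAndFormat resolves a Reference either directly from its string form or from an id/fallback pair before rendering. A hedged usage sketch of the public Printer API that exercises this path:

package main

import (
	"golang.org/x/text/language"
	"golang.org/x/text/message"
)

func main() {
	p := message.NewPrinter(language.English)

	// A plain string Reference: the string is both the catalog id and the fallback.
	p.Printf("Hello, %s!\n", "world")

	// An explicit id/fallback pair; the fallback is rendered when the id is
	// not found in the message catalog.
	p.Printf(message.Key("greeting.id", "Hi, %s!\n"), "world")
}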
4
vendor/google.golang.org/protobuf/encoding/protojson/decode.go
generated
vendored
@@ -102,7 +102,7 @@ type decoder struct {
}

// newError returns an error object with position info.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error {
}

// syntaxError returns a syntax error for given position.
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
20
vendor/google.golang.org/protobuf/encoding/protojson/encode.go
generated
vendored
@@ -25,15 +25,17 @@ const defaultIndent = "  "

// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}

// Marshal writes the given [proto.Message] in JSON format using default options.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@@ -110,8 +112,9 @@ type MarshalOptions struct {

// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
@@ -122,8 +125,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
}

// Marshal marshals the given [proto.Message] in the JSON format using options in
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
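Not part of the vendored diff: a short usage sketch of the protojson entry points whose doc comments are updated above, using a well-known type as the message.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	msg := timestamppb.Now() // a google.protobuf.Timestamp message

	// Human-readable output; ignores errors and is not guaranteed to be stable.
	fmt.Println(protojson.Format(msg))

	// Programmatic marshaling with options; errors are reported.
	b, err := protojson.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}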
4
vendor/google.golang.org/protobuf/encoding/prototext/decode.go
generated
vendored
@@ -84,7 +84,7 @@ type decoder struct {
}

// newError returns an error object with position info.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error {
}

// syntaxError returns a syntax error for given position.
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
20
vendor/google.golang.org/protobuf/encoding/prototext/encode.go
generated
vendored
@@ -27,15 +27,17 @@ const defaultIndent = "  "

// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}

// Marshal writes the given [proto.Message] in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
// options. Do not depend on the output being stable. Its output will change
// across different builds of your program, even when using the same version of
// the protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@@ -84,8 +86,9 @@ type MarshalOptions struct {

// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
// Do not depend on the output being stable. Its output will change across
// different builds of your program, even when using the same version of the
// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
@@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
}

// Marshal writes the given [proto.Message] in textproto format using options in
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
// MarshalOptions object. Do not depend on the output being stable. Its output
// will change across different builds of your program, even when using the
// same version of the protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
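Likewise for prototext (again an illustrative aside, not part of the diff): Format and Marshal mirror the protojson API but emit the protobuf text format.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello") // a google.protobuf.StringValue message

	// Multiline, human-oriented rendering.
	fmt.Println(prototext.Format(msg))

	// Option-driven marshaling with error reporting.
	b, err := prototext.MarshalOptions{Multiline: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
}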
1
vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
generated
vendored
@@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu
{rv.MethodByName("Values"), "Values"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
{rv.MethodByName("IsClosed"), "IsClosed"},
}...)

case protoreflect.EnumValueDescriptor:
BIN
vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
generated
vendored
Binary file not shown.
13
vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package editionssupport defines constants for editions that are supported.
package editionssupport

import descriptorpb "google.golang.org/protobuf/types/descriptorpb"

const (
Minimum = descriptorpb.Edition_EDITION_PROTO2
Maximum = descriptorpb.Edition_EDITION_2023
)
2
vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
generated
vendored
@@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) {

// newSyntaxError returns an error with line and column information useful for
// syntax errors.
func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(pos)
return errors.New("syntax error (line %d:%d): %v", line, column, e)
4
vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
generated
vendored
@@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
f.L1.HasPacked = true
f.L1.IsPacked = true
f.L1.EditionFeatures.IsPacked = true
case strings.HasPrefix(s, "weak="):
f.L1.IsWeak = true
f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
2
vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
generated
vendored
@@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {

// newSyntaxError returns a syntax error with line and column information for
// current position.
func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
func (d *Decoder) newSyntaxError(f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(len(d.orig) - len(d.in))
return errors.New("syntax error (line %d:%d): %v", line, column, e)
21
vendor/google.golang.org/protobuf/internal/errors/errors.go
generated
vendored
@@ -17,7 +17,7 @@ var Error = errors.New("protobuf error")

// New formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
func New(f string, x ...interface{}) error {
func New(f string, x ...any) error {
return &prefixError{s: format(f, x...)}
}

@@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error {

// Wrap returns an error that has a "proto" prefix, the formatted string described
// by the format specifier and arguments, and a suffix of err. The error wraps err.
func Wrap(err error, f string, x ...interface{}) error {
func Wrap(err error, f string, x ...any) error {
return &wrapError{
s: format(f, x...),
err: err,
@@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool {
return target == Error
}

func format(f string, x ...interface{}) string {
func format(f string, x ...any) string {
// avoid "proto: " prefix when chaining
for i := 0; i < len(x); i++ {
switch e := x[i].(type) {
@@ -87,3 +87,18 @@ func InvalidUTF8(name string) error {
func RequiredNotSet(name string) error {
return New("required field %v not set", name)
}

type SizeMismatchError struct {
Calculated, Measured int
}

func (e *SizeMismatchError) Error() string {
return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured)
}

func MismatchedSizeCalculation(calculated, measured int) error {
return &SizeMismatchError{
Calculated: calculated,
Measured: measured,
}
}
88
vendor/google.golang.org/protobuf/internal/filedesc/desc.go
generated
vendored
@@ -7,6 +7,7 @@ package filedesc
import (
"bytes"
"fmt"
"strings"
"sync"
"sync/atomic"

@@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }

// Not exported and just used to reconstruct the original FileDescriptor proto
func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
@@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
func (ed *Enum) IsClosed() bool {
return !ed.L1.EditionFeatures.IsOpenEnum
}

func (ed *EnumValue) Options() protoreflect.ProtoMessage {
if f := ed.L1.Options; f != nil {
@@ -251,10 +258,6 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
HasPacked bool // promoted from google.protobuf.FieldOptions
IsPacked bool // promoted from google.protobuf.FieldOptions
HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool {
if fd.L1.Cardinality == protoreflect.Repeated {
return false
}
explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
}
func (fd *Field) HasOptionalKeyword() bool {
return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
@@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool {
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.EditionFeatures.IsPacked
}
if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
// proto3 repeated fields are packed by default.
return !fd.L1.HasPacked || fd.L1.IsPacked
}
return fd.L1.IsPacked
return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
@@ -388,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor {
}
return fd.L1.Message
}
func (fd *Field) IsMapEntry() bool {
parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
return ok && parent.IsMapEntry()
}
func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}

@@ -399,13 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *Field) EnforceUTF8() bool {
if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
return fd.L1.EditionFeatures.IsUTF8Validated
}
if fd.L1.HasEnforceUTF8 {
return fd.L1.EnforceUTF8
}
return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
return fd.L1.EditionFeatures.IsUTF8Validated
}

func (od *Oneof) IsSynthetic() bool {
@@ -438,7 +431,6 @@ type (
Options func() protoreflect.ProtoMessage
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
@@ -461,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi
func (xd *Extension) HasOptionalKeyword() bool {
return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
}
func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
func (xd *Extension) IsPacked() bool {
if xd.L1.Cardinality != protoreflect.Repeated {
return false
}
switch xd.L1.Kind {
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
return xd.L1.EditionFeatures.IsPacked
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
@@ -542,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
// Surrogate files are can be used to create standalone descriptors
// where the syntax is only information derived from the parent file.
var (
SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}}
)

type (
@@ -585,6 +587,34 @@ func (s *stringName) InitJSON(name string) {
s.nameJSON = name
}

// Returns true if this field is structured like the synthetic field of a proto2
// group. This allows us to expand our treatment of delimited fields without
// breaking proto2 files that have been upgraded to editions.
func isGroupLike(fd protoreflect.FieldDescriptor) bool {
// Groups are always group types.
if fd.Kind() != protoreflect.GroupKind {
return false
}

// Group fields are always the lowercase type name.
if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) {
return false
}

// Groups could only be defined in the same file they're used.
if fd.Message().ParentFile() != fd.ParentFile() {
return false
}

// Group messages are always defined in the same scope as the field. File
// level extensions will compare NULL == NULL here, which is why the file
// comparison above is necessary to ensure both come from the same file.
if fd.IsExtension() {
return fd.Parent() == fd.Message().Parent()
}
return fd.ContainingMessage() == fd.Message().Parent()
}

func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
s.once.Do(func() {
if fd.IsExtension() {
@@ -605,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {

// Format the text name.
s.nameText = string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
if isGroupLike(fd) {
s.nameText = string(fd.Message().Name())
}
}
43
vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
generated
vendored
@@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) {
switch string(v) {
case "proto2":
fd.L1.Syntax = protoreflect.Proto2
fd.L1.Edition = EditionProto2
case "proto3":
fd.L1.Syntax = protoreflect.Proto3
fd.L1.Edition = EditionProto3
case "editions":
fd.L1.Syntax = protoreflect.Editions
default:
@@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) {
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
fd.L1.Syntax = protoreflect.Proto2
fd.L1.Edition = EditionProto2
}

if fd.L1.Syntax == protoreflect.Editions {
fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
}
fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)

// Parse editions features from options if any
if options != nil {
@@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent())

var numValues int
for b := b; len(b) > 0; {
@@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
xd.L1.EditionFeatures = featuresFromParentDesc(pd)

for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.FieldDescriptorProto_Extendee_field_number:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
case genid.FieldDescriptorProto_Options_field_number:
xd.unmarshalOptions(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}

if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
xd.L1.Kind = protoreflect.GroupKind
}
}

func (xd *Extension) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldOptions_Features_field_number:
xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
@@ -499,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
}

var nameBuilderPool = sync.Pool{
New: func() interface{} { return new(strs.Builder) },
New: func() any { return new(strs.Builder) },
}

func getBuilder() *strs.Builder {
49
vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
generated
vendored
@@ -45,6 +45,11 @@ func (file *File) resolveMessages() {
case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
// A map field might inherit delimited encoding from a file-wide default feature.
// But maps never actually use delimited encoding. (At least for now...)
fd.L1.Kind = protoreflect.MessageKind
}
}

// Default is resolved here since it depends on Enum being resolved.
@@ -466,10 +471,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
b = b[m:]
}
}
if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
fd.L1.Kind = protoreflect.GroupKind
}
if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
if fd.L1.EditionFeatures.IsLegacyRequired {
fd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
@@ -496,13 +501,11 @@ func (fd *Field) unmarshalOptions(b []byte) {
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
fd.L1.HasPacked = true
fd.L1.IsPacked = protowire.DecodeBool(v)
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.HasEnforceUTF8 = true
fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -548,7 +551,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
var rawTypeName []byte
var rawOptions []byte
xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
xd.L2 = new(ExtensionL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -572,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -580,12 +581,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
b = b[m:]
}
}
if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
xd.L1.Kind = protoreflect.GroupKind
}
if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
xd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
@@ -598,32 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}

func (xd *Extension) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L2.IsPacked = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FieldOptions_Features_field_number:
xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}

func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
var rawMethods [][]byte
var rawOptions []byte
11
vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
generated
vendored
@@ -8,6 +8,7 @@ package filedesc

import (
"fmt"
"strings"
"sync"

"google.golang.org/protobuf/internal/descfmt"
@@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields {
if _, ok := p.byText[d.TextName()]; !ok {
p.byText[d.TextName()] = d
}
if isGroupLike(d) {
lowerJSONName := strings.ToLower(d.JSONName())
if _, ok := p.byJSON[lowerJSONName]; !ok {
p.byJSON[lowerJSONName] = d
}
lowerTextName := strings.ToLower(d.TextName())
if _, ok := p.byText[lowerTextName]; !ok {
p.byText[lowerTextName] = d
}
}
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
22
vendor/google.golang.org/protobuf/internal/filedesc/editions.go
generated
vendored
@@ -14,9 +14,13 @@ import (
)

var defaultsCache = make(map[Edition]EditionFeatures)
var defaultsKeys = []Edition{}

func init() {
unmarshalEditionDefaults(editiondefaults.Defaults)
SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2)
SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3)
SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023)
}

func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
@@ -104,12 +108,15 @@ func unmarshalEditionDefault(b []byte) {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number:
fs = unmarshalFeatureSet(v, fs)
case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number:
fs = unmarshalFeatureSet(v, fs)
}
}
}
defaultsCache[ed] = fs
defaultsKeys = append(defaultsKeys, ed)
}

func unmarshalEditionDefaults(b []byte) {
@@ -135,8 +142,15 @@ func unmarshalEditionDefaults(b []byte) {
}

func getFeaturesFor(ed Edition) EditionFeatures {
if def, ok := defaultsCache[ed]; ok {
return def
match := EditionUnknown
for _, key := range defaultsKeys {
if key > ed {
break
}
match = key
}
panic(fmt.Sprintf("unsupported edition: %v", ed))
if match == EditionUnknown {
panic(fmt.Sprintf("unsupported edition: %v", ed))
}
return defaultsCache[match]
}
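The getFeaturesFor change above switches from an exact defaultsCache lookup to picking the closest known edition that does not exceed the requested one. A standalone sketch of that lookup pattern, with hypothetical names rather than the vendored code:

package main

import "fmt"

// featuresFor returns the defaults registered for the largest known key that
// is less than or equal to ed. keys must be in ascending order, matching the
// order in which the defaults were registered.
func featuresFor(ed int, keys []int, cache map[int]string) (string, error) {
	match := -1
	for _, key := range keys {
		if key > ed {
			break
		}
		match = key
	}
	if match < 0 {
		return "", fmt.Errorf("unsupported edition: %v", ed)
	}
	return cache[match], nil
}

func main() {
	keys := []int{998, 999, 1000}
	cache := map[int]string{998: "proto2 defaults", 999: "proto3 defaults", 1000: "2023 defaults"}

	fs, err := featuresFor(1001, keys, cache) // falls back to the 1000 entry
	fmt.Println(fs, err)
}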
1
vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
generated
vendored
@@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des
func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
func (e PlaceholderEnum) IsClosed() bool { return false }
func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
4
vendor/google.golang.org/protobuf/internal/filetype/build.go
generated
vendored
@@ -68,7 +68,7 @@ type Builder struct {
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
// dependencies themselves is unspecified.
GoTypes []interface{}
GoTypes []any

// DependencyIndexes is an ordered list of indexes into GoTypes for the
// dependencies of messages, extensions, or services.
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 {

type (
resolverByIndex struct {
goTypes []interface{}
goTypes []any
depIdxs depIdxs
fileRegistry
}
49
vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
generated
vendored
@@ -21,6 +21,7 @@ const (
// Enum values for google.protobuf.Edition.
const (
Edition_EDITION_UNKNOWN_enum_value = 0
Edition_EDITION_LEGACY_enum_value = 900
Edition_EDITION_PROTO2_enum_value = 998
Edition_EDITION_PROTO3_enum_value = 999
Edition_EDITION_2023_enum_value = 1000
@@ -653,6 +654,7 @@ const (
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
FieldOptions_Features_field_name protoreflect.Name = "features"
FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"

FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -667,6 +669,7 @@ const (
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)

@@ -684,6 +687,7 @@ const (
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
FieldOptions_Features_field_number protoreflect.FieldNumber = 21
FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)

@@ -767,6 +771,33 @@ const (
FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
)

// Names for google.protobuf.FieldOptions.FeatureSupport.
const (
FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport"
FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport"
)

// Field names for google.protobuf.FieldOptions.FeatureSupport.
const (
FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced"
FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated"
FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning"
FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed"

FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced"
FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated"
FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning"
FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed"
)

// Field numbers for google.protobuf.FieldOptions.FeatureSupport.
const (
FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1
FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2
FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3
FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4
)

// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -829,11 +860,13 @@ const (
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumValueOptions_Features_field_name protoreflect.Name = "features"
EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"

EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)

@@ -842,6 +875,7 @@ const (
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)

@@ -1110,17 +1144,20 @@ const (

// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features"
FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features"

FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features"
FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features"
)

// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4
FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5
)

// Names for google.protobuf.SourceCodeInfo.
Some files were not shown because too many files have changed in this diff.