From 7615153271fc9a4c0e3447f99fa208f7ce0be1be Mon Sep 17 00:00:00 2001
From: Michael Crosby
Date: Wed, 31 Jan 2018 10:12:41 -0500
Subject: [PATCH] Update prometheus client and go-metrics

Signed-off-by: Michael Crosby
---
 vendor.conf | 10 +-
 vendor/github.com/docker/go-metrics/README.md | 2 +-
 .../github.com/docker/go-metrics/namespace.go | 18 +-
 vendor/github.com/docker/go-metrics/timer.go | 29 +-
 .../prometheus/client_golang/README.md | 6 +-
 .../client_golang/prometheus/counter.go | 191 +++++--
 .../client_golang/prometheus/desc.go | 39 +-
 .../client_golang/prometheus/doc.go | 67 +--
 .../client_golang/prometheus/gauge.go | 204 ++++++-
 .../client_golang/prometheus/go_collector.go | 51 +-
 .../client_golang/prometheus/histogram.go | 147 +++--
 .../client_golang/prometheus/http.go | 110 ++--
 .../client_golang/prometheus/labels.go | 57 ++
 .../client_golang/prometheus/metric.go | 20 +-
 .../client_golang/prometheus/observer.go | 52 ++
 .../prometheus/process_collector.go | 104 ++--
 .../client_golang/prometheus/registry.go | 357 ++++++------
 .../client_golang/prometheus/summary.go | 173 ++++--
 .../client_golang/prometheus/timer.go | 51 ++
 .../client_golang/prometheus/untyped.go | 102 +---
 .../client_golang/prometheus/value.go | 78 +--
 .../client_golang/prometheus/vec.go | 513 ++++++++++--------
 vendor/github.com/prometheus/common/README.md | 2 +-
 .../prometheus/common/expfmt/decode.go | 47 +-
 .../prometheus/common/expfmt/expfmt.go | 7 +-
 .../prometheus/common/expfmt/text_parse.go | 4 +
 .../prometheus/common/model/metric.go | 2 +-
 .../prometheus/common/model/time.go | 17 +-
 .../prometheus/common/model/value.go | 5 +-
 vendor/github.com/prometheus/procfs/README.md | 1 +
 .../github.com/prometheus/procfs/buddyinfo.go | 95 ++++
 vendor/github.com/prometheus/procfs/fs.go | 36 ++
 .../prometheus/procfs/internal/util/parse.go | 46 ++
 vendor/github.com/prometheus/procfs/ipvs.go | 56 +-
 .../prometheus/procfs/mountstats.go | 134 ++---
 .../github.com/prometheus/procfs/net_dev.go | 203 +++++++
 .../github.com/prometheus/procfs/nfs/nfs.go | 263 +++++++++
 .../github.com/prometheus/procfs/nfs/parse.go | 308 +++++++++++
 .../prometheus/procfs/nfs/parse_nfs.go | 67 +++
 .../prometheus/procfs/nfs/parse_nfsd.go | 89 +++
 .../github.com/prometheus/procfs/proc_io.go | 5 +-
 .../prometheus/procfs/proc_limits.go | 38 +-
 .../github.com/prometheus/procfs/proc_ns.go | 55 ++
 vendor/github.com/prometheus/procfs/stat.go | 193 ++++++-
 vendor/github.com/prometheus/procfs/xfrm.go | 187 +++++++
 .../github.com/prometheus/procfs/xfs/parse.go | 330 +++++++++++
 .../github.com/prometheus/procfs/xfs/xfs.go | 163 ++++++
 47 files changed, 3650 insertions(+), 1084 deletions(-)
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go
 create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go
 create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go
 create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go
 create mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go
 create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go
 create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
 create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
 create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go
 create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go
 create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go
 create mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go

diff --git a/vendor.conf b/vendor.conf
index ecb6ca5fc..b2ca84d3d 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -3,13 +3,13 @@ github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f
 github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
 github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
-github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
+github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
-github.com/prometheus/client_golang v0.8.0
-github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
-github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb
-github.com/prometheus/procfs fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
+github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
+github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
+github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
+github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/docker/go-units v0.3.1
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
index fdf7fb746..da6b16c20 100644
--- a/vendor/github.com/docker/go-metrics/README.md
+++ b/vendor/github.com/docker/go-metrics/README.md
@@ -70,7 +70,7 @@ Package documentation can be found [here](https://godoc.org/github.com/docker/go
 ## Additional Metrics
-Additional metrics are also defined here that are not avaliable in the prometheus client.
+Additional metrics are also defined here that are not available in the prometheus client.
 If you need a custom metrics and it is generic enough to be used by multiple projects, define it here.
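Reviewer note (not part of the diff): the go-metrics hunks below change how metric names are built and what LabeledTimer.WithValues returns. A minimal sketch of how a consumer drives the bumped docker/go-metrics API — namespace, metric, and label values here are illustrative, while NewNamespace, NewLabeledCounter, NewLabeledTimer, Register, and Handler are the helpers exposed by the vendored revision:

package main

import (
	"log"
	"net/http"
	"time"

	metrics "github.com/docker/go-metrics"
)

var (
	ns = metrics.NewNamespace("containerd", "tasks", nil)

	tasksCreated = ns.NewLabeledCounter("created", "Number of tasks created", "runtime")
	startTimer   = ns.NewLabeledTimer("start", "Time taken to start a task", "runtime")
)

func main() {
	// Register the namespace with the shared registry and expose it over HTTP.
	metrics.Register(ns)

	begin := time.Now()
	// ... start a task ...
	tasksCreated.WithValues("io.containerd.runtime.v1.linux").Inc()
	// With this update, WithValues on a LabeledTimer returns a small
	// observer-backed helper rather than a full Timer; Update and
	// UpdateSince behave as before.
	startTimer.WithValues("io.containerd.runtime.v1.linux").UpdateSince(begin)

	http.Handle("/metrics", metrics.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
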
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go index 27dab786d..7734c2945 100644 --- a/vendor/github.com/docker/go-metrics/namespace.go +++ b/vendor/github.com/docker/go-metrics/namespace.go @@ -66,7 +66,7 @@ func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { return prometheus.CounterOpts{ Namespace: n.name, Subsystem: n.subsystem, - Name: fmt.Sprintf("%s_%s", name, Total), + Name: makeName(name, Total), Help: help, ConstLabels: prometheus.Labels(n.labels), } @@ -92,7 +92,7 @@ func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { return prometheus.HistogramOpts{ Namespace: n.name, Subsystem: n.subsystem, - Name: fmt.Sprintf("%s_%s", name, Seconds), + Name: makeName(name, Seconds), Help: help, ConstLabels: prometheus.Labels(n.labels), } @@ -118,7 +118,7 @@ func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeO return prometheus.GaugeOpts{ Namespace: n.name, Subsystem: n.subsystem, - Name: fmt.Sprintf("%s_%s", name, unit), + Name: makeName(name, unit), Help: help, ConstLabels: prometheus.Labels(n.labels), } @@ -149,9 +149,7 @@ func (n *Namespace) Add(collector prometheus.Collector) { } func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { - if string(unit) != "" { - name = fmt.Sprintf("%s_%s", name, unit) - } + name = makeName(name, unit) namespace := n.name if n.subsystem != "" { namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) @@ -173,3 +171,11 @@ func mergeLabels(lbs ...Labels) Labels { return merged } + +func makeName(name string, unit Unit) string { + if unit == "" { + return name + } + + return fmt.Sprintf("%s_%s", name, unit) +} diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go index e91eca76b..824c98739 100644 --- a/vendor/github.com/docker/go-metrics/timer.go +++ b/vendor/github.com/docker/go-metrics/timer.go @@ -28,15 +28,27 @@ type Timer interface { // LabeledTimer is a timer that must have label values populated before use. type LabeledTimer interface { - WithValues(labels ...string) Timer + WithValues(labels ...string) *labeledTimerObserver } type labeledTimer struct { m *prometheus.HistogramVec } -func (lt *labeledTimer) WithValues(labels ...string) Timer { - return &timer{m: lt.m.WithLabelValues(labels...)} +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} } func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { @@ -48,7 +60,7 @@ func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { } type timer struct { - m prometheus.Histogram + m prometheus.Observer } func (t *timer) Update(duration time.Duration) { @@ -60,9 +72,14 @@ func (t *timer) UpdateSince(since time.Time) { } func (t *timer) Describe(c chan<- *prometheus.Desc) { - t.m.Describe(c) + c <- t.m.(prometheus.Metric).Desc() } func (t *timer) Collect(c chan<- prometheus.Metric) { - t.m.Collect(c) + // Are there any observers that don't implement Collector? 
It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. + t.m.(prometheus.Collector).Collect(c) } diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md index 557eacf5a..0eb0df1df 100644 --- a/vendor/github.com/prometheus/client_golang/README.md +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -1,12 +1,15 @@ # Prometheus Go client library [![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) This is the [Go](http://golang.org) client library for [Prometheus](http://prometheus.io). It has two separate parts, one for instrumenting application code, and one for creating clients that talk to the Prometheus HTTP API. +__This library requires Go1.7 or later.__ + ## Instrumenting applications [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) @@ -29,7 +32,8 @@ The [`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) contains the client for the [Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you -to write Go applications that query time series data from a Prometheus server. +to write Go applications that query time series data from a Prometheus +server. It is still in alpha stage. ## Where is `model`, `extraction`, and `text`? diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index ee37949ad..765e4550c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -15,6 +15,10 @@ package prometheus import ( "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" ) // Counter is a Metric that represents a single numerical value that only ever @@ -30,16 +34,8 @@ type Counter interface { Metric Collector - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - // - // Deprecated: Use NewConstMetric to create a counter for an external - // value. A Counter should never be set. - Set(float64) - // Inc increments the counter by 1. + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. @@ -50,6 +46,14 @@ type Counter interface { type CounterOpts Opts // NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. 
The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. func NewCounter(opts CounterOpts) Counter { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -57,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} result.init(result) // Init self-collection. return result } type counter struct { - value + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc } func (c *counter) Add(v float64) { if v < 0 { panic(errors.New("counter cannot decrease in value")) } - c.value.Add(v) + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) } // CounterVec is a Collector that bundles a set of Counters that all share the @@ -78,16 +120,12 @@ func (c *counter) Add(v float64) { // if you want to count the same thing partitioned by various dimensions // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. type CounterVec struct { - *MetricVec + *metricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -96,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { opts.ConstLabels, ) return &CounterVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. 
return result }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { return metric.(Counter), err } @@ -131,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. 
Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } // CounterFunc is a Counter whose value is determined at collect time by calling a diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 77f4b30e8..4a755b0fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -16,33 +16,15 @@ package prometheus import ( "errors" "fmt" - "regexp" "sort" "strings" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. 
-type Labels map[string]string - // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to @@ -78,7 +60,7 @@ type Desc struct { // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 - // err is an error that occured during construction. It is reported on + // err is an error that occurred during construction. It is reported on // registration time. err error } @@ -91,8 +73,7 @@ type Desc struct { // and therefore not part of the Desc. (They are managed within the Metric.) // // For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. +// specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, @@ -103,7 +84,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("empty help string") return d } - if !metricNameRE.MatchString(fqName) { + if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } @@ -127,6 +108,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. @@ -142,6 +129,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * d.err = errors.New("duplicate label names") return d } + vh := hashNew() for _, val := range labelValues { vh = hashAdd(vh, val) @@ -198,8 +186,3 @@ func (d *Desc) String() string { d.variableLabels, ) } - -func checkLabelName(l string) bool { - return labelNameRE.MatchString(l) && - !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index b15a2d3b9..36ef15567 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -17,7 +17,7 @@ // Pushgateway (package push). // // All exported functions and methods are safe to be used concurrently unless -//specified otherwise. +// specified otherwise. // // A Basic Example // @@ -26,6 +26,7 @@ // package main // // import ( +// "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" @@ -59,7 +60,7 @@ // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":8080", nil) +// log.Fatal(http.ListenAndServe(":8080", nil)) // } // // @@ -69,7 +70,7 @@ // Metrics // // The number of exported identifiers in this package might appear a bit -// overwhelming. 
Hovever, in addition to the basic plumbing shown in the example +// overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their // vector versions for basic usage. // @@ -95,8 +96,8 @@ // SummaryVec, HistogramVec, and UntypedVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, -// HistogramOpts, or UntypedOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. // // Custom Collectors and constant Metrics // @@ -114,8 +115,8 @@ // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and // NewConstSummary (and their respective Must… versions). That will happen in // the Collect method. The Describe method has to return separate Desc -// instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the @@ -129,34 +130,34 @@ // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might -// cause. As suggested by the name, MustRegister panics if an error occurs. With -// the Register function, the error is returned and can be handled. +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data -// model. Inconsistencies are ideally detected at registration time, not at -// collect time. The former will usually be detected at start-up time of a -// program, while the latter will only happen at scrape time, possibly not even -// on the first scrape if the inconsistency only becomes relevant later. That is -// the main reason why a Collector and a Metric have to describe themselves to -// the registry. +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. // // So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can be found in the global DefaultRegisterer variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in -// the same way on a custom registry as the global functions Register and -// Unregister on the default registry. 
+// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. // -// There are a number of uses for custom registries: You can use registries -// with special properties, see NewPedanticRegistry. You can avoid global state, -// as it is imposed by the DefaultRegistry. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use // separate registries for testing purposes. // -// Also note that the DefaultRegistry comes registered with a Collector for Go +// Also note that the DefaultRegisterer comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. @@ -166,16 +167,20 @@ // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp -// sub-package. (The top-level functions in the prometheus package are -// deprecated.) +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// // Other Means of Exposition // -// More ways of exposing metrics can easily be added. Sending metrics to -// Graphite would be an example that will soon be implemented. +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 8b70e5141..17c72d7eb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -13,6 +13,14 @@ package prometheus +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. // @@ -27,29 +35,95 @@ type Gauge interface { // Set sets the Gauge to an arbitrary value. Set(float64) - // Inc increments the Gauge by 1. + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. Inc() - // Dec decrements the Gauge by 1. + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. Dec() - // Add adds the given value to the Gauge. (The value can be - // negative, resulting in a decrease of the Gauge.) + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) 
Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts // NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. func NewGauge(opts GaugeOpts) Gauge { - return newValue(NewDesc( + desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, - ), GaugeValue, 0) + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge { // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { - *MetricVec + *metricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { opts.ConstLabels, ) return &GaugeVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. 
The difference is that this method returns a Gauge and not a -// Metric so that no type conversion is required. -func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Gauge and not a Metric so that no -// type conversion is required. -func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { return metric.(Gauge), err } @@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. 
Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) -func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.MetricVec.WithLabelValues(lvs...).(Gauge) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *GaugeVec) With(labels Labels) Gauge { - return m.MetricVec.With(labels).(Gauge) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } // GaugeFunc is a Gauge whose value is determined at collect time by calling a diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index abc9d4ec4..096454af9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -8,8 +8,10 @@ import ( ) type goCollector struct { - goroutines Gauge - gcDesc *Desc + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc // metrics to describe and collect metrics memStatsMetrics @@ -19,15 +21,22 @@ type goCollector struct { // go process. 
func NewGoCollector() Collector { return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", "A summary of the GC invocation durations.", nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), metrics: memStatsMetrics{ { desc: NewDesc( @@ -48,7 +57,7 @@ func NewGoCollector() Collector { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained by system. Sum of all system allocations.", + "Number of bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, @@ -111,12 +120,12 @@ func NewGoCollector() Collector { valType: GaugeValue, }, { desc: NewDesc( - memstatNamespace("heap_released_bytes_total"), - "Total number of heap bytes released to OS.", + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: CounterValue, + valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), @@ -213,6 +222,14 @@ func NewGoCollector() Collector { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, }, }, } @@ -224,9 +241,10 @@ func memstatNamespace(s string) string { // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() + ch <- c.goroutinesDesc + ch <- c.threadsDesc ch <- c.gcDesc - + ch <- c.goInfoDesc for _, i := range c.metrics { ch <- i.desc } @@ -234,8 +252,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -248,6 +267,8 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + ms := &runtime.MemStats{} runtime.ReadMemStats(ms) for _, i := range c.metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 9719e8fac..331783a75 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -126,23 +126,16 @@ type HistogramOpts struct { // string. 
Help string - // ConstLabels are used to attach fixed labels to this - // Histogram. Histograms with the same fully-qualified name must have the - // same label names in their ConstLabels. + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels // Buckets defines the buckets into which observations are counted. Each @@ -287,12 +280,11 @@ func (h *histogram) Write(out *dto.Metric) error { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { - *MetricVec + *metricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -301,47 +293,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { opts.ConstLabels, ) return &HistogramVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + metricVec: newMetricVec(desc, func(lvs ...string) Metric { return newHistogram(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. 
+// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { - return metric.(Histogram), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h } -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. 
Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constHistogram struct { @@ -401,8 +462,8 @@ func NewConstHistogram( buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constHistogram{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index 67ee5ac79..bfee5c6eb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -62,7 +62,8 @@ func giveBuf(buf *bytes.Buffer) { // // Deprecated: Please note the issues described in the doc comment of // InstrumentHandler. You might want to consider using promhttp.Handler instead -// (which is non instrumented). +// (which is not instrumented, but can be instrumented with the tooling provided +// in package promhttp). func Handler() http.Handler { return InstrumentHandler("prometheus", UninstrumentedHandler()) } @@ -95,7 +96,7 @@ func UninstrumentedHandler() http.Handler { closer.Close() } if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) return } header := w.Header() @@ -158,7 +159,8 @@ func nowSeries(t ...time.Time) nower { // value. http_requests_total is a metric vector partitioned by HTTP method // (label name "method") and HTTP status code (label name "code"). // -// Deprecated: InstrumentHandler has several issues: +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: // // - It uses Summaries rather than Histograms. 
Summaries are not useful if // aggregation across multiple instances is required. @@ -172,9 +174,8 @@ func nowSeries(t ...time.Time) nower { // httputil.ReverseProxy is a prominent example for a handler // performing such writes. // -// Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Please prefer direct -// instrumentation in the meantime. +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) } @@ -184,12 +185,13 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun // issues). // // Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as -// InstrumentHandler is. +// InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { return InstrumentHandlerFuncWithOpts( SummaryOpts{ Subsystem: "http", ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, handlerFunc, ) @@ -222,7 +224,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri // SummaryOpts. // // Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as -// InstrumentHandler is. +// InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) } @@ -233,7 +235,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand // SummaryOpts are used. // // Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons -// as InstrumentHandler is. +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { reqCnt := NewCounterVec( CounterOpts{ @@ -245,34 +247,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo }, instLabels, ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } opts.Name = "request_duration_microseconds" opts.Help = "The HTTP request latencies in microseconds." reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } opts.Name = "request_size_bytes" opts.Help = "The HTTP request sizes in bytes." reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } opts.Name = "response_size_bytes" opts.Help = "The HTTP response sizes in bytes." 
resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) + out := computeApproximateRequestSize(r) _, cn := w.(http.CloseNotifier) _, fl := w.(http.Flusher) @@ -290,30 +310,44 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo method := sanitizeMethod(r.Method) code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) }) } -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } } - } - s += len(r.Host) + s += len(r.Host) - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out } type responseWriterDelegator struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 000000000..2502e3734 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,57 @@ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. 
+const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index d4063d98f..6213ee812 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -79,20 +79,12 @@ type Opts struct { // with the same fully-qualified name must have the same label names in // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a metric - // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels - // serve only special purposes. One is for the special case where the - // value of a label does not change during the lifetime of a process, - // e.g. if the revision of the running binary is put into a - // label. Another, more advanced purpose is if more than one Collector - // needs to collect Metrics with the same fully-qualified name. In that - // case, those Metrics must differ in the values of their - // ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 000000000..5806cd09e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
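// Illustrative sketch (not part of the vendored patch): the Labels map type
// defined above used with With(Labels), plus a single build_info-style metric
// carrying ConstLabels instead of attaching the same labels to every metric,
// as the ConstLabels comment recommends. Metric and label names are
// hypothetical.
package example

import "github.com/prometheus/client_golang/prometheus"

var (
	httpReqs = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "Processed HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)

	buildInfo = prometheus.NewGauge(prometheus.GaugeOpts{
		Name:        "example_build_info",
		Help:        "Build information; constant 1.",
		ConstLabels: prometheus.Labels{"version": "1.2.3", "revision": "abc123"},
	})
)

func record() {
	// Label names must match those given to NewCounterVec.
	httpReqs.With(prometheus.Labels{"code": "404", "method": "GET"}).Inc()
	buildInfo.Set(1)
}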
+ +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index e31e62e78..94b2553e1 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -19,10 +19,10 @@ type processCollector struct { pid int collectFn func(chan<- Metric) pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc } // NewProcessCollector returns a collector which exports the current state of @@ -44,40 +44,45 @@ func NewProcessCollectorPIDFn( pidFn func() (int, error), namespace string, ) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + c := processCollector{ pidFn: pidFn, collectFn: func(chan<- Metric) {}, - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the process since unix epoch in seconds.", - }), + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + 
"Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), } // Set up process metric collection if supported by the runtime. @@ -90,12 +95,12 @@ func NewProcessCollectorPIDFn( // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime } // Collect returns the current state of all metrics of the collector. @@ -117,26 +122,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) } } if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) } if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 32a3986b0..73dde7beb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -18,8 +18,10 @@ import ( "errors" "fmt" "os" + "runtime" "sort" "sync" + "unicode/utf8" "github.com/golang/protobuf/proto" @@ -80,7 +82,7 @@ func NewPedanticRegistry() *Registry { // Registerer is the interface for the part of a registry in charge of // registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather then the Registry type +// Registerer as type for registration purposes (rather than the Registry type // directly). In that way, they are free to use custom Registerer implementation // (e.g. for testing purposes). type Registerer interface { @@ -152,38 +154,6 @@ func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) } -// RegisterOrGet registers the provided Collector with the DefaultRegisterer and -// returns the Collector, unless an equal Collector was registered before, in -// which case that Collector is returned. -// -// Deprecated: RegisterOrGet is merely a convenience function for the -// implementation as described in the documentation for -// AlreadyRegisteredError. As the use case is relatively rare, this function -// will be removed in a future version of this package to clean up the -// namespace. 
-func RegisterOrGet(c Collector) (Collector, error) { - if err := Register(c); err != nil { - if are, ok := err.(AlreadyRegisteredError); ok { - return are.ExistingCollector, nil - } - return nil, err - } - return c, nil -} - -// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning -// an error. -// -// Deprecated: This is deprecated for the same reason RegisterOrGet is. See -// there for details. -func MustRegisterOrGet(c Collector) Collector { - c, err := RegisterOrGet(c) - if err != nil { - panic(err) - } - return c -} - // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // @@ -201,25 +171,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } -// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that -// gathers from the previous DefaultGatherers but then merges the MetricFamily -// protobufs returned from the provided hook function with the MetricFamily -// protobufs returned from the original DefaultGatherer. -// -// Deprecated: This function manipulates the DefaultGatherer variable. Consider -// the implications, i.e. don't do this concurrently with any uses of the -// DefaultGatherer. In the rare cases where you need to inject MetricFamily -// protobufs directly, it is recommended to use a custom Registry and combine it -// with a custom Gatherer using the Gatherers type (see -// there). SetMetricFamilyInjectionHook only exists for compatibility reasons -// with previous versions of this package. -func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - DefaultGatherer = Gatherers{ - DefaultGatherer, - GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), - } -} - // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails @@ -252,6 +203,13 @@ func (errs MultiError) Error() string { return buf.String() } +// Append appends the provided error if it is not nil. +func (errs *MultiError) Append(err error) { + if err != nil { + *errs = append(*errs, err) + } +} + // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way @@ -294,7 +252,7 @@ func (r *Registry) Register(c Collector) error { }() r.mtx.Lock() defer r.mtx.Unlock() - // Coduct various tests... + // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? @@ -418,22 +376,12 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { ) r.mtx.RLock() + goroutineBudget := len(r.collectorsByID) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() + collectors := make(chan Collector, len(r.collectorsByID)) for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) + collectors <- collector } - // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. 
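// Illustrative sketch (not part of the vendored patch): the Register /
// AlreadyRegisteredError idiom that replaces the removed RegisterOrGet and
// MustRegisterOrGet helpers, mirroring what the patched http.go does for its
// counters and summaries. The helper name is hypothetical.
package example

import "github.com/prometheus/client_golang/prometheus"

func registerOrReuse(c prometheus.Counter) prometheus.Counter {
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// An equal Collector is already registered; use that one.
			return are.ExistingCollector.(prometheus.Counter)
		}
		// Any other registration error indicates a programming error.
		panic(err)
	}
	return c
}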
if r.pedanticChecksEnabled { @@ -442,129 +390,176 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs[id] = struct{}{} } } - r.mtx.RUnlock() + wg.Add(goroutineBudget) + + collectWorker := func() { + for { + select { + case collector := <-collectors: + collector.Collect(metricChan) + wg.Done() + default: + return + } + } + } + + // Start the first worker now to make sure at least one is running. + go collectWorker() + goroutineBudget-- + + // Close the metricChan once all collectors are collected. + go func() { + wg.Wait() + close(metricChan) + }() + // Drain metricChan in case of premature return. defer func() { - for _ = range metricChan { + for range metricChan { } }() - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - errs = append(errs, fmt.Errorf( - "error collecting metric %v: %s", desc, err, +collectLoop: + for { + select { + case metric, ok := <-metricChan: + if !ok { + // metricChan is closed, we are done. + break collectLoop + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, )) - continue + default: + if goroutineBudget <= 0 || len(collectors) == 0 { + // All collectors are aleady being worked on or + // we have already as many goroutines started as + // there are collectors. Just process metrics + // from now on. + for metric := range metricChan { + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, dimHashes, + registeredDescIDs, + )) + } + break collectLoop + } + // Start more workers. + go collectWorker() + goroutineBudget-- + runtime.Gosched() } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { - if metricFamily.GetHelp() != desc.help { - errs = append(errs, fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - )) - continue - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - )) - continue - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - errs = append(errs, fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - )) - continue - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. 
- switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - errs = append(errs, fmt.Errorf( - "empty metric collected: %s", dtoMetric, - )) - continue - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { - errs = append(errs, err) - continue - } - if r.pedanticChecksEnabled { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - errs = append(errs, fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - )) - continue - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - errs = append(errs, err) - continue - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the @@ -683,7 +678,7 @@ func (s metricSorter) Less(i, j int) bool { return s[i].GetTimestampMs() < s[j].GetTimestampMs() } -// normalizeMetricFamilies returns a MetricFamily slice whith empty +// normalizeMetricFamilies returns a MetricFamily slice with empty // MetricFamilies pruned and the remaining MetricFamilies sorted by name within // the slice, with the contained Metrics sorted within each MetricFamily. func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { @@ -706,7 +701,7 @@ func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) // checkMetricConsistency checks if the provided Metric is consistent with the // provided MetricFamily. It also hashed the Metric labels and the MetricFamily -// name. If the resulting hash is alread in the provided metricHashes, an error +// name. If the resulting hash is already in the provided metricHashes, an error // is returned. If not, it is added to metricHashes. The provided dimHashes maps // MetricFamily names to their dimHash (hashed sorted label names). If dimHashes // doesn't yet contain a hash for the provided MetricFamily, it is @@ -730,6 +725,12 @@ func checkMetricConsistency( ) } + for _, labelPair := range dtoMetric.GetLabel() { + if !utf8.ValidString(*labelPair.Value) { + return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value) + } + } + // Is the metric unique (i.e. no other metric with the same name and the same label values)? h := hashNew() h = hashAdd(h, metricFamily.GetName()) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index bce05bf9a..f7dc85b96 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -36,7 +36,10 @@ const quantileLabel = "quantile" // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. +// as rank estimations. 
However, the default behavior will change in the +// upcoming v0.10 of the library. There will be no rank estiamtions at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you @@ -54,6 +57,9 @@ type Summary interface { } // DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. var ( DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} @@ -75,8 +81,10 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name and Help to a non-empty string. While all other fields +// are optional and can safely be left at their zero value, it is recommended to +// explicitly set the Objectives field to the desired value as the default value +// will change in the upcoming v0.10 of the library. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -93,29 +101,28 @@ type SummaryOpts struct { // string. Help string - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels ConstLabels Labels // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported - // for q will be the φ-quantile value for some φ between q-e and q+e. - // The default value is DefObjectives. + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is DefObjectives. 
It is used if Objectives is left at + // its zero value (i.e. nil). To create a Summary without Objectives, + // set it to an empty map (i.e. map[float64]float64{}). + // + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant @@ -183,7 +190,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } } - if len(opts.Objectives) == 0 { + if opts.Objectives == nil { opts.Objectives = DefObjectives } @@ -390,12 +397,11 @@ func (s quantSort) Less(i, j int) bool { // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { - *MetricVec + *metricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. +// partitioned by the given label names. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), @@ -404,47 +410,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { opts.ConstLabels, ) return &SummaryVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + metricVec: newMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { - metric, err := m.MetricVec.GetMetricWith(labels) +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) if metric != nil { - return metric.(Summary), err + return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) +func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { + s, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return s } // With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *SummaryVec) With(labels Labels) Observer { + s, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return s +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the SummaryVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. 
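// Illustrative sketch (not part of the vendored patch): a SummaryVec with
// Objectives set explicitly (recommended above, since the default will change
// in v0.10) and then curried as documented for CurryWith. Metric and label
// names are hypothetical.
package example

import "github.com/prometheus/client_golang/prometheus"

var rpcDur = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name:       "example_rpc_duration_seconds",
		Help:       "RPC latency, partitioned by service and method.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	},
	[]string{"service", "method"},
)

func observeRPC() {
	// The curried vector shares its metrics with rpcDur; only the "method"
	// label remains to be supplied.
	authDur := rpcDur.MustCurryWith(prometheus.Labels{"service": "auth"})
	authDur.WithLabelValues("Login").Observe(0.021)
}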
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &SummaryVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec } type constSummary struct { @@ -505,8 +580,8 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constSummary{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 000000000..b8fc5f18c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go index 5faf7e6e3..0f9ce63f4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -13,108 +13,12 @@ package prometheus -// Untyped is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// An Untyped metric works the same as a Gauge. The only difference is that to -// no type information is implied. -// -// To create Untyped instances, use NewUntyped. -type Untyped interface { - Metric - Collector - - // Set sets the Untyped metric to an arbitrary value. 
- Set(float64) - // Inc increments the Untyped metric by 1. - Inc() - // Dec decrements the Untyped metric by 1. - Dec() - // Add adds the given value to the Untyped metric. (The value can be - // negative, resulting in a decrease.) - Add(float64) - // Sub subtracts the given value from the Untyped metric. (The value can - // be negative, resulting in an increase.) - Sub(float64) -} - // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts -// NewUntyped creates a new Untyped metric from the provided UntypedOpts. -func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. -type UntypedVec struct { - *MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }), - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. -func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. // // To create UntypedFunc instances, use NewUntypedFunc. 
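// Illustrative sketch (not part of the vendored patch): UntypedFunc used to
// mirror an external value of unknown metric type, as described above. The
// queue-length callback is hypothetical.
package example

import "github.com/prometheus/client_golang/prometheus"

func newQueueLengthMetric(queueLen func() float64) prometheus.UntypedFunc {
	return prometheus.NewUntypedFunc(
		prometheus.UntypedOpts{
			Name: "example_external_queue_length",
			Help: "Queue length reported by an external system of unknown type.",
		},
		queueLen, // Called at collect time.
	)
}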
type UntypedFunc interface { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index a944c3775..543b57c27 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,11 +14,8 @@ package prometheus import ( - "errors" "fmt" - "math" "sort" - "sync/atomic" dto "github.com/prometheus/client_model/go" @@ -36,77 +33,6 @@ const ( UntypedValue ) -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits containst the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc. -func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -153,8 +79,8 @@ func (v *valueFunc) Write(out *dto.Metric) error { // the Collect method. NewConstMetric returns an error if the length of // labelValues is not consistent with the variable labels in Desc. 
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err } return &constMetric{ desc: desc, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7f3eef9a4..cea158249 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,33 +20,180 @@ import ( "github.com/prometheus/common/model" ) -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64][]metricWithLabelValues - desc *Desc +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. It uses basicMetricVec internally. +type metricVec struct { + *metricMap - newMetric func(labelValues ...string) Metric - hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + curry []curriedLabelValue + + // hashAdd and hashAddByte can be replaced for testing collision handling. + hashAdd func(h uint64, s string) uint64 hashAddByte func(h uint64, b byte) uint64 } -// newMetricVec returns an initialized MetricVec. The concrete value is -// returned for embedding into another struct. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - children: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + metricMap: &metricMap{ + metrics: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + }, hashAdd: hashAdd, hashAddByte: hashAddByte, } } +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. 
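// Illustrative sketch (not part of the vendored patch): deleting metrics from
// a vector via DeleteLabelValues and Delete, whose semantics are documented
// above. httpReqs stands for a hypothetical CounterVec with the variable
// labels "code" and "method".
package example

import "github.com/prometheus/client_golang/prometheus"

func prune(httpReqs *prometheus.CounterVec) {
	// The value order must match the label order given at construction time.
	_ = httpReqs.DeleteLabelValues("404", "GET")

	// The Labels form is order-independent and less prone to mix-ups.
	_ = httpReqs.Delete(prometheus.Labels{"code": "500", "method": "POST"})
}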
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. + } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + // metricWithLabelValues provides the metric and its label values for // disambiguation on hash collision. 
type metricWithLabelValues struct { @@ -54,166 +201,72 @@ type metricWithLabelValues struct { metric Metric } -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { ch <- m.desc } // Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { +func (m *metricMap) Collect(ch chan<- Metric) { m.mtx.RLock() defer m.mtx.RUnlock() - for _, metrics := range m.children { + for _, metrics := range m.metrics { for _, metric := range metrics { ch <- metric.metric } } } -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabelValues(h, lvs), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. 
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.getOrCreateMetricWithLabels(h, labels), nil -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return metric -} - -// With works as GetMetricWith, but panics if an error occurs. The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) - } - return metric -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { m.mtx.Lock() defer m.mtx.Unlock() - h, err := m.hashLabelValues(lvs) - if err != nil { - return false + for h := range m.metrics { + delete(m.metrics, h) } - return m.deleteByHashWithLabelValues(h, lvs) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.deleteByHashWithLabels(h, labels) } // deleteByHashWithLabelValues removes the metric from the hash bucket h. If // there are multiple matches in the bucket, use lvs to select a metric and // remove only that metric. 
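The doc comments above spell out the trade-off between positional label values and a Labels map. A minimal sketch of both call styles on an exported vector, including the matching deletion calls (metric and label names are hypothetical):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	temps := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "room_temperature_celsius",
			Help: "Current room temperature.",
		},
		[]string{"room", "sensor"},
	)

	// Positional form: compact and fast, but the argument order must
	// match the label order declared above.
	temps.WithLabelValues("kitchen", "s1").Set(21.5)

	// Map form: more verbose, but immune to ordering mistakes.
	temps.With(prometheus.Labels{"room": "kitchen", "sensor": "s2"}).Set(22.0)

	// Deletion mirrors the two access styles and reports whether a child
	// metric was actually removed.
	_ = temps.DeleteLabelValues("kitchen", "s1")
	_ = temps.Delete(prometheus.Labels{"room": "kitchen", "sensor": "s2"})
}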
-func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { - metrics, ok := m.children[h] +func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] if !ok { return false } - i := m.findMetricWithLabelValues(metrics, lvs) + i := findMetricWithLabelValues(metrics, lvs, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.children, h) + delete(m.metrics, h) } return true } @@ -221,69 +274,35 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { // deleteByHashWithLabels removes the metric from the hash bucket h. If there // are multiple matches in the bucket, use lvs to select a metric and remove // only that metric. -func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { - metrics, ok := m.children[h] +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + metrics, ok := m.metrics[h] if !ok { return false } - i := m.findMetricWithLabels(metrics, labels) + i := findMetricWithLabels(m.desc, metrics, labels, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { - m.children[h] = append(metrics[:i], metrics[i+1:]...) + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) } else { - delete(m.children, h) + delete(m.metrics, h) } return true } -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, val := range vals { - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithLabelValues(hash, lvs) + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) m.mtx.RUnlock() if ok { return metric @@ -291,13 +310,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabelValues(hash, lvs) + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) if !ok { - // Copy to avoid allocation in case wo don't go down this code path. - copiedLVs := make([]string, len(lvs)) - copy(copiedLVs, lvs) - metric = m.newMetric(copiedLVs...) 
- m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) } return metric } @@ -306,9 +323,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) // or creates it and returns the new one. // // This function holds the mutex. -func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { m.mtx.RLock() - metric, ok := m.getMetricWithLabels(hash, labels) + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) m.mtx.RUnlock() if ok { return metric @@ -316,33 +335,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr m.mtx.Lock() defer m.mtx.Unlock() - metric, ok = m.getMetricWithLabels(hash, labels) + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) if !ok { - lvs := m.extractLabelValues(labels) + lvs := extractLabelValues(m.desc, labels, curry) metric = m.newMetric(lvs...) - m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) } return metric } -// getMetricWithLabelValues gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { - metrics, ok := m.children[h] +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] if ok { - if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { return metrics[i].metric, true } } return nil, false } -// getMetricWithLabels gets a metric while handling possible collisions in +// getMetricWithHashAndLabels gets a metric while handling possible collisions in // the hash space. Must be called while holding read mutex. -func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { - metrics, ok := m.children[h] +func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] if ok { - if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { return metrics[i].metric, true } } @@ -351,9 +374,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) // findMetricWithLabelValues returns the index of the matching metric or // len(metrics) if not found. 
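getOrCreateMetricWithLabelValues above uses the check-under-read-lock, then re-check-under-write-lock pattern so the common hit path never takes the exclusive lock. A stripped-down, hypothetical sketch of the same pattern outside client_golang:

package main

import (
	"fmt"
	"sync"
)

// cache is a hypothetical stand-in for metricMap: a map guarded by an
// RWMutex whose entries are created lazily.
type cache struct {
	mtx sync.RWMutex
	m   map[string]int
}

func (c *cache) getOrCreate(key string, create func() int) int {
	// Fast path: most lookups find an existing entry and only need the
	// shared read lock.
	c.mtx.RLock()
	v, ok := c.m[key]
	c.mtx.RUnlock()
	if ok {
		return v
	}

	// Slow path: take the write lock and re-check, because another
	// goroutine may have created the entry between the two locks.
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if v, ok := c.m[key]; ok {
		return v
	}
	v = create()
	c.m[key] = v
	return v
}

func main() {
	c := &cache{m: map[string]int{}}
	fmt.Println(c.getOrCreate("a", func() int { return 42 })) // creates: 42
	fmt.Println(c.getOrCreate("a", func() int { return 99 })) // cached: 42
}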
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { for i, metric := range metrics { - if m.matchLabelValues(metric.values, lvs) { + if matchLabelValues(metric.values, lvs, curry) { return i } } @@ -362,32 +387,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l // findMetricWithLabels returns the index of the matching metric or len(metrics) // if not found. -func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { for i, metric := range metrics { - if m.matchLabels(metric.values, labels) { + if matchLabels(desc, metric.values, labels, curry) { return i } } return len(metrics) } -func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { - if len(values) != len(lvs) { +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { return false } + var iLVs, iCurry int for i, v := range values { - if v != lvs[i] { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { return false } + iLVs++ } return true } -func (m *MetricVec) matchLabels(values []string, labels Labels) bool { - if len(labels) != len(values) { +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { return false } - for i, k := range m.desc.variableLabels { + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } if values[i] != labels[k] { return false } @@ -395,10 +439,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool { return true } -func (m *MetricVec) extractLabelValues(labels Labels) []string { - labelValues := make([]string, len(labels)) - for i, k := range m.desc.variableLabels { +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } labelValues[i] = labels[k] } return labelValues } + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md index 98f6ce24b..11a584945 100644 --- a/vendor/github.com/prometheus/common/README.md +++ b/vendor/github.com/prometheus/common/README.md @@ -6,7 +6,7 @@ components and libraries. 
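The index bookkeeping in extractLabelValues and inlineLabelValues above interleaves curried values (pinned to positions in the full label set) with caller-supplied values. A standalone sketch of the same merge, with hypothetical data:

package main

import "fmt"

// curried mirrors curriedLabelValue: a label value pinned at a fixed
// position of the full, uncurried label-value slice.
type curried struct {
	index int
	value string
}

// merge interleaves curried values with caller-supplied values the same
// way inlineLabelValues does.
func merge(lvs []string, curry []curried) []string {
	out := make([]string, len(lvs)+len(curry))
	var iCurry, iLVs int
	for i := range out {
		if iCurry < len(curry) && curry[iCurry].index == i {
			out[i] = curry[iCurry].value
			iCurry++
			continue
		}
		out[i] = lvs[iLVs]
		iLVs++
	}
	return out
}

func main() {
	// Full label set: handler, method, code; "handler" curried to "/api".
	full := merge([]string{"GET", "200"}, []curried{{index: 0, value: "/api"}})
	fmt.Println(full) // [/api GET 200]
}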
* **config**: Common configuration structures * **expfmt**: Decoding and encoding for the exposition format -* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus) +* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus) * **model**: Shared data structures * **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` * **version**: Version informations and metric diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 487fdc6cc..a7a42d5ef 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -31,6 +31,7 @@ type Decoder interface { Decode(*dto.MetricFamily) error } +// DecodeOptions contains options used by the Decoder and in sample extraction. type DecodeOptions struct { // Timestamp is added to each value from the stream that has no explicit timestamp set. Timestamp model.Time @@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error { return nil } +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. type SampleDecoder struct { Dec Decoder Opts *DecodeOptions @@ -149,37 +152,51 @@ type SampleDecoder struct { f dto.MetricFamily } +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. func (sd *SampleDecoder) Decode(s *model.Vector) error { - if err := sd.Dec.Decode(&sd.f); err != nil { + err := sd.Dec.Decode(&sd.f) + if err != nil { return err } - *s = extractSamples(&sd.f, sd.Opts) - return nil + *s, err = extractSamples(&sd.f, sd.Opts) + return err } -// Extract samples builds a slice of samples from the provided metric families. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector { - var all model.Vector +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occured. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) for _, f := range fams { - all = append(all, extractSamples(f, o)...) + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) 
} - return all + return all, lastErr } -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector { +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { switch f.GetType() { case dto.MetricType_COUNTER: - return extractCounter(o, f) + return extractCounter(o, f), nil case dto.MetricType_GAUGE: - return extractGauge(o, f) + return extractGauge(o, f), nil case dto.MetricType_SUMMARY: - return extractSummary(o, f) + return extractSummary(o, f), nil case dto.MetricType_UNTYPED: - return extractUntyped(o, f) + return extractUntyped(o, f), nil case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f) + return extractHistogram(o, f), nil } - panic("expfmt.extractSamples: unknown metric family type") + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) } func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index fae10f6eb..371ac7503 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -11,14 +11,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -// A package for reading and writing Prometheus metrics. +// Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +// Format specifies the HTTP content type of the different wire protocols. type Format string +// Constants to assemble the Content-Type values for the different wire protocols. const ( - TextVersion = "0.0.4" - + TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ef9a15077..54bcfde29 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn { if p.readTokenAsLabelValue(); p.err != nil { return nil } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } p.currentLabelPair.Value = proto.String(p.currentToken.String()) // Special treatment of summaries: // - Quantile labels are special, will result in dto.Quantile later. diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 9dff899cb..f7250909b 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -44,7 +44,7 @@ func (m Metric) Before(o Metric) bool { // Clone returns a copy of the Metric. func (m Metric) Clone() Metric { - clone := Metric{} + clone := make(Metric, len(m)) for k, v := range m { clone[k] = v } diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 548968aeb..74ed5a9f7 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. 
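With the expfmt changes above, ExtractSamples reports the last error instead of panicking on an unknown metric family type, and SampleDecoder forwards that error. A minimal sketch of decoding the text exposition format into samples with the updated API (the input payload is made up):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	in := strings.NewReader(`# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
http_requests_total{code="500"} 3
`)

	dec := expfmt.SampleDecoder{
		Dec:  expfmt.NewDecoder(in, expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
	}

	for {
		var samples model.Vector
		if err := dec.Decode(&samples); err == io.EOF {
			break
		} else if err != nil {
			fmt.Println("decode error:", err)
			return
		}
		for _, s := range samples {
			fmt.Println(s)
		}
	}
}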
type Duration time.Duration +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") -// StringToDuration parses a string into a time.Duration, assuming that a year +// ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. func ParseDuration(durationStr string) (Duration, error) { matches := durationRE.FindStringSubmatch(durationStr) @@ -202,6 +214,9 @@ func (d Duration) String() string { ms = int64(time.Duration(d) / time.Millisecond) unit = "ms" ) + if ms == 0 { + return "0s" + } factors := map[string]int64{ "y": 1000 * 60 * 60 * 24 * 365, "w": 1000 * 60 * 60 * 24 * 7, diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 7728abaee..c9ed3ffd8 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -129,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - if s.Value.Equal(o.Value) { - return false - } - return true + return s.Value.Equal(o.Value) } func (s Sample) String() string { diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 6e7ee6b8b..209549471 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..d3a826807 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. 
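The model.Duration helpers above accept a single number with one unit (per durationRE), render the zero value as "0s", and now satisfy the flag/pflag Value interface via Set and Type. A short sketch:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration understands a single <number><unit> token such as
	// "90m", "2h", "1d" or "2w" (a year is 365d, a week 7d, a day 24h).
	d, err := model.ParseDuration("90m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 90m

	// Set implements flag.Value / pflag.Value, so Duration can back a
	// command-line flag directly.
	var timeout model.Duration
	if err := timeout.Set("2h"); err != nil {
		panic(err)
	}
	fmt.Println(timeout) // 2h

	// The zero value renders as "0s".
	fmt.Println(model.Duration(0)) // 0s
}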
+func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. +func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 49aaab050..36c1586a1 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -4,6 +4,9 @@ import ( "fmt" "os" "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" ) // FS represents the pseudo-filesystem proc, which provides an interface to @@ -31,3 +34,36 @@ func NewFS(mountPoint string) (FS, error) { func (fs FS) Path(p ...string) string { return path.Join(append([]string{string(fs)}, p...)...) } + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSdClientRPCStats retrieves NFS daemon RPC statistics. +func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. +func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 000000000..1ad21c91a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import "strconv" + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index e7012f732..5761b4570 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -31,14 +31,16 @@ type IPVSStats struct { type IPVSBackendStatus struct { // The local (virtual) IP address. LocalAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The transport protocol (TCP, UDP). - Proto string // The remote (real) IP address. RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 // The remote (real) port. RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string // The current number of active connections for this virtual/real address pair. ActiveConn uint64 // The current number of inactive connections for this virtual/real address pair. 
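The fs.go additions above give procfs typed accessors for the XFS and NFS statistics files, backed by the new internal/util parsers. A minimal sketch of reading them from the default /proc mount (only meaningful on a Linux host that actually exposes these files):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // "/proc"
	if err != nil {
		panic(err)
	}

	// XFS runtime statistics from /proc/fs/xfs/stat.
	if xfsStats, err := fs.XFSStats(); err != nil {
		fmt.Println("no XFS stats:", err)
	} else {
		fmt.Printf("xfs: %+v\n", *xfsStats)
	}

	// NFS client RPC statistics from /proc/net/rpc/nfs.
	if nfsStats, err := fs.NFSdClientRPCStats(); err != nil {
		fmt.Println("no NFS client stats:", err)
	} else {
		fmt.Println("NFS client RPC calls:", nfsStats.ClientRPC.RPCCount)
	}
}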
@@ -142,13 +144,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { status []IPVSBackendStatus scanner = bufio.NewScanner(file) proto string + localMark string localAddress net.IP localPort uint16 err error ) for scanner.Scan() { - fields := strings.Fields(string(scanner.Text())) + fields := strings.Fields(scanner.Text()) if len(fields) == 0 { continue } @@ -160,10 +163,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { continue } proto = fields[0] + localMark = "" localAddress, localPort, err = parseIPPort(fields[1]) if err != nil { return nil, err } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 case fields[0] == "->": if len(fields) < 6 { continue @@ -187,6 +199,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { status = append(status, IPVSBackendStatus{ LocalAddress: localAddress, LocalPort: localPort, + LocalMark: localMark, RemoteAddress: remoteAddress, RemotePort: remotePort, Proto: proto, @@ -200,22 +213,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { } func parseIPPort(s string) (net.IP, uint16, error) { - tmp := strings.SplitN(s, ":", 2) + var ( + ip net.IP + err error + ) - if len(tmp) != 2 { - return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) } - if len(tmp[0]) != 8 && len(tmp[0]) != 32 { - return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) } - - ip, err := hex.DecodeString(tmp[0]) - if err != nil { - return nil, 0, err - } - - port, err := strconv.ParseUint(tmp[1], 16, 16) + port, err := strconv.ParseUint(portString, 16, 16) if err != nil { return nil, 0, err } diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 3b7d3ec84..6b2b0ba9d 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -72,80 +72,80 @@ func (m MountStatsNFS) mountStats() {} // by an NFS client to and from an NFS server. type NFSBytesStats struct { // Number of bytes read using the read() syscall. - Read int + Read uint64 // Number of bytes written using the write() syscall. - Write int + Write uint64 // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead int + DirectRead uint64 // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite int + DirectWrite uint64 // Number of bytes read from the NFS server, in total. - ReadTotal int + ReadTotal uint64 // Number of bytes written to the NFS server, in total. - WriteTotal int + WriteTotal uint64 // Number of pages read directly via mmap()'d files. - ReadPages int + ReadPages uint64 // Number of pages written directly via mmap()'d files. - WritePages int + WritePages uint64 } // A NFSEventsStats contains statistics about NFS event occurrences. type NFSEventsStats struct { // Number of times cached inode attributes are re-validated from the server. 
- InodeRevalidate int + InodeRevalidate uint64 // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate int + DnodeRevalidate uint64 // Number of times an inode cache is cleared. - DataInvalidate int + DataInvalidate uint64 // Number of times cached inode attributes are invalidated. - AttributeInvalidate int + AttributeInvalidate uint64 // Number of times files or directories have been open()'d. - VFSOpen int + VFSOpen uint64 // Number of times a directory lookup has occurred. - VFSLookup int + VFSLookup uint64 // Number of times permissions have been checked. - VFSAccess int + VFSAccess uint64 // Number of updates (and potential writes) to pages. - VFSUpdatePage int + VFSUpdatePage uint64 // Number of pages read directly via mmap()'d files. - VFSReadPage int + VFSReadPage uint64 // Number of times a group of pages have been read. - VFSReadPages int + VFSReadPages uint64 // Number of pages written directly via mmap()'d files. - VFSWritePage int + VFSWritePage uint64 // Number of times a group of pages have been written. - VFSWritePages int + VFSWritePages uint64 // Number of times directory entries have been read with getdents(). - VFSGetdents int + VFSGetdents uint64 // Number of times attributes have been set on inodes. - VFSSetattr int + VFSSetattr uint64 // Number of pending writes that have been forcefully flushed to the server. - VFSFlush int + VFSFlush uint64 // Number of times fsync() has been called on directories and files. - VFSFsync int - // Number of times locking has been attemped on a file. - VFSLock int + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 // Number of times files have been closed and released. - VFSFileRelease int + VFSFileRelease uint64 // Unknown. Possibly unused. - CongestionWait int + CongestionWait uint64 // Number of times files have been truncated. - Truncation int + Truncation uint64 // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension int + WriteExtension uint64 // Number of times a file was removed while still open by another process. - SillyRename int + SillyRename uint64 // Number of times the NFS server gave less data than expected while reading. - ShortRead int + ShortRead uint64 // Number of times the NFS server wrote less data than expected while writing. - ShortWrite int + ShortWrite uint64 // Number of times the NFS server indicated EJUKEBOX; retrieving data from // offline storage. - JukeboxDelay int + JukeboxDelay uint64 // Number of NFS v4.1+ pNFS reads. - PNFSRead int + PNFSRead uint64 // Number of NFS v4.1+ pNFS writes. - PNFSWrite int + PNFSWrite uint64 } // A NFSOperationStats contains statistics for a single operation. @@ -153,15 +153,15 @@ type NFSOperationStats struct { // The name of the operation. Operation string // Number of requests performed for this operation. - Requests int + Requests uint64 // Number of times an actual RPC request has been transmitted for this operation. - Transmissions int + Transmissions uint64 // Number of times a request has had a major timeout. - MajorTimeouts int + MajorTimeouts uint64 // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent int + BytesSent uint64 // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived int + BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. 
CumulativeQueueTime time.Duration // Duration it took to get a reply back after the request was transmitted. @@ -174,41 +174,41 @@ type NFSOperationStats struct { // responses. type NFSTransportStats struct { // The local port used for the NFS mount. - Port int + Port uint64 // Number of times the client has had to establish a connection from scratch // to the NFS server. - Bind int + Bind uint64 // Number of times the client has made a TCP connection to the NFS server. - Connect int + Connect uint64 // Duration (in jiffies, a kernel internal unit of time) the NFS mount has // spent waiting for connections to the server to be established. - ConnectIdleTime int + ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. IdleTime time.Duration // Number of RPC requests for this mount sent to the NFS server. - Sends int + Sends uint64 // Number of RPC responses for this mount received from the NFS server. - Receives int + Receives uint64 // Number of times the NFS server sent a response with a transaction ID // unknown to this client. - BadTransactionIDs int + BadTransactionIDs uint64 // A running counter, incremented on each request as the current difference // ebetween sends and receives. - CumulativeActiveRequests int + CumulativeActiveRequests uint64 // A running counter, incremented on each request by the current backlog // queue size. - CumulativeBacklog int + CumulativeBacklog uint64 // Stats below only available with stat version 1.1. // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed int + MaximumRPCSlotsUsed uint64 // A running counter, incremented on each request as the current size of the // sending queue. - CumulativeSendingQueue int + CumulativeSendingQueue uint64 // A running counter, incremented on each request as the current size of the // pending queue. 
- CumulativePendingQueue int + CumulativePendingQueue uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -356,7 +356,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } // When encountering "per-operation statistics", we must break this - // loop and parse them seperately to ensure we can terminate parsing + // loop and parse them separately to ensure we can terminate parsing // before reaching another device entry; hence why this 'if' statement // is not just another switch case if ss[0] == fieldPerOpStats { @@ -386,9 +386,9 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) } - ns := make([]int, 0, fieldBytesLen) + ns := make([]uint64, 0, fieldBytesLen) for _, s := range ss { - n, err := strconv.Atoi(s) + n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } @@ -415,9 +415,9 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { return nil, fmt.Errorf("invalid NFS events stats: %v", ss) } - ns := make([]int, 0, fieldEventsLen) + ns := make([]uint64, 0, fieldEventsLen) for _, s := range ss { - n, err := strconv.Atoi(s) + n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } @@ -480,9 +480,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } // Skip string operation name for integers - ns := make([]int, 0, numFields-1) + ns := make([]uint64, 0, numFields-1) for _, st := range ss[1:] { - n, err := strconv.Atoi(st) + n, err := strconv.ParseUint(st, 10, 64) if err != nil { return nil, err } @@ -523,15 +523,19 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response - ns := make([]int, 0, fieldTransport11Len) - for _, s := range ss { - n, err := strconv.Atoi(s) + // in a v1.0 response. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11Len) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } - ns = append(ns, n) + ns[i] = n } return &NFSTransportStats{ diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 000000000..f8c184efe --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,203 @@ +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. 
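The mount statistics above switch their counters to uint64 and fix parsing of v1.0 transport lines. A sketch of consuming them for the current process, assuming the pre-existing procfs.Self and Proc.MountStats helpers (not part of this diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // /proc/self
	if err != nil {
		panic(err)
	}

	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		panic(err)
	}

	for _, m := range mounts {
		// Only NFS mounts carry the detailed per-operation statistics.
		nfsStats, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		fmt.Printf("%s on %s: %d bytes read, age %s\n",
			m.Device, m.Mount, nfsStats.Bytes.ReadTotal, nfsStats.Age)
	}
}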
+ RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := nd.parseLine(s.Text()) + if err != nil { + return nd, err + } + + nd[line.Name] = *line + } + + return nd, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma seperated list of interface names. +func (nd NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(nd)) + for _, ifc := range nd { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go new file mode 100644 index 000000000..e2185b782 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go @@ -0,0 +1,263 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
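net_dev.go above adds parsing for /proc/net/dev and /proc/[pid]/net/dev plus the aggregating Total helper. A short sketch using the system-wide view:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	nd, err := procfs.NewNetDev() // reads /proc/net/dev
	if err != nil {
		panic(err)
	}

	for name, line := range nd {
		fmt.Printf("%-8s rx=%d bytes tx=%d bytes\n", name, line.RxBytes, line.TxBytes)
	}

	// Total sums the counters across all interfaces; Name becomes a
	// sorted, comma-separated list of interface names.
	total := nd.Total()
	fmt.Printf("total (%s): rx=%d tx=%d\n", total.Name, total.RxBytes, total.TxBytes)
}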
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfsd implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. +type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. +type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. +type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. 
+type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientId uint64 + SetClientIdConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetAcl uint64 + SetAcl uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeId uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateId uint64 + FreeStateId uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientId uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// RPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. +type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 000000000..3aa32563a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,308 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, 
error) { + values := int(v[0]) + if len(v[1:]) != values || values < 59 { + return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientId: v[13], + SetClientIdConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetAcl: v[33], + SetAcl: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeId: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateId: v[50], + FreeStateId: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientId: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 000000000..b5c0b15f3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least <key> <value> + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFS metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFS file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go new file mode 100644 index 000000000..57bb4a358 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least <key> <value> + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index b4e31d7ba..e7f6674d0 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -47,9 +47,6 @@ func (p Proc) NewIO() (ProcIO, error) { _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - if err != nil { - return pio, err - } - return pio, nil + return pio, err } diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 2df997ce1..b684a5b55 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -13,46 +13,46 @@ import ( // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. - CPUTime int + CPUTime int64 // Maximum size of files that the process may create. - FileSize int + FileSize int64 // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). - DataSize int + DataSize int64 // Maximum size of the process stack in bytes. - StackSize int + StackSize int64 // Maximum size of a core file. - CoreFileSize int + CoreFileSize int64 // Limit of the process's resident set in pages. - ResidentSet int + ResidentSet int64 // Maximum number of processes that can be created for the real user ID of // the calling process.
- Processes int + Processes int64 // Value one greater than the maximum file descriptor number that can be // opened by this process. - OpenFiles int + OpenFiles int64 // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int + LockedMemory int64 // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int + AddressSpace int64 // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. - FileLocks int + FileLocks int64 // Limit of signals that may be queued for the real user ID of the calling // process. - PendingSignals int + PendingSignals int64 // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. - MsqqueueSize int + MsqqueueSize int64 // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int + NicePriority int64 // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). - RealtimePriority int + RealtimePriority int64 // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. - RealtimeTimeout int + RealtimeTimeout int64 } const ( @@ -125,13 +125,13 @@ func (p Proc) NewLimits() (ProcLimits, error) { return l, s.Err() } -func parseInt(s string) (int, error) { +func parseInt(s string) (int64, error) { if s == limitsUnlimited { return -1, nil } - i, err := strconv.ParseInt(s, 10, 32) + i, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) } - return int(i), nil + return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 000000000..befdd2690 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,55 @@ +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the +// process is a member.
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 1ca217e8c..701f4df64 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -3,15 +3,66 @@ package procfs import ( "bufio" "fmt" + "io" "os" "strconv" "strings" ) +// CPUStat shows how much time the cpu spent in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represents the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + // Stat represents kernel/system statistics. type Stat struct { // Boot time in seconds since the Epoch. - BootTime int64 + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat } // NewStat returns kernel/system statistics read from /proc/stat. @@ -24,33 +75,145 @@ func NewStat() (Stat, error) { return fs.NewStat() } +// Parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. +func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + // NewStat returns information about current kernel/system statistics. func (fs FS) NewStat() (Stat, error) { + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + f, err := os.Open(fs.Path("stat")) if err != nil { return Stat{}, err } defer f.Close() - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - if !strings.HasPrefix(line, "btime") { + stat := Stat{} + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least <key> <value> + if len(parts) < 2 { continue } - fields := strings.Fields(line) - if len(fields) != 2 { - return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } } - i, err := strconv.ParseInt(fields[1], 10, 32) - if err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) - } - return Stat{BootTime: i}, nil } - if err := s.Err(); err != nil { + + if err := scanner.Err(); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) } - return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) + return stat, nil } diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 000000000..ffe9df50d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. 
Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transformation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e. sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discards + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. +func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldn't parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 000000000..2bc0ef342 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +//
Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. + fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. 
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. +func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. 
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. + l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 000000000..d86794b7c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. 
Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. +type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. 
+type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +}
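
For reviewers who want to sanity-check the newly vendored procfs code, below is a minimal usage sketch; it is illustrative only and not part of the patch. It assumes the vendored import paths used in this diff, a /proc mounted at the default location, and that /proc/net/rpc/nfsd exists on the host (it is only present when NFS server support is loaded). The function and field names are taken from the vendored files above.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
	"github.com/prometheus/procfs/nfs"
)

func main() {
	// Kernel/system statistics from /proc/stat via the reworked Stat parser.
	stat, err := procfs.NewStat()
	if err != nil {
		log.Fatalf("reading /proc/stat: %v", err)
	}
	fmt.Printf("boot time: %d, context switches: %d, cpus: %d\n",
		stat.BootTime, stat.ContextSwitches, len(stat.CPU))

	// NFS server counters from /proc/net/rpc/nfsd via the new nfs package.
	// nfs.ParseClientRPCStats works the same way against /proc/net/rpc/nfs.
	f, err := os.Open("/proc/net/rpc/nfsd")
	if err != nil {
		log.Fatalf("opening nfsd stats: %v", err)
	}
	defer f.Close()

	srv, err := nfs.ParseServerRPCStats(f)
	if err != nil {
		log.Fatalf("parsing nfsd stats: %v", err)
	}
	fmt.Printf("rpc calls: %d, threads: %d, v3 reads: %d\n",
		srv.ServerRPC.RPCCount, srv.Threads.Threads, srv.V3Stats.Read)
}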