Merge branch 'master' into 44461-gophercloud-bump

1 vendor/github.com/blang/semver/BUILD (generated, vendored)
@@ -11,6 +11,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "json.go",
        "range.go",
        "semver.go",
        "sort.go",
        "sql.go",
77 vendor/github.com/blang/semver/README.md (generated, vendored)
@@ -40,10 +40,52 @@ Features
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)

Ranges
------

A `Range` is a set of conditions that specify which versions satisfy the range.

A condition is composed of an operator and a version. The supported operators are:

- `<1.0.0` Less than `1.0.0`
- `<=1.0.0` Less than or equal to `1.0.0`
- `>1.0.0` Greater than `1.0.0`
- `>=1.0.0` Greater than or equal to `1.0.0`
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.

A `Range` can combine multiple conditions, separated by spaces.

Conditions can be linked by logical AND:

- `>1.0.0 <2.0.0` would match versions between both bounds, so `1.1.1` and `1.8.7`, but not `1.0.0` or `2.0.0`
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`

Conditions can also be linked by logical OR:

- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`

AND has a higher precedence than OR. It's not possible to use brackets.

AND and OR can be combined in the same range:

- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, and `3.1.1`, but not `4.2.1` or `2.1.1`

Range usage:

```go
v, err := semver.Parse("1.2.3")
expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
if expectedRange(v) {
    // valid
}
```
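Wildcard conditions such as `1.2.x` are also handled by the new `range.go` (see its `expandWildcardVersion` helper later in this commit). A minimal illustrative sketch, not taken from the upstream README, using only the `ParseRange` and `MustParse` functions that this commit vendors:

```go
// "1.2.x" is expanded to ">=1.2.0 <1.3.0" before matching.
wildcardRange, err := semver.ParseRange("1.2.x")
if err != nil {
    // handle the parse error
}
if wildcardRange(semver.MustParse("1.2.7")) {
    // matches: 1.2.7 falls inside >=1.2.0 <1.3.0
}
if !wildcardRange(semver.MustParse("1.3.0")) {
    // does not match: 1.3.0 is outside the expanded range
}
```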

Example
-----
@@ -103,23 +145,30 @@ if err != nil {
}
```


Benchmarks
-----

    BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
    BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
    BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
    BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
    BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
    BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
    BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
    BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
    BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
    BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
    BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
    BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
    BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
    BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
    BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
    BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
    BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
    BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
    BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
    BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
    BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
    BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
    BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
    BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
    BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
    BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
    BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
    BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
    BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
    BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
    BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
    BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
    BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
    BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op

See benchmark cases at [semver_test.go](semver_test.go)
17 vendor/github.com/blang/semver/package.json (generated, vendored, new file)
@@ -0,0 +1,17 @@
{
  "author": "blang",
  "bugs": {
    "URL": "https://github.com/blang/semver/issues",
    "url": "https://github.com/blang/semver/issues"
  },
  "gx": {
    "dvcsimport": "github.com/blang/semver"
  },
  "gxVersion": "0.10.0",
  "language": "go",
  "license": "MIT",
  "name": "semver",
  "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
  "version": "3.4.0"
}
416 vendor/github.com/blang/semver/range.go (generated, vendored, new file)
@@ -0,0 +1,416 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type wildcardType int
|
||||
|
||||
const (
|
||||
noneWildcard wildcardType = iota
|
||||
majorWildcard wildcardType = 1
|
||||
minorWildcard wildcardType = 2
|
||||
patchWildcard wildcardType = 3
|
||||
)
|
||||
|
||||
func wildcardTypefromInt(i int) wildcardType {
|
||||
switch i {
|
||||
case 1:
|
||||
return majorWildcard
|
||||
case 2:
|
||||
return minorWildcard
|
||||
case 3:
|
||||
return patchWildcard
|
||||
default:
|
||||
return noneWildcard
|
||||
}
|
||||
}
|
||||
|
||||
type comparator func(Version, Version) bool
|
||||
|
||||
var (
|
||||
compEQ comparator = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == 0
|
||||
}
|
||||
compNE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) != 0
|
||||
}
|
||||
compGT = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == 1
|
||||
}
|
||||
compGE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) >= 0
|
||||
}
|
||||
compLT = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == -1
|
||||
}
|
||||
compLE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) <= 0
|
||||
}
|
||||
)
|
||||
|
||||
type versionRange struct {
|
||||
v Version
|
||||
c comparator
|
||||
}
|
||||
|
||||
// rangeFunc creates a Range from the given versionRange.
|
||||
func (vr *versionRange) rangeFunc() Range {
|
||||
return Range(func(v Version) bool {
|
||||
return vr.c(v, vr.v)
|
||||
})
|
||||
}
|
||||
|
||||
// Range represents a range of versions.
|
||||
// A Range can be used to check if a Version satisfies it:
|
||||
//
|
||||
// range, err := semver.ParseRange(">1.0.0 <2.0.0")
|
||||
// range(semver.MustParse("1.1.1")) // returns true
|
||||
type Range func(Version) bool
|
||||
|
||||
// OR combines the existing Range with another Range using logical OR.
|
||||
func (rf Range) OR(f Range) Range {
|
||||
return Range(func(v Version) bool {
|
||||
return rf(v) || f(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AND combines the existing Range with another Range using logical AND.
|
||||
func (rf Range) AND(f Range) Range {
|
||||
return Range(func(v Version) bool {
|
||||
return rf(v) && f(v)
|
||||
})
|
||||
}
|
||||
|
||||
// ParseRange parses a range and returns a Range.
|
||||
// If the range could not be parsed an error is returned.
|
||||
//
|
||||
// Valid ranges are:
|
||||
// - "<1.0.0"
|
||||
// - "<=1.0.0"
|
||||
// - ">1.0.0"
|
||||
// - ">=1.0.0"
|
||||
// - "1.0.0", "=1.0.0", "==1.0.0"
|
||||
// - "!1.0.0", "!=1.0.0"
|
||||
//
|
||||
// A Range can consist of multiple ranges separated by space:
|
||||
// Ranges can be linked by logical AND:
|
||||
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
|
||||
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
|
||||
//
|
||||
// Ranges can also be linked by logical OR:
|
||||
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
|
||||
//
|
||||
// AND has a higher precedence than OR. It's not possible to use brackets.
|
||||
//
|
||||
// Ranges can be combined by both AND and OR
|
||||
//
|
||||
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
||||
func ParseRange(s string) (Range, error) {
|
||||
parts := splitAndTrim(s)
|
||||
orParts, err := splitORParts(parts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
expandedParts, err := expandWildcardVersion(orParts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var orFn Range
|
||||
for _, p := range expandedParts {
|
||||
var andFn Range
|
||||
for _, ap := range p {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vr, err := buildVersionRange(opStr, vStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
|
||||
}
|
||||
rf := vr.rangeFunc()
|
||||
|
||||
// Set function
|
||||
if andFn == nil {
|
||||
andFn = rf
|
||||
} else { // Combine with existing function
|
||||
andFn = andFn.AND(rf)
|
||||
}
|
||||
}
|
||||
if orFn == nil {
|
||||
orFn = andFn
|
||||
} else {
|
||||
orFn = orFn.OR(andFn)
|
||||
}
|
||||
|
||||
}
|
||||
return orFn, nil
|
||||
}
|
||||
|
||||
// splitORParts splits the already cleaned parts by '||'.
|
||||
// Checks for invalid positions of the operator and returns an
|
||||
// error if found.
|
||||
func splitORParts(parts []string) ([][]string, error) {
|
||||
var ORparts [][]string
|
||||
last := 0
|
||||
for i, p := range parts {
|
||||
if p == "||" {
|
||||
if i == 0 {
|
||||
return nil, fmt.Errorf("First element in range is '||'")
|
||||
}
|
||||
ORparts = append(ORparts, parts[last:i])
|
||||
last = i + 1
|
||||
}
|
||||
}
|
||||
if last == len(parts) {
|
||||
return nil, fmt.Errorf("Last element in range is '||'")
|
||||
}
|
||||
ORparts = append(ORparts, parts[last:])
|
||||
return ORparts, nil
|
||||
}
|
||||
|
||||
// buildVersionRange takes a slice of 2: operator and version
|
||||
// and builds a versionRange, otherwise an error.
|
||||
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
|
||||
c := parseComparator(opStr)
|
||||
if c == nil {
|
||||
return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
|
||||
}
|
||||
v, err := Parse(vStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
|
||||
}
|
||||
|
||||
return &versionRange{
|
||||
v: v,
|
||||
c: c,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
// inArray checks if a byte is contained in an array of bytes
|
||||
func inArray(s byte, list []byte) bool {
|
||||
for _, el := range list {
|
||||
if el == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// splitAndTrim splits a range string by spaces and cleans whitespaces
|
||||
func splitAndTrim(s string) (result []string) {
|
||||
last := 0
|
||||
var lastChar byte
|
||||
excludeFromSplit := []byte{'>', '<', '='}
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
|
||||
if last < i-1 {
|
||||
result = append(result, s[last:i])
|
||||
}
|
||||
last = i + 1
|
||||
} else if s[i] != ' ' {
|
||||
lastChar = s[i]
|
||||
}
|
||||
}
|
||||
if last < len(s)-1 {
|
||||
result = append(result, s[last:])
|
||||
}
|
||||
|
||||
for i, v := range result {
|
||||
result[i] = strings.Replace(v, " ", "", -1)
|
||||
}
|
||||
|
||||
// parts := strings.Split(s, " ")
|
||||
// for _, x := range parts {
|
||||
// if s := strings.TrimSpace(x); len(s) != 0 {
|
||||
// result = append(result, s)
|
||||
// }
|
||||
// }
|
||||
return
|
||||
}
|
||||
|
||||
// splitComparatorVersion splits the comparator from the version.
|
||||
// Input must be free of leading or trailing spaces.
|
||||
func splitComparatorVersion(s string) (string, string, error) {
|
||||
i := strings.IndexFunc(s, unicode.IsDigit)
|
||||
if i == -1 {
|
||||
return "", "", fmt.Errorf("Could not get version from string: %q", s)
|
||||
}
|
||||
return strings.TrimSpace(s[0:i]), s[i:], nil
|
||||
}
|
||||
|
||||
// getWildcardType will return the type of wildcard that the
|
||||
// passed version contains
|
||||
func getWildcardType(vStr string) wildcardType {
|
||||
parts := strings.Split(vStr, ".")
|
||||
nparts := len(parts)
|
||||
wildcard := parts[nparts-1]
|
||||
|
||||
possibleWildcardType := wildcardTypefromInt(nparts)
|
||||
if wildcard == "x" {
|
||||
return possibleWildcardType
|
||||
}
|
||||
|
||||
return noneWildcard
|
||||
}
|
||||
|
||||
// createVersionFromWildcard will convert a wildcard version
|
||||
// into a regular version, replacing 'x's with '0's, handling
|
||||
// special cases like '1.x.x' and '1.x'
|
||||
func createVersionFromWildcard(vStr string) string {
|
||||
// handle 1.x.x
|
||||
vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
|
||||
vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
|
||||
parts := strings.Split(vStr2, ".")
|
||||
|
||||
// handle 1.x
|
||||
if len(parts) == 2 {
|
||||
return vStr2 + ".0"
|
||||
}
|
||||
|
||||
return vStr2
|
||||
}
|
||||
|
||||
// incrementMajorVersion will increment the major version
|
||||
// of the passed version
|
||||
func incrementMajorVersion(vStr string) (string, error) {
|
||||
parts := strings.Split(vStr, ".")
|
||||
i, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts[0] = strconv.Itoa(i + 1)
|
||||
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// incrementMinorVersion will increment the minor version
|
||||
// of the passed version
|
||||
func incrementMinorVersion(vStr string) (string, error) {
|
||||
parts := strings.Split(vStr, ".")
|
||||
i, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts[1] = strconv.Itoa(i + 1)
|
||||
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// expandWildcardVersion will expand wildcards inside versions
|
||||
// following these rules:
|
||||
//
|
||||
// * when dealing with patch wildcards:
|
||||
// >= 1.2.x will become >= 1.2.0
|
||||
// <= 1.2.x will become < 1.3.0
|
||||
// > 1.2.x will become >= 1.3.0
|
||||
// < 1.2.x will become < 1.2.0
|
||||
// != 1.2.x will become < 1.2.0 >= 1.3.0
|
||||
//
|
||||
// * when dealing with minor wildcards:
|
||||
// >= 1.x will become >= 1.0.0
|
||||
// <= 1.x will become < 2.0.0
|
||||
// > 1.x will become >= 2.0.0
|
||||
// < 1.0 will become < 1.0.0
|
||||
// != 1.x will become < 1.0.0 >= 2.0.0
|
||||
//
|
||||
// * when dealing with wildcards without
|
||||
// version operator:
|
||||
// 1.2.x will become >= 1.2.0 < 1.3.0
|
||||
// 1.x will become >= 1.0.0 < 2.0.0
|
||||
func expandWildcardVersion(parts [][]string) ([][]string, error) {
|
||||
var expandedParts [][]string
|
||||
for _, p := range parts {
|
||||
var newParts []string
|
||||
for _, ap := range p {
|
||||
if strings.Index(ap, "x") != -1 {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versionWildcardType := getWildcardType(vStr)
|
||||
flatVersion := createVersionFromWildcard(vStr)
|
||||
|
||||
var resultOperator string
|
||||
var shouldIncrementVersion bool
|
||||
switch opStr {
|
||||
case ">":
|
||||
resultOperator = ">="
|
||||
shouldIncrementVersion = true
|
||||
case ">=":
|
||||
resultOperator = ">="
|
||||
case "<":
|
||||
resultOperator = "<"
|
||||
case "<=":
|
||||
resultOperator = "<"
|
||||
shouldIncrementVersion = true
|
||||
case "", "=", "==":
|
||||
newParts = append(newParts, ">="+flatVersion)
|
||||
resultOperator = "<"
|
||||
shouldIncrementVersion = true
|
||||
case "!=", "!":
|
||||
newParts = append(newParts, "<"+flatVersion)
|
||||
resultOperator = ">="
|
||||
shouldIncrementVersion = true
|
||||
}
|
||||
|
||||
var resultVersion string
|
||||
if shouldIncrementVersion {
|
||||
switch versionWildcardType {
|
||||
case patchWildcard:
|
||||
resultVersion, _ = incrementMinorVersion(flatVersion)
|
||||
case minorWildcard:
|
||||
resultVersion, _ = incrementMajorVersion(flatVersion)
|
||||
}
|
||||
} else {
|
||||
resultVersion = flatVersion
|
||||
}
|
||||
|
||||
ap = resultOperator + resultVersion
|
||||
}
|
||||
newParts = append(newParts, ap)
|
||||
}
|
||||
expandedParts = append(expandedParts, newParts)
|
||||
}
|
||||
|
||||
return expandedParts, nil
|
||||
}
|
||||
|
||||
func parseComparator(s string) comparator {
|
||||
switch s {
|
||||
case "==":
|
||||
fallthrough
|
||||
case "":
|
||||
fallthrough
|
||||
case "=":
|
||||
return compEQ
|
||||
case ">":
|
||||
return compGT
|
||||
case ">=":
|
||||
return compGE
|
||||
case "<":
|
||||
return compLT
|
||||
case "<=":
|
||||
return compLE
|
||||
case "!":
|
||||
fallthrough
|
||||
case "!=":
|
||||
return compNE
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MustParseRange is like ParseRange but panics if the range cannot be parsed.
|
||||
func MustParseRange(s string) Range {
|
||||
r, err := ParseRange(s)
|
||||
if err != nil {
|
||||
panic(`semver: ParseRange(` + s + `): ` + err.Error())
|
||||
}
|
||||
return r
|
||||
}
|
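The exported `AND` and `OR` combinators defined above can also be composed programmatically instead of through the range string syntax. A short hypothetical sketch (the variable names are illustrative only):

```go
// Equivalent to semver.MustParseRange("<2.0.0 || >=3.0.0").
olderThanTwo := semver.MustParseRange("<2.0.0")
threeOrNewer := semver.MustParseRange(">=3.0.0")
combined := olderThanTwo.OR(threeOrNewer)

if combined(semver.MustParse("3.1.0")) {
    // 3.1.0 satisfies the combined range
}
```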
23 vendor/github.com/blang/semver/semver.go (generated, vendored)
@@ -200,6 +200,29 @@ func Make(s string) (Version, error) {
    return Parse(s)
}

// ParseTolerant allows certain version specifications that do not strictly adhere to the semver
// spec to be parsed by this library. It does so by normalizing versions before passing them to
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
// with only major and minor components specified.
func ParseTolerant(s string) (Version, error) {
    s = strings.TrimSpace(s)
    s = strings.TrimPrefix(s, "v")

    // Split into major.minor.(patch+pr+meta)
    parts := strings.SplitN(s, ".", 3)
    if len(parts) < 3 {
        if strings.ContainsAny(parts[len(parts)-1], "+-") {
            return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
        }
        for len(parts) < 3 {
            parts = append(parts, "0")
        }
        s = strings.Join(parts, ".")
    }

    return Parse(s)
}

// Parse parses a version string and returns a validated Version or an error
func Parse(s string) (Version, error) {
    if len(s) == 0 {
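A hedged usage sketch of the `ParseTolerant` helper added above; the input and expected output are inferred from its doc comment, not from upstream examples:

```go
// "v1.2" has its "v" prefix trimmed and is padded to "1.2.0" before Parse runs.
v, err := semver.ParseTolerant("v1.2")
if err != nil {
    // handle the parse error
}
fmt.Println(v) // prints 1.2.0 (Version implements fmt.Stringer)
```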
43 vendor/github.com/golang/protobuf/ptypes/BUILD (generated, vendored, new file)
@@ -0,0 +1,43 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"any.go",
|
||||
"doc.go",
|
||||
"duration.go",
|
||||
"timestamp.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/duration:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/timestamp:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/any:all-srcs",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/duration:all-srcs",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/timestamp:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
136 vendor/github.com/golang/protobuf/ptypes/any.go (generated, vendored, new file)
@@ -0,0 +1,136 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements functions to marshal proto.Message to/from
|
||||
// google.protobuf.Any message.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
const googleApis = "type.googleapis.com/"
|
||||
|
||||
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
||||
//
|
||||
// Note that regular type assertions should be done using the Is
|
||||
// function. AnyMessageName is provided for less common use cases like filtering a
|
||||
// sequence of Any messages based on a set of allowed message type names.
|
||||
func AnyMessageName(any *any.Any) (string, error) {
|
||||
slash := strings.LastIndex(any.TypeUrl, "/")
|
||||
if slash < 0 {
|
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||
}
|
||||
return any.TypeUrl[slash+1:], nil
|
||||
}
|
||||
|
||||
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
||||
func MarshalAny(pb proto.Message) (*any.Any, error) {
|
||||
value, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
||||
}
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||
// message. The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
type DynamicAny struct {
|
||||
proto.Message
|
||||
}
|
||||
|
||||
// Empty returns a new proto.Message of the type specified in a
|
||||
// google.protobuf.Any message. It returns an error if corresponding message
|
||||
// type isn't linked in.
|
||||
func Empty(any *any.Any) (proto.Message, error) {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t := proto.MessageType(aname)
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
||||
}
|
||||
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
||||
// message and places the decoded result in pb. It returns an error if type of
|
||||
// contents of Any message does not match type of pb message.
|
||||
//
|
||||
// pb can be a proto.Message, or a *DynamicAny.
|
||||
func UnmarshalAny(any *any.Any, pb proto.Message) error {
|
||||
if d, ok := pb.(*DynamicAny); ok {
|
||||
if d.Message == nil {
|
||||
var err error
|
||||
d.Message, err = Empty(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return UnmarshalAny(any, d.Message)
|
||||
}
|
||||
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mname := proto.MessageName(pb)
|
||||
if aname != mname {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
||||
}
|
||||
return proto.Unmarshal(any.Value, pb)
|
||||
}
|
||||
|
||||
// Is returns true if any value contains a given message type.
|
||||
func Is(any *any.Any, pb proto.Message) bool {
|
||||
aname, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return aname == proto.MessageName(pb)
|
||||
}
|
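To show how the helpers in `any.go` fit together, here is a minimal round-trip sketch. It reuses the `durpb` duration message vendored later in this commit purely as a convenient, linked-in `proto.Message`; the snippet itself is illustrative and not part of the upstream package:

```go
src := &durpb.Duration{Seconds: 3}

// Wrap the message in a google.protobuf.Any with a type.googleapis.com/... type URL.
a, err := ptypes.MarshalAny(src)
if err != nil {
    // handle the marshal error
}

// Is confirms the Any holds a Duration before unpacking it.
var dst durpb.Duration
if ptypes.Is(a, &dst) {
    if err := ptypes.UnmarshalAny(a, &dst); err != nil {
        // handle the unmarshal error
    }
}
```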
28 vendor/github.com/golang/protobuf/ptypes/any/BUILD (generated, vendored, new file)
@@ -0,0 +1,28 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["any.pb.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
155 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go (generated, vendored, new file)
@@ -0,0 +1,155 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package any is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/golang/protobuf/ptypes/any/any.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Any
|
||||
*/
|
||||
package any
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 187 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
|
||||
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
|
||||
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
|
||||
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
|
||||
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
|
||||
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
|
||||
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
|
||||
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
|
||||
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
|
||||
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
|
||||
}
|
140 vendor/github.com/golang/protobuf/ptypes/any/any.proto (generated, vendored, new file)
@@ -0,0 +1,140 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/any";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "AnyProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
message Any {
|
||||
// A URL/resource name whose content describes the type of the
|
||||
// serialized protocol buffer message.
|
||||
//
|
||||
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||
// following restrictions and interpretations apply:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * The last segment of the URL's path must represent the fully
|
||||
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||
// The name should be in a canonical form (e.g., leading "." is
|
||||
// not accepted).
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
string type_url = 1;
|
||||
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
bytes value = 2;
|
||||
}
|
35 vendor/github.com/golang/protobuf/ptypes/doc.go (generated, vendored, new file)
@@ -0,0 +1,35 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package ptypes contains code for interacting with well-known types.
|
||||
*/
|
||||
package ptypes
|
102 vendor/github.com/golang/protobuf/ptypes/duration.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements conversions between google.protobuf.Duration
|
||||
// and time.Duration.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
const (
|
||||
// Range of a durpb.Duration in seconds, as specified in
|
||||
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// validateDuration determines whether the durpb.Duration is valid according to the
|
||||
// definition in google/protobuf/duration.proto. A valid durpb.Duration
|
||||
// may still be too large to fit into a time.Duration (the range of durpb.Duration
|
||||
// is about 10,000 years, and the range of time.Duration is about 290).
|
||||
func validateDuration(d *durpb.Duration) error {
|
||||
if d == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", d)
|
||||
}
|
||||
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", d)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Duration converts a durpb.Duration to a time.Duration. Duration
|
||||
// returns an error if the durpb.Duration is invalid or is too large to be
|
||||
// represented in a time.Duration.
|
||||
func Duration(p *durpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(p.Seconds) * time.Second
|
||||
if int64(d/time.Second) != p.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
if p.Nanos != 0 {
|
||||
d += time.Duration(p.Nanos)
|
||||
if (d < 0) != (p.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durpb.Duration.
|
||||
func DurationProto(d time.Duration) *durpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durpb.Duration{
|
||||
Seconds: secs,
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
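A brief sketch of the conversion pair defined in `duration.go` above; the chosen duration is arbitrary and the snippet is illustrative only:

```go
// time.Duration -> durpb.Duration and back again.
p := ptypes.DurationProto(90 * time.Second) // Seconds: 90, Nanos: 0
d, err := ptypes.Duration(p)
if err != nil {
    // the proto was nil, invalid, or out of time.Duration range
}
fmt.Println(d) // 1m30s
```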
28 vendor/github.com/golang/protobuf/ptypes/duration/BUILD (generated, vendored, new file)
@@ -0,0 +1,28 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["duration.pb.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
114 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go (generated, vendored, new file)
@@ -0,0 +1,114 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package duration is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Duration
|
||||
*/
|
||||
package duration
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (m *Duration) String() string { return proto.CompactTextString(m) }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 189 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
|
||||
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
|
||||
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
|
||||
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
|
||||
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
|
||||
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
|
||||
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13,
|
||||
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
|
||||
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
|
||||
0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00,
|
||||
}
|
98 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto (generated, vendored, new file)
@@ -0,0 +1,98 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/duration";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DurationProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
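As an editorial aside (not part of the vendored change), the normalization rule spelled out in the Duration comment above is easy to get wrong; the following is a minimal Go sketch of that rule, using only the generated `Seconds`/`Nanos` fields and the `go_package` import paths declared in this diff.

```go
package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

// durationBetween mirrors "Example 1" from the duration.proto comment:
// subtract two Timestamps field by field, then normalize so that the
// seconds and nanos fields never carry opposite signs.
func durationBetween(start, end *tspb.Timestamp) *durpb.Duration {
	d := &durpb.Duration{
		Seconds: end.Seconds - start.Seconds,
		Nanos:   end.Nanos - start.Nanos,
	}
	if d.Seconds < 0 && d.Nanos > 0 {
		d.Seconds++
		d.Nanos -= 1000000000
	} else if d.Seconds > 0 && d.Nanos < 0 {
		d.Seconds--
		d.Nanos += 1000000000
	}
	return d
}

func main() {
	start := &tspb.Timestamp{Seconds: 10, Nanos: 900000000}
	end := &tspb.Timestamp{Seconds: 12, Nanos: 100000000}
	// Prints a Duration of 1 second and 200000000 nanos.
	fmt.Println(durationBetween(start, end))
}
```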
66
vendor/github.com/golang/protobuf/ptypes/regen.sh
generated
vendored
Executable file
@@ -0,0 +1,66 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# This script fetches and rebuilds the "well-known types" protocol buffers.
|
||||
# To run this you will need protoc and goprotobuf installed;
|
||||
# see https://github.com/golang/protobuf for instructions.
|
||||
# You also need Go and Git installed.
|
||||
|
||||
PKG=github.com/golang/protobuf/ptypes
|
||||
UPSTREAM=https://github.com/google/protobuf
|
||||
UPSTREAM_SUBDIR=src/google/protobuf
|
||||
PROTO_FILES='
|
||||
any.proto
|
||||
duration.proto
|
||||
empty.proto
|
||||
struct.proto
|
||||
timestamp.proto
|
||||
wrappers.proto
|
||||
'
|
||||
|
||||
function die() {
|
||||
echo 1>&2 $*
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Sanity check that the right tools are accessible.
|
||||
for tool in go git protoc protoc-gen-go; do
|
||||
q=$(which $tool) || die "didn't find $tool"
|
||||
echo 1>&2 "$tool: $q"
|
||||
done
|
||||
|
||||
tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
|
||||
trap 'rm -rf $tmpdir' EXIT
|
||||
|
||||
echo -n 1>&2 "finding package dir... "
|
||||
pkgdir=$(go list -f '{{.Dir}}' $PKG)
|
||||
echo 1>&2 $pkgdir
|
||||
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
|
||||
echo 1>&2 "base: $base"
|
||||
cd $base
|
||||
|
||||
echo 1>&2 "fetching latest protos... "
|
||||
git clone -q $UPSTREAM $tmpdir
|
||||
# Pass 1: build mapping from upstream filename to our filename.
|
||||
declare -A filename_map
|
||||
for f in $(cd $PKG && find * -name '*.proto'); do
|
||||
echo -n 1>&2 "looking for latest version of $f... "
|
||||
up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
|
||||
echo 1>&2 $up
|
||||
if [ $(echo $up | wc -w) != "1" ]; then
|
||||
die "not exactly one match"
|
||||
fi
|
||||
filename_map[$up]=$f
|
||||
done
|
||||
# Pass 2: copy files
|
||||
for up in "${!filename_map[@]}"; do
|
||||
f=${filename_map[$up]}
|
||||
shortname=$(basename $f | sed 's,\.proto$,,')
|
||||
cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
|
||||
done
|
||||
|
||||
# Run protoc once per package.
|
||||
for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
|
||||
echo 1>&2 "* $dir"
|
||||
protoc --go_out=. $dir/*.proto
|
||||
done
|
||||
echo 1>&2 "All OK"
|
125
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package ptypes
|
||||
|
||||
// This file implements operations on google.protobuf.Timestamp.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range
|
||||
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||
// in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes
|
||||
// the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||
func validateTimestamp(ts *tspb.Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return value
|
||||
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because it corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
|
||||
seconds := t.Unix()
|
||||
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
||||
ts := &tspb.Timestamp{
|
||||
Seconds: seconds,
|
||||
Nanos: nanos,
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
||||
// Timestamps, it returns an error message in parentheses.
|
||||
func TimestampString(ts *tspb.Timestamp) string {
|
||||
t, err := Timestamp(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
|
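A quick usage sketch for the conversion helpers added above (an editorial illustration, not vendored code): round-trip a time.Time through TimestampProto and Timestamp, using only the exported functions shown in this file.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	now := time.Now()

	// time.Time -> google.protobuf.Timestamp; fails if the time falls outside
	// [0001-01-01, 10000-01-01) per validateTimestamp above.
	ts, err := ptypes.TimestampProto(now)
	if err != nil {
		log.Fatalf("TimestampProto: %v", err)
	}

	// google.protobuf.Timestamp -> time.Time (always in UTC).
	back, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatalf("Timestamp: %v", err)
	}

	fmt.Println(now.UTC().Equal(back)) // true: lossless at nanosecond resolution
	fmt.Println(ptypes.TimestampString(ts))
}
```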
28
vendor/github.com/golang/protobuf/ptypes/timestamp/BUILD
generated
vendored
Normal file
@@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["timestamp.pb.go"],
    tags = ["automanaged"],
    deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
127
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package timestamp is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Timestamp
|
||||
*/
|
||||
package timestamp
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// now = time.time()
|
||||
// seconds = int(now)
|
||||
// nanos = int((now - seconds) * 10**9)
|
||||
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 194 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
|
||||
0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
|
||||
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
|
||||
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
|
||||
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
|
||||
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27,
|
||||
0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1,
|
||||
0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03,
|
||||
0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92,
|
||||
0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01,
|
||||
0x00, 0x00,
|
||||
}
|
111
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option java_generate_equals_and_hash = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// now = time.time()
|
||||
// seconds = int(now)
|
||||
// nanos = int((now - seconds) * 10**9)
|
||||
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
68
vendor/github.com/google/cadvisor/container/common/helpers.go
generated
vendored
@@ -223,3 +223,71 @@ func ListContainers(name string, cgroupPaths map[string]string, listType contain
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// AssignDeviceNamesToDiskStats assigns the Device field on the provided DiskIoStats by looking up
|
||||
// the device major and minor identifiers in the provided device namer.
|
||||
func AssignDeviceNamesToDiskStats(namer DeviceNamer, stats *info.DiskIoStats) {
|
||||
assignDeviceNamesToPerDiskStats(
|
||||
namer,
|
||||
stats.IoMerged,
|
||||
stats.IoQueued,
|
||||
stats.IoServiceBytes,
|
||||
stats.IoServiceTime,
|
||||
stats.IoServiced,
|
||||
stats.IoTime,
|
||||
stats.IoWaitTime,
|
||||
stats.Sectors,
|
||||
)
|
||||
}
|
||||
|
||||
// assignDeviceNamesToPerDiskStats looks up device names for the provided stats, caching names
|
||||
// if necessary.
|
||||
func assignDeviceNamesToPerDiskStats(namer DeviceNamer, diskStats ...[]info.PerDiskStats) {
|
||||
devices := make(deviceIdentifierMap)
|
||||
for _, stats := range diskStats {
|
||||
for i, stat := range stats {
|
||||
stats[i].Device = devices.Find(stat.Major, stat.Minor, namer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeviceNamer returns string names for devices by their major and minor id.
|
||||
type DeviceNamer interface {
|
||||
// DeviceName returns the name of the device by its major and minor ids, or false if no
|
||||
// such device is recognized.
|
||||
DeviceName(major, minor uint64) (string, bool)
|
||||
}
|
||||
|
||||
type MachineInfoNamer info.MachineInfo
|
||||
|
||||
func (n *MachineInfoNamer) DeviceName(major, minor uint64) (string, bool) {
|
||||
for _, info := range n.DiskMap {
|
||||
if info.Major == major && info.Minor == minor {
|
||||
return "/dev/" + info.Name, true
|
||||
}
|
||||
}
|
||||
for _, info := range n.Filesystems {
|
||||
if info.DeviceMajor == major && info.DeviceMinor == minor {
|
||||
return info.Device, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
type deviceIdentifier struct {
|
||||
major uint64
|
||||
minor uint64
|
||||
}
|
||||
|
||||
type deviceIdentifierMap map[deviceIdentifier]string
|
||||
|
||||
// Find looks up the device name for the given major/minor identifiers via namer, caching the result as necessary.
|
||||
func (m deviceIdentifierMap) Find(major, minor uint64, namer DeviceNamer) string {
|
||||
d := deviceIdentifier{major, minor}
|
||||
if s, ok := m[d]; ok {
|
||||
return s
|
||||
}
|
||||
s, _ := namer.DeviceName(major, minor)
|
||||
m[d] = s
|
||||
return s
|
||||
}
|
||||
|
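A hedged sketch of how the device-naming helpers added above are intended to be used (not part of the vendored change): wrap a v1 MachineInfo in MachineInfoNamer and let AssignDeviceNamesToDiskStats fill in PerDiskStats.Device. The MachineInfo fields used here (DiskMap, DiskInfo.Name/Major/Minor) come from the cAdvisor v1 API and are assumed, since they are not part of this diff.

```go
package main

import (
	"fmt"

	"github.com/google/cadvisor/container/common"
	info "github.com/google/cadvisor/info/v1"
)

func main() {
	// Minimal machine description: one known disk, 8:0 -> sda (assumed fields).
	mi := &info.MachineInfo{
		DiskMap: map[string]info.DiskInfo{
			"8:0": {Name: "sda", Major: 8, Minor: 0},
		},
	}

	// Stats as they come out of the blkio cgroup: only major/minor are set.
	stats := &info.DiskIoStats{
		IoServiceBytes: []info.PerDiskStats{
			{Major: 8, Minor: 0, Stats: map[string]uint64{"Read": 4096}},
		},
	}

	// The namer resolves 8:0 to "/dev/sda" and caches the lookup.
	common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), stats)
	fmt.Println(stats.IoServiceBytes[0].Device) // "/dev/sda"
}
```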
5
vendor/github.com/google/cadvisor/container/docker/docker.go
generated
vendored
@@ -37,7 +37,10 @@ func Status() (v1.DockerStatus, error) {
|
||||
if err != nil {
|
||||
return v1.DockerStatus{}, err
|
||||
}
|
||||
return StatusFromDockerInfo(dockerInfo), nil
|
||||
}
|
||||
|
||||
func StatusFromDockerInfo(dockerInfo dockertypes.Info) v1.DockerStatus {
|
||||
out := v1.DockerStatus{}
|
||||
out.Version = VersionString()
|
||||
out.APIVersion = APIVersionString()
|
||||
@@ -53,7 +56,7 @@ func Status() (v1.DockerStatus, error) {
|
||||
for _, v := range dockerInfo.DriverStatus {
|
||||
out.DriverStatus[v[0]] = v[1]
|
||||
}
|
||||
return out, nil
|
||||
return out
|
||||
}
|
||||
|
||||
func Images() ([]v1.DockerImage, error) {
|
||||
|
12
vendor/github.com/google/cadvisor/container/docker/factory.go
generated
vendored
@@ -84,6 +84,7 @@ const (
|
||||
devicemapperStorageDriver storageDriver = "devicemapper"
|
||||
aufsStorageDriver storageDriver = "aufs"
|
||||
overlayStorageDriver storageDriver = "overlay"
|
||||
overlay2StorageDriver storageDriver = "overlay2"
|
||||
zfsStorageDriver storageDriver = "zfs"
|
||||
)
|
||||
|
||||
@@ -107,6 +108,7 @@ type dockerFactory struct {
|
||||
|
||||
ignoreMetrics container.MetricSet
|
||||
|
||||
thinPoolName string
|
||||
thinPoolWatcher *devicemapper.ThinPoolWatcher
|
||||
|
||||
zfsWatcher *zfs.ZfsWatcher
|
||||
@@ -136,6 +138,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
|
||||
metadataEnvs,
|
||||
self.dockerVersion,
|
||||
self.ignoreMetrics,
|
||||
self.thinPoolName,
|
||||
self.thinPoolWatcher,
|
||||
self.zfsWatcher,
|
||||
)
|
||||
@@ -323,12 +326,18 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
|
||||
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
|
||||
}
|
||||
|
||||
var thinPoolWatcher *devicemapper.ThinPoolWatcher
|
||||
var (
|
||||
thinPoolWatcher *devicemapper.ThinPoolWatcher
|
||||
thinPoolName string
|
||||
)
|
||||
if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
|
||||
thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
|
||||
if err != nil {
|
||||
glog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
|
||||
}
|
||||
|
||||
status := StatusFromDockerInfo(*dockerInfo)
|
||||
thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
|
||||
}
|
||||
|
||||
var zfsWatcher *zfs.ZfsWatcher
|
||||
@@ -350,6 +359,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
|
||||
storageDriver: storageDriver(dockerInfo.Driver),
|
||||
storageDir: RootDir(),
|
||||
ignoreMetrics: ignoreMetrics,
|
||||
thinPoolName: thinPoolName,
|
||||
thinPoolWatcher: thinPoolWatcher,
|
||||
zfsWatcher: zfsWatcher,
|
||||
}
|
||||
|
43
vendor/github.com/google/cadvisor/container/docker/handler.go
generated
vendored
@@ -19,6 +19,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -112,6 +113,9 @@ type dockerContainerHandler struct {
|
||||
|
||||
// zfs watcher
|
||||
zfsWatcher *zfs.ZfsWatcher
|
||||
|
||||
// container restart count
|
||||
restartCount int
|
||||
}
|
||||
|
||||
var _ container.ContainerHandler = &dockerContainerHandler{}
|
||||
@@ -146,6 +150,7 @@ func newDockerContainerHandler(
|
||||
metadataEnvs []string,
|
||||
dockerVersion []int,
|
||||
ignoreMetrics container.MetricSet,
|
||||
thinPoolName string,
|
||||
thinPoolWatcher *devicemapper.ThinPoolWatcher,
|
||||
zfsWatcher *zfs.ZfsWatcher,
|
||||
) (container.ContainerHandler, error) {
|
||||
@@ -180,18 +185,18 @@ func newDockerContainerHandler(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the rootfs storage dir OR the pool name to determine the device
|
||||
// Determine the rootfs storage dir OR the pool name to determine the device.
|
||||
// For devicemapper, we only need the thin pool name, and that is passed in to this call
|
||||
var (
|
||||
rootfsStorageDir string
|
||||
poolName string
|
||||
zfsFilesystem string
|
||||
zfsParent string
|
||||
)
|
||||
switch storageDriver {
|
||||
case aufsStorageDriver:
|
||||
rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
|
||||
case overlayStorageDriver:
|
||||
rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
|
||||
case overlayStorageDriver, overlay2StorageDriver:
|
||||
rootfsStorageDir = path.Join(storageDir, string(storageDriver), rwLayerID)
|
||||
case zfsStorageDriver:
|
||||
status, err := Status()
|
||||
if err != nil {
|
||||
@@ -199,13 +204,6 @@ func newDockerContainerHandler(
|
||||
}
|
||||
zfsParent = status.DriverStatus[dockerutil.DriverStatusParentDataset]
|
||||
zfsFilesystem = path.Join(zfsParent, rwLayerID)
|
||||
case devicemapperStorageDriver:
|
||||
status, err := Status()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to determine docker status: %v", err)
|
||||
}
|
||||
|
||||
poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
|
||||
}
|
||||
|
||||
// TODO: extract object mother method
|
||||
@@ -219,7 +217,7 @@ func newDockerContainerHandler(
|
||||
storageDriver: storageDriver,
|
||||
fsInfo: fsInfo,
|
||||
rootFs: rootFs,
|
||||
poolName: poolName,
|
||||
poolName: thinPoolName,
|
||||
zfsFilesystem: zfsFilesystem,
|
||||
rootfsStorageDir: rootfsStorageDir,
|
||||
envs: make(map[string]string),
|
||||
@@ -248,6 +246,7 @@ func newDockerContainerHandler(
|
||||
handler.image = ctnr.Config.Image
|
||||
handler.networkMode = ctnr.HostConfig.NetworkMode
|
||||
handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
|
||||
handler.restartCount = ctnr.RestartCount
|
||||
|
||||
// Obtain the IP address for the container.
|
||||
// If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified.
|
||||
@@ -383,6 +382,10 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
|
||||
|
||||
spec.Labels = self.labels
|
||||
// Only add the restartcount label if it's greater than 0
|
||||
if self.restartCount > 0 {
|
||||
spec.Labels["restartcount"] = strconv.Itoa(self.restartCount)
|
||||
}
|
||||
spec.Envs = self.envs
|
||||
spec.Image = self.image
|
||||
|
||||
@@ -390,6 +393,15 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
}
|
||||
|
||||
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
mi, err := self.machineInfoFactory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
|
||||
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
|
||||
}
|
||||
|
||||
if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
|
||||
return nil
|
||||
}
|
||||
@@ -399,7 +411,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
|
||||
// Device has to be the pool name to correlate with the device name as
|
||||
// set in the machine info filesystems.
|
||||
device = self.poolName
|
||||
case aufsStorageDriver, overlayStorageDriver:
|
||||
case aufsStorageDriver, overlayStorageDriver, overlay2StorageDriver:
|
||||
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
|
||||
@@ -411,11 +423,6 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
|
||||
return nil
|
||||
}
|
||||
|
||||
mi, err := self.machineInfoFactory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
limit uint64
|
||||
fsType string
|
||||
|
1
vendor/github.com/google/cadvisor/container/factory.go
generated
vendored
@@ -48,6 +48,7 @@ const (
|
||||
DiskUsageMetrics MetricKind = "disk"
|
||||
NetworkUsageMetrics MetricKind = "network"
|
||||
NetworkTcpUsageMetrics MetricKind = "tcp"
|
||||
NetworkUdpUsageMetrics MetricKind = "udp"
|
||||
AppMetrics MetricKind = "app"
|
||||
)
|
||||
|
||||
|
84
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
@@ -17,6 +17,7 @@ package libcontainer
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
@@ -118,6 +119,21 @@ func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetri
|
||||
stats.Network.Tcp6 = t6
|
||||
}
|
||||
}
|
||||
if !ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
|
||||
u, err := udpStatsFromProc(rootFs, pid, "net/udp")
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Unable to get udp stats from pid %d: %v", pid, err)
|
||||
} else {
|
||||
stats.Network.Udp = u
|
||||
}
|
||||
|
||||
u6, err := udpStatsFromProc(rootFs, pid, "net/udp6")
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Unable to get udp6 stats from pid %d: %v", pid, err)
|
||||
} else {
|
||||
stats.Network.Udp6 = u6
|
||||
}
|
||||
}
|
||||
|
||||
// For backwards compatibility.
|
||||
if len(stats.Network.Interfaces) > 0 {
|
||||
@@ -291,6 +307,74 @@ func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
|
||||
var err error
|
||||
var udpStats info.UdpStat
|
||||
|
||||
udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
|
||||
|
||||
r, err := os.Open(udpStatsFile)
|
||||
if err != nil {
|
||||
return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
|
||||
}
|
||||
|
||||
udpStats, err = scanUdpStats(r)
|
||||
if err != nil {
|
||||
return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
|
||||
}
|
||||
|
||||
return udpStats, nil
|
||||
}
|
||||
|
||||
func scanUdpStats(r io.Reader) (info.UdpStat, error) {
|
||||
var stats info.UdpStat
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
// Discard header line
|
||||
if b := scanner.Scan(); !b {
|
||||
return stats, scanner.Err()
|
||||
}
|
||||
|
||||
listening := uint64(0)
|
||||
dropped := uint64(0)
|
||||
rxQueued := uint64(0)
|
||||
txQueued := uint64(0)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
|
||||
|
||||
listening++
|
||||
|
||||
fs := strings.Fields(line)
|
||||
if len(fs) != 13 {
|
||||
continue
|
||||
}
|
||||
|
||||
rx, tx := uint64(0), uint64(0)
|
||||
fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
|
||||
rxQueued += rx
|
||||
txQueued += tx
|
||||
|
||||
d, err := strconv.Atoi(string(fs[12]))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
dropped += uint64(d)
|
||||
}
|
||||
|
||||
stats = info.UdpStat{
|
||||
Listen: listening,
|
||||
Dropped: dropped,
|
||||
RxQueued: rxQueued,
|
||||
TxQueued: txQueued,
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
|
||||
pids, err := cgroupManager.GetPids()
|
||||
if err != nil {
|
||||
|
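The scanUdpStats parser added above walks /proc/net/udp-style tables. The sketch below is an editorial illustration with a made-up sample line (not vendored code); it applies the same field handling: the fifth column holds the two hex queue sizes and the last column the drop count.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// One hypothetical data line in the /proc/net/udp format referenced above
	// (sl local_address rem_address st tx_queue:rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops).
	line := "  1: 00000000:0044 00000000:0000 07 00000010:00000020 00:00000000 00000000   0        0 12345 2 ffff8800 5"

	fs := strings.Fields(line)
	if len(fs) != 13 {
		fmt.Println("unexpected field count:", len(fs))
		return
	}

	// fs[4] holds the two hex queue sizes, scanned in the same order scanUdpStats uses.
	var rx, tx uint64
	fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)

	// fs[12] is the cumulative drop counter.
	dropped, _ := strconv.ParseUint(fs[12], 10, 64)

	fmt.Println(rx, tx, dropped) // 16 32 5
}
```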
27
vendor/github.com/google/cadvisor/container/raw/handler.go
generated
vendored
@@ -197,6 +197,7 @@ func fsToFsStats(fs *fs.Fs) info.FsStats {
|
||||
}
|
||||
|
||||
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
var allFs []fs.Fs
|
||||
// Get Filesystem information only for the root cgroup.
|
||||
if isRootCgroup(self.name) {
|
||||
filesystems, err := self.fsInfo.GetGlobalFsInfo()
|
||||
@@ -207,6 +208,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
fs := filesystems[i]
|
||||
stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
|
||||
}
|
||||
allFs = filesystems
|
||||
} else if len(self.externalMounts) > 0 {
|
||||
var mountSet map[string]struct{}
|
||||
mountSet = make(map[string]struct{})
|
||||
@@ -221,7 +223,10 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
fs := filesystems[i]
|
||||
stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
|
||||
}
|
||||
allFs = filesystems
|
||||
}
|
||||
|
||||
common.AssignDeviceNamesToDiskStats(&fsNamer{fs: allFs, factory: self.machineInfoFactory}, &stats.DiskIo)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -272,3 +277,25 @@ func (self *rawContainerHandler) Exists() bool {
|
||||
func (self *rawContainerHandler) Type() container.ContainerType {
|
||||
return container.ContainerTypeRaw
|
||||
}
|
||||
|
||||
type fsNamer struct {
|
||||
fs []fs.Fs
|
||||
factory info.MachineInfoFactory
|
||||
info common.DeviceNamer
|
||||
}
|
||||
|
||||
func (n *fsNamer) DeviceName(major, minor uint64) (string, bool) {
|
||||
for _, info := range n.fs {
|
||||
if uint64(info.Major) == major && uint64(info.Minor) == minor {
|
||||
return info.Device, true
|
||||
}
|
||||
}
|
||||
if n.info == nil {
|
||||
mi, err := n.factory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
n.info = (*common.MachineInfoNamer)(mi)
|
||||
}
|
||||
return n.info.DeviceName(major, minor)
|
||||
}
|
||||
|
13
vendor/github.com/google/cadvisor/container/rkt/handler.go
generated
vendored
@@ -202,6 +202,15 @@ func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
}
|
||||
|
||||
func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
mi, err := handler.machineInfoFactory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !handler.ignoreMetrics.Has(container.DiskIOMetrics) {
|
||||
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
|
||||
}
|
||||
|
||||
if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
|
||||
return nil
|
||||
}
|
||||
@@ -211,10 +220,6 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
|
||||
return err
|
||||
}
|
||||
|
||||
mi, err := handler.machineInfoFactory.GetMachineInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var limit uint64 = 0
|
||||
|
||||
// Use capacity as limit.
|
||||
|
18
vendor/github.com/google/cadvisor/fs/fs.go
generated
vendored
@@ -266,7 +266,7 @@ func getDockerImagePaths(context Context) map[string]struct{} {
|
||||
|
||||
// TODO(rjnagal): Detect docker root and graphdriver directories from docker info.
|
||||
dockerRoot := context.Docker.Root
|
||||
for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "zfs"} {
|
||||
for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "overlay2", "zfs"} {
|
||||
dockerImagePaths[path.Join(dockerRoot, dir)] = struct{}{}
|
||||
}
|
||||
for dockerRoot != "/" && dockerRoot != "." {
|
||||
@@ -455,11 +455,15 @@ func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
|
||||
}
|
||||
|
||||
func (self *RealFsInfo) GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
|
||||
claimToken()
|
||||
defer releaseToken()
|
||||
return GetDirDiskUsage(dir, timeout)
|
||||
}
|
||||
|
||||
func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
|
||||
if dir == "" {
|
||||
return 0, fmt.Errorf("invalid directory")
|
||||
}
|
||||
claimToken()
|
||||
defer releaseToken()
|
||||
cmd := exec.Command("nice", "-n", "19", "du", "-s", dir)
|
||||
stdoutp, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
@@ -496,13 +500,17 @@ func (self *RealFsInfo) GetDirDiskUsage(dir string, timeout time.Duration) (uint
|
||||
}
|
||||
|
||||
func (self *RealFsInfo) GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
|
||||
claimToken()
|
||||
defer releaseToken()
|
||||
return GetDirInodeUsage(dir, timeout)
|
||||
}
|
||||
|
||||
func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
|
||||
if dir == "" {
|
||||
return 0, fmt.Errorf("invalid directory")
|
||||
}
|
||||
var counter byteCounter
|
||||
var stderr bytes.Buffer
|
||||
claimToken()
|
||||
defer releaseToken()
|
||||
findCmd := exec.Command("find", dir, "-xdev", "-printf", ".")
|
||||
findCmd.Stdout, findCmd.Stderr = &counter, &stderr
|
||||
if err := findCmd.Start(); err != nil {
|
||||
|
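The fs.go refactor above promotes the du/find based helpers to package-level functions, with the token claim moved inside each call so only a bounded number of du/find processes run concurrently. A minimal hedged usage sketch (illustration only, paths are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/cadvisor/fs"
)

func main() {
	// Runs "nice -n 19 du -s /var/log" under the shared token limiter.
	usage, err := fs.GetDirDiskUsage("/var/log", 2*time.Minute)
	if err != nil {
		fmt.Println("du failed:", err)
		return
	}
	// Runs "find /var/log -xdev -printf ." and counts the output bytes.
	inodes, err := fs.GetDirInodeUsage("/var/log", 2*time.Minute)
	if err != nil {
		fmt.Println("find failed:", err)
		return
	}
	fmt.Println("usage:", usage, "inodes:", inodes)
}
```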
1
vendor/github.com/google/cadvisor/http/BUILD
generated
vendored
@@ -23,6 +23,7 @@ go_library(
|
||||
"//vendor/github.com/google/cadvisor/pages/static:go_default_library",
|
||||
"//vendor/github.com/google/cadvisor/validate:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
19
vendor/github.com/google/cadvisor/http/handlers.go
generated
vendored
@@ -17,6 +17,7 @@ package http
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/google/cadvisor/api"
|
||||
"github.com/google/cadvisor/healthz"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
auth "github.com/abbot/go-http-auth"
|
||||
"github.com/golang/glog"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAuthFile, httpAuthRealm, httpDigestFile, httpDigestRealm string) error {
|
||||
@@ -54,7 +56,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
|
||||
// Redirect / to containers page.
|
||||
mux.Handle("/", http.RedirectHandler(pages.ContainersPage, http.StatusTemporaryRedirect))
|
||||
|
||||
var authenticated bool = false
|
||||
var authenticated bool
|
||||
|
||||
// Setup the authenticator object
|
||||
if httpAuthFile != "" {
|
||||
@@ -89,13 +91,16 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterPrometheusHandler creates a new PrometheusCollector, registers it
|
||||
// on the global registry and configures the provided HTTP mux to handle the
|
||||
// given Prometheus endpoint.
|
||||
// RegisterPrometheusHandler creates a new PrometheusCollector and configures
|
||||
// the provided HTTP mux to handle the given Prometheus endpoint.
|
||||
func RegisterPrometheusHandler(mux httpmux.Mux, containerManager manager.Manager, prometheusEndpoint string, f metrics.ContainerLabelsFunc) {
|
||||
collector := metrics.NewPrometheusCollector(containerManager, f)
|
||||
prometheus.MustRegister(collector)
|
||||
mux.Handle(prometheusEndpoint, prometheus.Handler())
|
||||
r := prometheus.NewRegistry()
|
||||
r.MustRegister(
|
||||
metrics.NewPrometheusCollector(containerManager, f),
|
||||
prometheus.NewGoCollector(),
|
||||
prometheus.NewProcessCollector(os.Getpid(), ""),
|
||||
)
|
||||
mux.Handle(prometheusEndpoint, promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
|
||||
}
|
||||
|
||||
func staticHandlerNoAuth(w http.ResponseWriter, r *http.Request) {
|
||||
|
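The handlers.go change above moves cAdvisor's metrics off the global Prometheus registry onto a private one served via promhttp. A minimal hedged sketch of that pattern follows (illustration only, with a placeholder in place of the cAdvisor container collector; signatures match the client_golang version vendored in this diff).

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry keeps this endpoint independent of whatever other
	// code has registered on the global prometheus.DefaultRegisterer.
	r := prometheus.NewRegistry()
	r.MustRegister(
		prometheus.NewGoCollector(),
		prometheus.NewProcessCollector(os.Getpid(), ""),
		// In cAdvisor the container collector is added here:
		// metrics.NewPrometheusCollector(containerManager, f)
	)

	http.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```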
25
vendor/github.com/google/cadvisor/info/v1/container.go
generated
vendored
@@ -307,9 +307,10 @@ type CpuStats struct {
|
||||
}
|
||||
|
||||
type PerDiskStats struct {
|
||||
Major uint64 `json:"major"`
|
||||
Minor uint64 `json:"minor"`
|
||||
Stats map[string]uint64 `json:"stats"`
|
||||
Device string `json:"-"`
|
||||
Major uint64 `json:"major"`
|
||||
Minor uint64 `json:"minor"`
|
||||
Stats map[string]uint64 `json:"stats"`
|
||||
}
|
||||
|
||||
type DiskIoStats struct {
|
||||
@@ -386,6 +387,10 @@ type NetworkStats struct {
|
||||
Tcp TcpStat `json:"tcp"`
|
||||
// TCP6 connection stats (Established, Listen...)
|
||||
Tcp6 TcpStat `json:"tcp6"`
|
||||
// UDP connection stats
|
||||
Udp UdpStat `json:"udp"`
|
||||
// UDP6 connection stats
|
||||
Udp6 UdpStat `json:"udp6"`
|
||||
}
|
||||
|
||||
type TcpStat struct {
|
||||
@@ -413,6 +418,20 @@ type TcpStat struct {
|
||||
Closing uint64
|
||||
}
|
||||
|
||||
type UdpStat struct {
|
||||
// Count of UDP sockets in state "Listen"
|
||||
Listen uint64
|
||||
|
||||
// Count of UDP packets dropped by the IP stack
|
||||
Dropped uint64
|
||||
|
||||
// Count of packets Queued for Receive
|
||||
RxQueued uint64
|
||||
|
||||
// Count of packets Queued for Transmit
|
||||
TxQueued uint64
|
||||
}
|
||||
|
||||
type FsStats struct {
|
||||
// The block device name associated with the filesystem.
|
||||
Device string `json:"device,omitempty"`
|
||||
|
4
vendor/github.com/google/cadvisor/info/v1/machine.go
generated
vendored
@@ -17,6 +17,10 @@ package v1
|
||||
type FsInfo struct {
|
||||
// Block device associated with the filesystem.
|
||||
Device string `json:"device"`
|
||||
// DeviceMajor is the major identifier of the device, used for correlation with blkio stats
|
||||
DeviceMajor uint64 `json:"-"`
|
||||
// DeviceMinor is the minor identifier of the device, used for correlation with blkio stats
|
||||
DeviceMinor uint64 `json:"-"`
|
||||
|
||||
// Total number of bytes available on the filesystem.
|
||||
Capacity uint64 `json:"capacity"`
|
||||
|
4
vendor/github.com/google/cadvisor/info/v2/container.go
generated
vendored
@@ -269,6 +269,10 @@ type NetworkStats struct {
|
||||
Tcp TcpStat `json:"tcp"`
|
||||
// TCP6 connection stats (Established, Listen...)
|
||||
Tcp6 TcpStat `json:"tcp6"`
|
||||
// UDP connection stats
|
||||
Udp v1.UdpStat `json:"udp"`
|
||||
// UDP6 connection stats
|
||||
Udp6 v1.UdpStat `json:"udp6"`
|
||||
}
|
||||
|
||||
// Instantaneous CPU stats
|
||||
|
3
vendor/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
@@ -133,7 +133,7 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats []
|
||||
}
|
||||
} else if len(val.Filesystem) > 1 && containerName != "/" {
|
||||
// Cannot handle multiple devices per container.
|
||||
glog.V(2).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
|
||||
glog.V(4).Infof("failed to handle multiple devices for container %s. Skipping Filesystem stats", containerName)
|
||||
}
|
||||
}
|
||||
if spec.HasDiskIo {
|
||||
@@ -259,6 +259,7 @@ func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace s
|
||||
HasCustomMetrics: specV1.HasCustomMetrics,
|
||||
Image: specV1.Image,
|
||||
Labels: specV1.Labels,
|
||||
Envs: specV1.Envs,
|
||||
}
|
||||
if specV1.HasCpu {
|
||||
specV2.Cpu.Limit = specV1.Cpu.Limit
|
||||
|
2
vendor/github.com/google/cadvisor/machine/info.go
generated
vendored
@@ -116,7 +116,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
|
||||
if fs.Inodes != nil {
|
||||
inodes = *fs.Inodes
|
||||
}
|
||||
machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: inodes, HasInodes: fs.Inodes != nil})
|
||||
machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, DeviceMajor: uint64(fs.Major), DeviceMinor: uint64(fs.Minor), Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: inodes, HasInodes: fs.Inodes != nil})
|
||||
}
|
||||
|
||||
return machineInfo, nil
|
||||
|
17
vendor/github.com/google/cadvisor/manager/manager.go
generated
vendored
@@ -566,9 +566,24 @@ func (self *manager) getDockerContainer(containerName string) (*containerData, e
|
||||
Namespace: docker.DockerNamespace,
|
||||
Name: containerName,
|
||||
}]
|
||||
|
||||
// Look for container by short prefix name if no exact match found.
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unable to find Docker container %q", containerName)
|
||||
for contName, c := range self.containers {
|
||||
if contName.Namespace == docker.DockerNamespace && strings.HasPrefix(contName.Name, containerName) {
|
||||
if cont == nil {
|
||||
cont = c
|
||||
} else {
|
||||
return nil, fmt.Errorf("unable to find container. Container %q is not unique", containerName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cont == nil {
|
||||
return nil, fmt.Errorf("unable to find Docker container %q", containerName)
|
||||
}
|
||||
}
|
||||
|
||||
return cont, nil
|
||||
}
|
||||
|
||||
|
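The getDockerContainer change above falls back to short-prefix matching when no exact container name matches, and only accepts the match if the prefix is unambiguous. A small stand-alone Go sketch of that rule (an illustration, not the vendored code):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// findByPrefix returns the single name that starts with prefix, or an error
// when the prefix matches nothing or matches more than one name -- the same
// rule the manager now applies to Docker container names.
func findByPrefix(names []string, prefix string) (string, error) {
	match := ""
	for _, n := range names {
		if strings.HasPrefix(n, prefix) {
			if match != "" {
				return "", errors.New("prefix is not unique")
			}
			match = n
		}
	}
	if match == "" {
		return "", errors.New("no name matches prefix")
	}
	return match, nil
}

func main() {
	names := []string{"4a2f3e9d1c", "4b77aa0912", "deadbeef00"}
	fmt.Println(findByPrefix(names, "4a")) // 4a2f3e9d1c <nil>
	fmt.Println(findByPrefix(names, "4"))  // error: prefix is not unique
}
```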
148
vendor/github.com/google/cadvisor/metrics/prometheus.go
generated
vendored
@@ -45,6 +45,14 @@ type metricValue struct {
|
||||
|
||||
type metricValues []metricValue
|
||||
|
||||
// asFloat64 converts a uint64 into a float64.
|
||||
func asFloat64(v uint64) float64 { return float64(v) }
|
||||
|
||||
// asNanosecondsToSeconds converts nanoseconds into a float64 representing seconds.
|
||||
func asNanosecondsToSeconds(v uint64) float64 {
|
||||
return float64(v) / float64(time.Second)
|
||||
}
|
||||
|
||||
// fsValues is a helper method for assembling per-filesystem stats.
|
||||
func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
|
||||
values := make(metricValues, 0, len(fsStats))
|
||||
@@ -57,6 +65,24 @@ func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metri
|
||||
return values
|
||||
}
|
||||
|
||||
// ioValues is a helper method for assembling per-disk and per-filesystem stats.
|
||||
func ioValues(ioStats []info.PerDiskStats, ioType string, ioValueFn func(uint64) float64, fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
|
||||
values := make(metricValues, 0, len(ioStats)+len(fsStats))
|
||||
for _, stat := range ioStats {
|
||||
values = append(values, metricValue{
|
||||
value: ioValueFn(stat.Stats[ioType]),
|
||||
labels: []string{stat.Device},
|
||||
})
|
||||
}
|
||||
for _, stat := range fsStats {
|
||||
values = append(values, metricValue{
|
||||
value: valueFn(&stat),
|
||||
labels: []string{stat.Device},
|
||||
})
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// containerMetric describes a multi-dimensional metric used for exposing a
|
||||
// certain type of container statistic.
|
||||
type containerMetric struct {
|
||||
@@ -130,10 +156,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
values := make(metricValues, 0, len(s.Cpu.Usage.PerCpu))
|
||||
for i, value := range s.Cpu.Usage.PerCpu {
|
||||
values = append(values, metricValue{
|
||||
value: float64(value) / float64(time.Second),
|
||||
labels: []string{fmt.Sprintf("cpu%02d", i)},
|
||||
})
|
||||
if value > 0 {
|
||||
values = append(values, metricValue{
|
||||
value: float64(value) / float64(time.Second),
|
||||
labels: []string{fmt.Sprintf("cpu%02d", i)},
|
||||
})
|
||||
}
|
||||
}
|
||||
return values
|
||||
},
|
||||
@@ -268,15 +296,29 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
return float64(fs.Usage)
|
||||
})
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_reads_bytes_total",
|
||||
help: "Cumulative count of bytes read",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiceBytes, "Read", asFloat64,
|
||||
nil, nil,
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_reads_total",
|
||||
help: "Cumulative count of reads completed",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadsCompleted)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiced, "Read", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadsCompleted)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_sector_reads_total",
|
||||
@@ -284,9 +326,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.SectorsRead)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.Sectors, "Read", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.SectorsRead)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_reads_merged_total",
|
||||
@@ -294,9 +339,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadsMerged)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoMerged, "Read", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadsMerged)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_read_seconds_total",
|
||||
@@ -304,9 +352,23 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadTime) / float64(time.Second)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiceTime, "Read", asNanosecondsToSeconds,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadTime) / float64(time.Second)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_writes_bytes_total",
|
||||
help: "Cumulative count of bytes written",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiceBytes, "Write", asFloat64,
|
||||
nil, nil,
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_writes_total",
|
||||
@@ -314,9 +376,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.WritesCompleted)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiced, "Write", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.WritesCompleted)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_sector_writes_total",
|
||||
@@ -324,9 +389,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.SectorsWritten)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.Sectors, "Write", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.SectorsWritten)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_writes_merged_total",
|
||||
@@ -334,9 +402,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.WritesMerged)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoMerged, "Write", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.WritesMerged)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_write_seconds_total",
|
||||
@@ -344,9 +415,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.WriteTime) / float64(time.Second)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiceTime, "Write", asNanosecondsToSeconds,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.WriteTime) / float64(time.Second)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_io_current",
|
||||
@@ -354,9 +428,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.GaugeValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.IoInProgress)
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoQueued, "Total", asFloat64,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.IoInProgress)
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_io_time_seconds_total",
|
||||
@@ -364,9 +441,12 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(float64(fs.IoTime) / float64(time.Second))
|
||||
})
|
||||
return ioValues(
|
||||
s.DiskIo.IoServiceTime, "Total", asNanosecondsToSeconds,
|
||||
s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(float64(fs.IoTime) / float64(time.Second))
|
||||
},
|
||||
)
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_io_time_weighted_seconds_total",
|
||||
|
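The hunks above replace the per-filesystem `fsValues(...)` readings for the `container_fs_*` metrics with calls to an `ioValues(...)` helper that also draws on `s.DiskIo`. The helper itself is outside the visible hunks; the sketch below only infers its shape from the call sites above. All type and field names are simplified stand-ins rather than the real cadvisor API, and the exact merge semantics between disk and filesystem stats are an assumption.

```go
// Minimal sketch of an ioValues-style helper, inferred from the call sites above.
// perDiskStats, fsStats, and metricValue are stand-ins for cadvisor's info types.
package main

import "fmt"

type perDiskStats struct {
	Device string
	Stats  map[string]uint64 // keyed by operation, e.g. "Read", "Write", "Total"
}

type fsStats struct {
	Device         string
	ReadsCompleted uint64
}

type metricValue struct {
	value  float64
	labels []string
}

// ioValues emits one value per block device from the disk-I/O stats and, when
// filesystem stats and a value function are supplied, one value per filesystem.
func ioValues(ioStats []perDiskStats, ioType string, ioValueFn func(uint64) float64,
	fs []fsStats, fsValueFn func(*fsStats) float64) []metricValue {
	values := make([]metricValue, 0, len(ioStats)+len(fs))
	for _, d := range ioStats {
		values = append(values, metricValue{value: ioValueFn(d.Stats[ioType]), labels: []string{d.Device}})
	}
	if fsValueFn != nil {
		for i := range fs {
			values = append(values, metricValue{value: fsValueFn(&fs[i]), labels: []string{fs[i].Device}})
		}
	}
	return values
}

func main() {
	asFloat64 := func(v uint64) float64 { return float64(v) }
	got := ioValues(
		[]perDiskStats{{Device: "sda", Stats: map[string]uint64{"Read": 7}}}, "Read", asFloat64,
		[]fsStats{{Device: "/dev/sda1", ReadsCompleted: 42}},
		func(fs *fsStats) float64 { return float64(fs.ReadsCompleted) },
	)
	fmt.Println(got) // [{7 [sda]} {42 [/dev/sda1]}]
}
```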
203
vendor/github.com/googleapis/gnostic/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,203 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
36
vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD
generated
vendored
Normal file
@@ -0,0 +1,36 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "OpenAPIv2.go",
        "OpenAPIv2.pb.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
        "//vendor/github.com/googleapis/gnostic/compiler:go_default_library",
        "//vendor/gopkg.in/yaml.v2:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
6944
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
4456
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
663
vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
generated
vendored
Normal file
@@ -0,0 +1,663 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package openapi.v2;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one-level of name nesting and be
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "OpenAPIProto";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapi_v2";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
option objc_class_prefix = "OAS";
|
||||
|
||||
message AdditionalPropertiesItem {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
bool boolean = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message Any {
|
||||
google.protobuf.Any value = 1;
|
||||
string yaml = 2;
|
||||
}
|
||||
|
||||
message ApiKeySecurity {
|
||||
string type = 1;
|
||||
string name = 2;
|
||||
string in = 3;
|
||||
string description = 4;
|
||||
repeated NamedAny vendor_extension = 5;
|
||||
}
|
||||
|
||||
message BasicAuthenticationSecurity {
|
||||
string type = 1;
|
||||
string description = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
message BodyParameter {
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 1;
|
||||
// The name of the parameter.
|
||||
string name = 2;
|
||||
// Determines the location of the parameter.
|
||||
string in = 3;
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 4;
|
||||
Schema schema = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
// Contact information for the owners of the API.
|
||||
message Contact {
|
||||
// The identifying name of the contact person/organization.
|
||||
string name = 1;
|
||||
// The URL pointing to the contact information.
|
||||
string url = 2;
|
||||
// The email address of the contact person/organization.
|
||||
string email = 3;
|
||||
repeated NamedAny vendor_extension = 4;
|
||||
}
|
||||
|
||||
message Default {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// One or more JSON objects describing the schemas being consumed and produced by the API.
|
||||
message Definitions {
|
||||
repeated NamedSchema additional_properties = 1;
|
||||
}
|
||||
|
||||
message Document {
|
||||
// The Swagger version of this document.
|
||||
string swagger = 1;
|
||||
Info info = 2;
|
||||
// The host (name or ip) of the API. Example: 'swagger.io'
|
||||
string host = 3;
|
||||
// The base path to the API. Example: '/api'.
|
||||
string base_path = 4;
|
||||
// The transfer protocol of the API.
|
||||
repeated string schemes = 5;
|
||||
// A list of MIME types accepted by the API.
|
||||
repeated string consumes = 6;
|
||||
// A list of MIME types the API can produce.
|
||||
repeated string produces = 7;
|
||||
Paths paths = 8;
|
||||
Definitions definitions = 9;
|
||||
ParameterDefinitions parameters = 10;
|
||||
ResponseDefinitions responses = 11;
|
||||
repeated SecurityRequirement security = 12;
|
||||
SecurityDefinitions security_definitions = 13;
|
||||
repeated Tag tags = 14;
|
||||
ExternalDocs external_docs = 15;
|
||||
repeated NamedAny vendor_extension = 16;
|
||||
}
|
||||
|
||||
message Examples {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// information about external documentation
|
||||
message ExternalDocs {
|
||||
string description = 1;
|
||||
string url = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
message FileSchema {
|
||||
string format = 1;
|
||||
string title = 2;
|
||||
string description = 3;
|
||||
Any default = 4;
|
||||
repeated string required = 5;
|
||||
string type = 6;
|
||||
bool read_only = 7;
|
||||
ExternalDocs external_docs = 8;
|
||||
Any example = 9;
|
||||
repeated NamedAny vendor_extension = 10;
|
||||
}
|
||||
|
||||
message FormDataParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
// allows sending a parameter by name only or with an empty value.
|
||||
bool allow_empty_value = 5;
|
||||
string type = 6;
|
||||
string format = 7;
|
||||
PrimitivesItems items = 8;
|
||||
string collection_format = 9;
|
||||
Any default = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
repeated Any enum = 21;
|
||||
double multiple_of = 22;
|
||||
repeated NamedAny vendor_extension = 23;
|
||||
}
|
||||
|
||||
message Header {
|
||||
string type = 1;
|
||||
string format = 2;
|
||||
PrimitivesItems items = 3;
|
||||
string collection_format = 4;
|
||||
Any default = 5;
|
||||
double maximum = 6;
|
||||
bool exclusive_maximum = 7;
|
||||
double minimum = 8;
|
||||
bool exclusive_minimum = 9;
|
||||
int64 max_length = 10;
|
||||
int64 min_length = 11;
|
||||
string pattern = 12;
|
||||
int64 max_items = 13;
|
||||
int64 min_items = 14;
|
||||
bool unique_items = 15;
|
||||
repeated Any enum = 16;
|
||||
double multiple_of = 17;
|
||||
string description = 18;
|
||||
repeated NamedAny vendor_extension = 19;
|
||||
}
|
||||
|
||||
message HeaderParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
string type = 5;
|
||||
string format = 6;
|
||||
PrimitivesItems items = 7;
|
||||
string collection_format = 8;
|
||||
Any default = 9;
|
||||
double maximum = 10;
|
||||
bool exclusive_maximum = 11;
|
||||
double minimum = 12;
|
||||
bool exclusive_minimum = 13;
|
||||
int64 max_length = 14;
|
||||
int64 min_length = 15;
|
||||
string pattern = 16;
|
||||
int64 max_items = 17;
|
||||
int64 min_items = 18;
|
||||
bool unique_items = 19;
|
||||
repeated Any enum = 20;
|
||||
double multiple_of = 21;
|
||||
repeated NamedAny vendor_extension = 22;
|
||||
}
|
||||
|
||||
message Headers {
|
||||
repeated NamedHeader additional_properties = 1;
|
||||
}
|
||||
|
||||
// General information about the API.
|
||||
message Info {
|
||||
// A unique and precise title of the API.
|
||||
string title = 1;
|
||||
// A semantic version number of the API.
|
||||
string version = 2;
|
||||
// A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The terms of service for the API.
|
||||
string terms_of_service = 4;
|
||||
Contact contact = 5;
|
||||
License license = 6;
|
||||
repeated NamedAny vendor_extension = 7;
|
||||
}
|
||||
|
||||
message ItemsItem {
|
||||
repeated Schema schema = 1;
|
||||
}
|
||||
|
||||
message JsonReference {
|
||||
string _ref = 1;
|
||||
string description = 2;
|
||||
}
|
||||
|
||||
message License {
|
||||
// The name of the license type. It's encouraged to use an OSI compatible license.
|
||||
string name = 1;
|
||||
// The URL pointing to the license.
|
||||
string url = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
|
||||
message NamedAny {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Any value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
|
||||
message NamedHeader {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Header value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
|
||||
message NamedParameter {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Parameter value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
|
||||
message NamedPathItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
PathItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
|
||||
message NamedResponse {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Response value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
|
||||
message NamedResponseValue {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ResponseValue value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
|
||||
message NamedSchema {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Schema value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
|
||||
message NamedSecurityDefinitionsItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
SecurityDefinitionsItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
|
||||
message NamedString {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
string value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
|
||||
message NamedStringArray {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
StringArray value = 2;
|
||||
}
|
||||
|
||||
message NonBodyParameter {
|
||||
oneof oneof {
|
||||
HeaderParameterSubSchema header_parameter_sub_schema = 1;
|
||||
FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
|
||||
QueryParameterSubSchema query_parameter_sub_schema = 3;
|
||||
PathParameterSubSchema path_parameter_sub_schema = 4;
|
||||
}
|
||||
}
|
||||
|
||||
message Oauth2AccessCodeSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string authorization_url = 4;
|
||||
string token_url = 5;
|
||||
string description = 6;
|
||||
repeated NamedAny vendor_extension = 7;
|
||||
}
|
||||
|
||||
message Oauth2ApplicationSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string token_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2ImplicitSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string authorization_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2PasswordSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string token_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2Scopes {
|
||||
repeated NamedString additional_properties = 1;
|
||||
}
|
||||
|
||||
message Operation {
|
||||
repeated string tags = 1;
|
||||
// A brief summary of the operation.
|
||||
string summary = 2;
|
||||
// A longer description of the operation, GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
ExternalDocs external_docs = 4;
|
||||
// A unique identifier of the operation.
|
||||
string operation_id = 5;
|
||||
// A list of MIME types the API can produce.
|
||||
repeated string produces = 6;
|
||||
// A list of MIME types the API can consume.
|
||||
repeated string consumes = 7;
|
||||
// The parameters needed to send a valid API call.
|
||||
repeated ParametersItem parameters = 8;
|
||||
Responses responses = 9;
|
||||
// The transfer protocol of the API.
|
||||
repeated string schemes = 10;
|
||||
bool deprecated = 11;
|
||||
repeated SecurityRequirement security = 12;
|
||||
repeated NamedAny vendor_extension = 13;
|
||||
}
|
||||
|
||||
message Parameter {
|
||||
oneof oneof {
|
||||
BodyParameter body_parameter = 1;
|
||||
NonBodyParameter non_body_parameter = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// One or more JSON representations for parameters
|
||||
message ParameterDefinitions {
|
||||
repeated NamedParameter additional_properties = 1;
|
||||
}
|
||||
|
||||
message ParametersItem {
|
||||
oneof oneof {
|
||||
Parameter parameter = 1;
|
||||
JsonReference json_reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message PathItem {
|
||||
string _ref = 1;
|
||||
Operation get = 2;
|
||||
Operation put = 3;
|
||||
Operation post = 4;
|
||||
Operation delete = 5;
|
||||
Operation options = 6;
|
||||
Operation head = 7;
|
||||
Operation patch = 8;
|
||||
// The parameters needed to send a valid API call.
|
||||
repeated ParametersItem parameters = 9;
|
||||
repeated NamedAny vendor_extension = 10;
|
||||
}
|
||||
|
||||
message PathParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
string type = 5;
|
||||
string format = 6;
|
||||
PrimitivesItems items = 7;
|
||||
string collection_format = 8;
|
||||
Any default = 9;
|
||||
double maximum = 10;
|
||||
bool exclusive_maximum = 11;
|
||||
double minimum = 12;
|
||||
bool exclusive_minimum = 13;
|
||||
int64 max_length = 14;
|
||||
int64 min_length = 15;
|
||||
string pattern = 16;
|
||||
int64 max_items = 17;
|
||||
int64 min_items = 18;
|
||||
bool unique_items = 19;
|
||||
repeated Any enum = 20;
|
||||
double multiple_of = 21;
|
||||
repeated NamedAny vendor_extension = 22;
|
||||
}
|
||||
|
||||
// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
|
||||
message Paths {
|
||||
repeated NamedAny vendor_extension = 1;
|
||||
repeated NamedPathItem path = 2;
|
||||
}
|
||||
|
||||
message PrimitivesItems {
|
||||
string type = 1;
|
||||
string format = 2;
|
||||
PrimitivesItems items = 3;
|
||||
string collection_format = 4;
|
||||
Any default = 5;
|
||||
double maximum = 6;
|
||||
bool exclusive_maximum = 7;
|
||||
double minimum = 8;
|
||||
bool exclusive_minimum = 9;
|
||||
int64 max_length = 10;
|
||||
int64 min_length = 11;
|
||||
string pattern = 12;
|
||||
int64 max_items = 13;
|
||||
int64 min_items = 14;
|
||||
bool unique_items = 15;
|
||||
repeated Any enum = 16;
|
||||
double multiple_of = 17;
|
||||
repeated NamedAny vendor_extension = 18;
|
||||
}
|
||||
|
||||
message Properties {
|
||||
repeated NamedSchema additional_properties = 1;
|
||||
}
|
||||
|
||||
message QueryParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
// allows sending a parameter by name only or with an empty value.
|
||||
bool allow_empty_value = 5;
|
||||
string type = 6;
|
||||
string format = 7;
|
||||
PrimitivesItems items = 8;
|
||||
string collection_format = 9;
|
||||
Any default = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
repeated Any enum = 21;
|
||||
double multiple_of = 22;
|
||||
repeated NamedAny vendor_extension = 23;
|
||||
}
|
||||
|
||||
message Response {
|
||||
string description = 1;
|
||||
SchemaItem schema = 2;
|
||||
Headers headers = 3;
|
||||
Examples examples = 4;
|
||||
repeated NamedAny vendor_extension = 5;
|
||||
}
|
||||
|
||||
// One or more JSON representations for parameters
|
||||
message ResponseDefinitions {
|
||||
repeated NamedResponse additional_properties = 1;
|
||||
}
|
||||
|
||||
message ResponseValue {
|
||||
oneof oneof {
|
||||
Response response = 1;
|
||||
JsonReference json_reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Response objects names can either be any valid HTTP status code or 'default'.
|
||||
message Responses {
|
||||
repeated NamedResponseValue response_code = 1;
|
||||
repeated NamedAny vendor_extension = 2;
|
||||
}
|
||||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
message Schema {
|
||||
string _ref = 1;
|
||||
string format = 2;
|
||||
string title = 3;
|
||||
string description = 4;
|
||||
Any default = 5;
|
||||
double multiple_of = 6;
|
||||
double maximum = 7;
|
||||
bool exclusive_maximum = 8;
|
||||
double minimum = 9;
|
||||
bool exclusive_minimum = 10;
|
||||
int64 max_length = 11;
|
||||
int64 min_length = 12;
|
||||
string pattern = 13;
|
||||
int64 max_items = 14;
|
||||
int64 min_items = 15;
|
||||
bool unique_items = 16;
|
||||
int64 max_properties = 17;
|
||||
int64 min_properties = 18;
|
||||
repeated string required = 19;
|
||||
repeated Any enum = 20;
|
||||
AdditionalPropertiesItem additional_properties = 21;
|
||||
TypeItem type = 22;
|
||||
ItemsItem items = 23;
|
||||
repeated Schema all_of = 24;
|
||||
Properties properties = 25;
|
||||
string discriminator = 26;
|
||||
bool read_only = 27;
|
||||
Xml xml = 28;
|
||||
ExternalDocs external_docs = 29;
|
||||
Any example = 30;
|
||||
repeated NamedAny vendor_extension = 31;
|
||||
}
|
||||
|
||||
message SchemaItem {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
FileSchema file_schema = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message SecurityDefinitions {
|
||||
repeated NamedSecurityDefinitionsItem additional_properties = 1;
|
||||
}
|
||||
|
||||
message SecurityDefinitionsItem {
|
||||
oneof oneof {
|
||||
BasicAuthenticationSecurity basic_authentication_security = 1;
|
||||
ApiKeySecurity api_key_security = 2;
|
||||
Oauth2ImplicitSecurity oauth2_implicit_security = 3;
|
||||
Oauth2PasswordSecurity oauth2_password_security = 4;
|
||||
Oauth2ApplicationSecurity oauth2_application_security = 5;
|
||||
Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
|
||||
}
|
||||
}
|
||||
|
||||
message SecurityRequirement {
|
||||
repeated NamedStringArray additional_properties = 1;
|
||||
}
|
||||
|
||||
message StringArray {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
message Tag {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
ExternalDocs external_docs = 3;
|
||||
repeated NamedAny vendor_extension = 4;
|
||||
}
|
||||
|
||||
message TypeItem {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
// Any property starting with x- is valid.
|
||||
message VendorExtension {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
message Xml {
|
||||
string name = 1;
|
||||
string namespace = 2;
|
||||
string prefix = 3;
|
||||
bool attribute = 4;
|
||||
bool wrapped = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
16
vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
generated
vendored
Normal file
@@ -0,0 +1,16 @@
# OpenAPI v2 Protocol Buffer Models

This directory contains a Protocol Buffer-language model
and related code for supporting OpenAPI v2.

Gnostic applications and plugins can use OpenAPIv2.proto
to generate Protocol Buffer support code for their preferred languages.

OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
descriptions into the Protocol Buffer-based datastructures
generated from OpenAPIv2.proto.

OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic
compiler generator, and OpenAPIv2.pb.go is generated by
protoc, the Protocol Buffer compiler, and protoc-gen-go, the
Protocol Buffer Go code generation plugin.
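As a rough usage sketch of what that README describes: `compiler.ReadInfoForFile` appears later in this diff, while `openapi_v2.NewDocument` lives in the diff-suppressed OpenAPIv2.go and is assumed here; the `Swagger` and `Info.Title` fields follow the proto definitions shown above. Treat this as illustrative, not as the library's documented entry point.

```go
// Illustrative only: load an OpenAPI v2 description into the generated structures.
// openapi_v2.NewDocument is assumed from the suppressed OpenAPIv2.go.
package main

import (
	"fmt"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Parse the YAML/JSON description (the file name is just an example).
	info, err := compiler.ReadInfoForFile("swagger.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// Build the protobuf-backed Document from the parsed tree.
	doc, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc.Swagger, doc.Info.Title)
}
```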
1610
vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
40
vendor/github.com/googleapis/gnostic/compiler/BUILD
generated
vendored
Normal file
@@ -0,0 +1,40 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "context.go",
        "error.go",
        "extension-handler.go",
        "helpers.go",
        "main.go",
        "reader.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//vendor/github.com/golang/protobuf/proto:go_default_library",
        "//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
        "//vendor/github.com/googleapis/gnostic/extensions:go_default_library",
        "//vendor/gopkg.in/yaml.v2:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
3
vendor/github.com/googleapis/gnostic/compiler/README.md
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# Compiler support code

This directory contains compiler support code used by Gnostic and Gnostic extensions.
41
vendor/github.com/googleapis/gnostic/compiler/context.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

type Context struct {
	Parent            *Context
	Name              string
	ExtensionHandlers *[]ExtensionHandler
}

func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
	return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
}

func NewContext(name string, parent *Context) *Context {
	if parent != nil {
		return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
	} else {
		return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
	}
}

func (context *Context) Description() string {
	if context.Parent != nil {
		return context.Parent.Description() + "." + context.Name
	} else {
		return context.Name
	}
}
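A tiny usage sketch for the `Context` chain above (the names below are made up): `Description()` simply walks the `Parent` links and joins the names from the root down.

```go
// Hypothetical usage of compiler.Context; the context names are illustrative.
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	root := compiler.NewContext("document", nil)
	paths := compiler.NewContext("paths", root)
	pet := compiler.NewContext("/pets", paths)
	// Description joins the chain of names with dots.
	fmt.Println(pet.Description()) // document.paths./pets
}
```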
59
vendor/github.com/googleapis/gnostic/compiler/error.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

// basic error type
type Error struct {
	Context *Context
	Message string
}

func NewError(context *Context, message string) *Error {
	return &Error{Context: context, Message: message}
}

func (err *Error) Error() string {
	if err.Context != nil {
		return "ERROR " + err.Context.Description() + " " + err.Message
	} else {
		return "ERROR " + err.Message
	}
}

// container for groups of errors
type ErrorGroup struct {
	Errors []error
}

func NewErrorGroupOrNil(errors []error) error {
	if len(errors) == 0 {
		return nil
	} else if len(errors) == 1 {
		return errors[0]
	} else {
		return &ErrorGroup{Errors: errors}
	}
}

func (group *ErrorGroup) Error() string {
	result := ""
	for i, err := range group.Errors {
		if i > 0 {
			result += "\n"
		}
		result += err.Error()
	}
	return result
}
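A short sketch of how these error helpers compose (the messages are invented): `NewErrorGroupOrNil` returns nil for an empty slice, the error itself for a single entry, and otherwise an `ErrorGroup` whose `Error()` joins the messages with newlines.

```go
// Illustrative use of compiler.NewError and compiler.NewErrorGroupOrNil.
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	ctx := compiler.NewContext("paths", compiler.NewContext("document", nil))
	errs := []error{
		compiler.NewError(ctx, "unexpected key"),
		compiler.NewError(ctx, "missing required property"),
	}
	if err := compiler.NewErrorGroupOrNil(errs); err != nil {
		fmt.Println(err)
		// ERROR document.paths unexpected key
		// ERROR document.paths missing required property
	}
}
```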
99
vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
ext_plugin "github.com/googleapis/gnostic/extensions"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type ExtensionHandler struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
|
||||
handled := false
|
||||
var errFromPlugin error
|
||||
var outFromPlugin *any.Any
|
||||
|
||||
if context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
|
||||
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
|
||||
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
|
||||
if outFromPlugin == nil {
|
||||
continue
|
||||
} else {
|
||||
handled = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return handled, outFromPlugin, errFromPlugin
|
||||
}
|
||||
|
||||
func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
|
||||
if extensionHandlers.Name != "" {
|
||||
binary, _ := yaml.Marshal(in)
|
||||
|
||||
request := &ext_plugin.ExtensionHandlerRequest{}
|
||||
|
||||
version := &ext_plugin.Version{}
|
||||
version.Major = 0
|
||||
version.Minor = 1
|
||||
version.Patch = 0
|
||||
request.CompilerVersion = version
|
||||
|
||||
request.Wrapper = &ext_plugin.Wrapper{}
|
||||
|
||||
request.Wrapper.Version = "v2"
|
||||
request.Wrapper.Yaml = string(binary)
|
||||
request.Wrapper.ExtensionName = extensionName
|
||||
|
||||
requestBytes, _ := proto.Marshal(request)
|
||||
cmd := exec.Command(extensionHandlers.Name)
|
||||
cmd.Stdin = bytes.NewReader(requestBytes)
|
||||
output, err := cmd.Output()
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %+v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
response := &ext_plugin.ExtensionHandlerResponse{}
|
||||
err = proto.Unmarshal(output, response)
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %+v\n", err)
|
||||
fmt.Printf("%s\n", string(output))
|
||||
return nil, err
|
||||
}
|
||||
if !response.Handled {
|
||||
return nil, nil
|
||||
}
|
||||
if len(response.Error) != 0 {
|
||||
message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
return response.Value, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
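The handler code above shells out to an external binary named after the extension handler, writing a proto-encoded ExtensionHandlerRequest to its stdin and reading an ExtensionHandlerResponse back. A hedged sketch of wiring a handler into a context follows; the plugin binary name and the extension value are invented for illustration.

```go
// Illustrative wiring of vendor-extension handlers; "gnostic-x-sample" is a
// made-up plugin binary that would implement the stdin/stdout protocol above.
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	handlers := []compiler.ExtensionHandler{{Name: "gnostic-x-sample"}}
	ctx := compiler.NewContextWithExtensions("$root", nil, &handlers)

	// The value found under an "x-..." key would normally come from the parsed
	// document; a literal stands in for it here.
	var extensionValue interface{} = map[string]interface{}{"id": 42}

	// handled reports whether any registered handler produced a proto Any.
	handled, value, err := compiler.HandleExtension(ctx, extensionValue, "x-sample")
	fmt.Println(handled, value, err)
}
```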
193
vendor/github.com/googleapis/gnostic/compiler/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"gopkg.in/yaml.v2"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// compiler helper functions, usually called from generated code
|
||||
|
||||
func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
|
||||
m, ok := in.(yaml.MapSlice)
|
||||
if ok {
|
||||
return m, ok
|
||||
} else {
|
||||
// do we have an empty array?
|
||||
a, ok := in.([]interface{})
|
||||
if ok && len(a) == 0 {
|
||||
// if so, return an empty map
|
||||
return yaml.MapSlice{}, ok
|
||||
} else {
|
||||
return nil, ok
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SortedKeysForMap(m yaml.MapSlice) []string {
|
||||
keys := make([]string, 0)
|
||||
for _, item := range m {
|
||||
keys = append(keys, item.Key.(string))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func MapHasKey(m yaml.MapSlice, key string) bool {
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
if ok && key == itemKey {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func MapValueForKey(m yaml.MapSlice, key string) interface{} {
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
if ok && key == itemKey {
|
||||
return item.Value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
|
||||
stringArray := make([]string, 0)
|
||||
for _, item := range interfaceArray {
|
||||
v, ok := item.(string)
|
||||
if ok {
|
||||
stringArray = append(stringArray, v)
|
||||
}
|
||||
}
|
||||
return stringArray
|
||||
}
|
||||
|
||||
func PatternMatches(pattern string, value string) bool {
|
||||
// if pattern contains a subpattern like "{path}", replace it with ".*"
|
||||
if pattern[0] != '^' {
|
||||
subpatternPattern := regexp.MustCompile("^.*(\\{.*\\}).*$")
|
||||
if matches := subpatternPattern.FindSubmatch([]byte(pattern)); matches != nil {
|
||||
match := string(matches[1])
|
||||
pattern = strings.Replace(pattern, match, ".*", -1)
|
||||
}
|
||||
}
|
||||
matched, err := regexp.Match(pattern, []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return matched
|
||||
}
|
||||
|
||||
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
|
||||
missingKeys := make([]string, 0)
|
||||
for _, k := range requiredKeys {
|
||||
if !MapHasKey(m, k) {
|
||||
missingKeys = append(missingKeys, k)
|
||||
}
|
||||
}
|
||||
return missingKeys
|
||||
}
|
||||
|
||||
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []string) []string {
|
||||
invalidKeys := make([]string, 0)
|
||||
for _, item := range m {
|
||||
itemKey, ok := item.Key.(string)
|
||||
if ok {
|
||||
key := itemKey
|
||||
found := false
|
||||
// does the key match an allowed key?
|
||||
for _, allowedKey := range allowedKeys {
|
||||
if key == allowedKey {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// does the key match an allowed pattern?
|
||||
for _, allowedPattern := range allowedPatterns {
|
||||
if PatternMatches(allowedPattern, key) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
invalidKeys = append(invalidKeys, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return invalidKeys
|
||||
}
|
||||
|
||||
// describe a map (for debugging purposes)
|
||||
func DescribeMap(in interface{}, indent string) string {
|
||||
description := ""
|
||||
m, ok := in.(map[string]interface{})
|
||||
if ok {
|
||||
keys := make([]string, 0)
|
||||
for k, _ := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
v := m[k]
|
||||
description += fmt.Sprintf("%s%s:\n", indent, k)
|
||||
description += DescribeMap(v, indent+" ")
|
||||
}
|
||||
return description
|
||||
}
|
||||
a, ok := in.([]interface{})
|
||||
if ok {
|
||||
for i, v := range a {
|
||||
description += fmt.Sprintf("%s%d:\n", indent, i)
|
||||
description += DescribeMap(v, indent+" ")
|
||||
}
|
||||
return description
|
||||
}
|
||||
description += fmt.Sprintf("%s%+v\n", indent, in)
|
||||
return description
|
||||
}
|
||||
|
||||
func PluralProperties(count int) string {
|
||||
if count == 1 {
|
||||
return "property"
|
||||
} else {
|
||||
return "properties"
|
||||
}
|
||||
}
|
||||
|
||||
func StringArrayContainsValue(array []string, value string) bool {
|
||||
for _, item := range array {
|
||||
if item == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func StringArrayContainsValues(array []string, values []string) bool {
|
||||
for _, value := range values {
|
||||
if !StringArrayContainsValue(array, value) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
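The helpers above all operate on yaml.v2's order-preserving `yaml.MapSlice`. A small sketch of the typical lookups, using a sample document invented for illustration:

```go
// Illustrative use of the compiler yaml.MapSlice helpers shown above.
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	var m yaml.MapSlice
	src := "swagger: \"2.0\"\ninfo:\n  title: Pets\n"
	if err := yaml.Unmarshal([]byte(src), &m); err != nil {
		panic(err)
	}
	fmt.Println(compiler.MapHasKey(m, "swagger"))                           // true
	fmt.Println(compiler.MapValueForKey(m, "swagger"))                      // 2.0
	fmt.Println(compiler.MissingKeysInMap(m, []string{"swagger", "paths"})) // [paths]
	fmt.Println(compiler.SortedKeysForMap(m))                               // [info swagger]
}
```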
16
vendor/github.com/googleapis/gnostic/compiler/main.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package compiler provides support functions to generated compiler code.
package compiler
167
vendor/github.com/googleapis/gnostic/compiler/reader.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"gopkg.in/yaml.v2"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var file_cache map[string][]byte
|
||||
var info_cache map[string]interface{}
|
||||
var count int64
|
||||
|
||||
var VERBOSE_READER = false
|
||||
|
||||
func initializeFileCache() {
|
||||
if file_cache == nil {
|
||||
file_cache = make(map[string][]byte, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func initializeInfoCache() {
|
||||
if info_cache == nil {
|
||||
info_cache = make(map[string]interface{}, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func FetchFile(fileurl string) ([]byte, error) {
|
||||
initializeFileCache()
|
||||
bytes, ok := file_cache[fileurl]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit %s", fileurl)
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
log.Printf("Fetching %s", fileurl)
|
||||
response, err := http.Get(fileurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
defer response.Body.Close()
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err == nil {
|
||||
file_cache[fileurl] = bytes
|
||||
}
|
||||
return bytes, err
|
||||
}
|
||||
}
|
||||
|
||||
// read a file and unmarshal it as a yaml.MapSlice
|
||||
func ReadInfoForFile(filename string) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
info, ok := info_cache[filename]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit info for file %s", filename)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Reading info for file %s", filename)
|
||||
}
|
||||
|
||||
// is the filename a url?
|
||||
fileurl, _ := url.Parse(filename)
|
||||
if fileurl.Scheme != "" {
|
||||
// yes, fetch it
|
||||
bytes, err := FetchFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err = yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info_cache[filename] = info
|
||||
return info, nil
|
||||
} else {
|
||||
// no, it's a local filename
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
var info yaml.MapSlice
|
||||
err = yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info_cache[filename] = info
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
|
||||
// read a file and return the fragment needed to resolve a $ref
|
||||
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
|
||||
initializeInfoCache()
|
||||
{
|
||||
info, ok := info_cache[ref]
|
||||
if ok {
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Cache hit for ref %s#%s", basefile, ref)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
if VERBOSE_READER {
|
||||
log.Printf("Reading info for ref %s#%s", basefile, ref)
|
||||
}
|
||||
count = count + 1
|
||||
basedir, _ := filepath.Split(basefile)
|
||||
parts := strings.Split(ref, "#")
|
||||
var filename string
|
||||
if parts[0] != "" {
|
||||
filename = basedir + parts[0]
|
||||
} else {
|
||||
filename = basefile
|
||||
}
|
||||
info, err := ReadInfoForFile(filename)
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
} else {
|
||||
if len(parts) > 1 {
|
||||
path := strings.Split(parts[1], "/")
|
||||
for i, key := range path {
|
||||
if i > 0 {
|
||||
m, ok := info.(yaml.MapSlice)
|
||||
if ok {
|
||||
found := false
|
||||
for _, section := range m {
|
||||
if section.Key == key {
|
||||
info = section.Value
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
info_cache[ref] = nil
|
||||
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
info_cache[ref] = info
|
||||
return info, nil
|
||||
}
|
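The reader above memoizes fetched bytes and parsed YAML in package-level caches and exposes FetchFile, ReadInfoForFile, and ReadInfoForRef. A minimal sketch of how a caller might drive it; the `swagger.yaml` file name and the `#/definitions/Pet` fragment are illustrative, not taken from this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Parse the whole description once; the result is cached by filename.
	info, err := compiler.ReadInfoForFile("swagger.yaml")
	if err != nil {
		log.Fatalf("reading description: %v", err)
	}
	fmt.Printf("top level: %+v\n", info)

	// Resolve a fragment the way a $ref would be resolved against the same
	// base file; the resolved node is cached under the ref string.
	pet, err := compiler.ReadInfoForRef("swagger.yaml", "#/definitions/Pet")
	if err != nil {
		log.Fatalf("resolving ref: %v", err)
	}
	fmt.Printf("resolved ref: %+v\n", pet)
}
```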
36
vendor/github.com/googleapis/gnostic/extensions/BUILD
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"extension.pb.go",
|
||||
"extensions.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/ptypes:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
|
||||
"//vendor/gopkg.in/yaml.v2:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
7
vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
generated
vendored
Executable file
@@ -0,0 +1,7 @@
|
||||
go get github.com/golang/protobuf/protoc-gen-go
|
||||
|
||||
protoc \
|
||||
--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto
|
||||
|
||||
go build
|
||||
go install
|
5
vendor/github.com/googleapis/gnostic/extensions/README.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# Extensions
|
||||
|
||||
This directory contains support code for building Gnostic extensions and associated examples.
|
||||
|
||||
Extensions are used to compile vendor or specification extensions into protocol buffer structures.
|
219
vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
generated
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: extension.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package openapiextension_v1 is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
extension.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Version
|
||||
ExtensionHandlerRequest
|
||||
ExtensionHandlerResponse
|
||||
Wrapper
|
||||
*/
|
||||
package openapiextension_v1
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import google_protobuf "github.com/golang/protobuf/ptypes/any"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// The version number of OpenAPI compiler.
|
||||
type Version struct {
|
||||
Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
|
||||
Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
|
||||
Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
|
||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||
// be empty for mainline stable releases.
|
||||
Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Version) Reset() { *m = Version{} }
|
||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||
func (*Version) ProtoMessage() {}
|
||||
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Version) GetMajor() int32 {
|
||||
if m != nil {
|
||||
return m.Major
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Version) GetMinor() int32 {
|
||||
if m != nil {
|
||||
return m.Minor
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Version) GetPatch() int32 {
|
||||
if m != nil {
|
||||
return m.Patch
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Version) GetSuffix() string {
|
||||
if m != nil {
|
||||
return m.Suffix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// An encoded Request is written to the ExtensionHandler's stdin.
|
||||
type ExtensionHandlerRequest struct {
|
||||
// The OpenAPI descriptions that were explicitly listed on the command line.
|
||||
// The specifications will appear in the order they are specified to openapic.
|
||||
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"`
|
||||
// The version number of openapi compiler.
|
||||
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
|
||||
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtensionHandlerRequest) ProtoMessage() {}
|
||||
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
|
||||
if m != nil {
|
||||
return m.Wrapper
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
|
||||
if m != nil {
|
||||
return m.CompilerVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The extension writes an encoded ExtensionHandlerResponse to stdout.
|
||||
type ExtensionHandlerResponse struct {
|
||||
// true if the extension is handled by the extension handler; false otherwise
|
||||
Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"`
|
||||
// Error message. If non-empty, the extension handling failed.
|
||||
// The extension handler process should exit with status code zero
|
||||
// even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors which prevent the extension from
|
||||
// operating as intended. Errors which indicate a problem in gnostic
|
||||
// itself -- such as the input Document being unparseable -- should be
|
||||
// reported by writing a message to stderr and exiting with a non-zero
|
||||
// status code.
|
||||
Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"`
|
||||
// text output
|
||||
Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
|
||||
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtensionHandlerResponse) ProtoMessage() {}
|
||||
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *ExtensionHandlerResponse) GetHandled() bool {
|
||||
if m != nil {
|
||||
return m.Handled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerResponse) GetError() []string {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Wrapper struct {
|
||||
// version of the OpenAPI specification in which this extension was written.
|
||||
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
|
||||
// Name of the extension
|
||||
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"`
|
||||
// Must be a valid yaml for the proto
|
||||
Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Wrapper) Reset() { *m = Wrapper{} }
|
||||
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
|
||||
func (*Wrapper) ProtoMessage() {}
|
||||
func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *Wrapper) GetVersion() string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Wrapper) GetExtensionName() string {
|
||||
if m != nil {
|
||||
return m.ExtensionName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Wrapper) GetYaml() string {
|
||||
if m != nil {
|
||||
return m.Yaml
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
|
||||
proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
|
||||
proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
|
||||
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("extension.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 355 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40,
|
||||
0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22,
|
||||
0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb,
|
||||
0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50,
|
||||
0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4,
|
||||
0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4,
|
||||
0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0,
|
||||
0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34,
|
||||
0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a,
|
||||
0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a,
|
||||
0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66,
|
||||
0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2,
|
||||
0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d,
|
||||
0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3,
|
||||
0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c,
|
||||
0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa,
|
||||
0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab,
|
||||
0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3,
|
||||
0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62,
|
||||
0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23,
|
||||
0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1,
|
||||
0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52,
|
||||
0x02, 0x00, 0x00,
|
||||
}
|
93
vendor/github.com/googleapis/gnostic/extensions/extension.proto
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
package openapiextension.v1;
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one-level of name nesting and be
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "OpenAPIExtensionV1";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapic.v1";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
//
|
||||
option objc_class_prefix = "OAE"; // "OpenAPI Extension"
|
||||
|
||||
// The version number of OpenAPI compiler.
|
||||
message Version {
|
||||
int32 major = 1;
|
||||
int32 minor = 2;
|
||||
int32 patch = 3;
|
||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||
// be empty for mainline stable releases.
|
||||
string suffix = 4;
|
||||
}
|
||||
|
||||
// An encoded Request is written to the ExtensionHandler's stdin.
|
||||
message ExtensionHandlerRequest {
|
||||
|
||||
// The OpenAPI descriptions that were explicitly listed on the command line.
|
||||
// The specifications will appear in the order they are specified to openapic.
|
||||
Wrapper wrapper = 1;
|
||||
|
||||
// The version number of openapi compiler.
|
||||
Version compiler_version = 3;
|
||||
}
|
||||
|
||||
// The extension writes an encoded ExtensionHandlerResponse to stdout.
|
||||
message ExtensionHandlerResponse {
|
||||
|
||||
// true if the extension is handled by the extension handler; false otherwise
|
||||
bool handled = 1;
|
||||
|
||||
// Error message. If non-empty, the extension handling failed.
|
||||
// The extension handler process should exit with status code zero
|
||||
// even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors which prevent the extension from
|
||||
// operating as intended. Errors which indicate a problem in gnostic
|
||||
// itself -- such as the input Document being unparseable -- should be
|
||||
// reported by writing a message to stderr and exiting with a non-zero
|
||||
// status code.
|
||||
repeated string error = 2;
|
||||
|
||||
// text output
|
||||
google.protobuf.Any value = 3;
|
||||
}
|
||||
|
||||
message Wrapper {
|
||||
// version of the OpenAPI specification in which this extension was written.
|
||||
string version = 1;
|
||||
|
||||
// Name of the extension
|
||||
string extension_name = 2;
|
||||
|
||||
// Must be a valid yaml for the proto
|
||||
string yaml = 3;
|
||||
}
|
83
vendor/github.com/googleapis/gnostic/extensions/extensions.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package openapiextension_v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type documentHandler func(version string, extensionName string, document string)
|
||||
type extensionHandler func(name string, info yaml.MapSlice) (bool, proto.Message, error)
|
||||
|
||||
func forInputYamlFromOpenapic(handler documentHandler) {
|
||||
data, err := ioutil.ReadAll(os.Stdin)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("File error:", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
request := &ExtensionHandlerRequest{}
|
||||
err = proto.Unmarshal(data, request)
|
||||
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
|
||||
}
|
||||
|
||||
func ProcessExtension(handleExtension extensionHandler) {
|
||||
response := &ExtensionHandlerResponse{}
|
||||
forInputYamlFromOpenapic(
|
||||
func(version string, extensionName string, yamlInput string) {
|
||||
var info yaml.MapSlice
|
||||
var newObject proto.Message
|
||||
var err error
|
||||
err = yaml.Unmarshal([]byte(yamlInput), &info)
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
handled, newObject, err := handleExtension(extensionName, info)
|
||||
if !handled {
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// If we reach here, then the extension is handled
|
||||
response.Handled = true
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
response.Value, err = ptypes.MarshalAny(newObject)
|
||||
if err != nil {
|
||||
response.Error = append(response.Error, err.Error())
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
os.Exit(0)
|
||||
}
|
||||
})
|
||||
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
}
|
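ProcessExtension above is the whole control loop for an extension-handler binary: it decodes the ExtensionHandlerRequest from stdin, hands the extension's YAML to the callback, and writes an ExtensionHandlerResponse (with the result wrapped in an Any) to stdout. A minimal sketch of such a handler, assuming a hypothetical `x-sample` extension name and reusing Version purely as a stand-in proto.Message:

```go
package main

import (
	"github.com/golang/protobuf/proto"
	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	openapiextension_v1.ProcessExtension(func(name string, info yaml.MapSlice) (bool, proto.Message, error) {
		if name != "x-sample" {
			// Report the extension as unhandled; gnostic keeps it verbatim.
			return false, nil, nil
		}
		// A real handler would map `info` onto its own generated message type;
		// Version is used here only so the sketch compiles against this package.
		return true, &openapiextension_v1.Version{Major: 1}, nil
	})
}
```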
43
vendor/github.com/howeyc/gopass/pass.go
generated
vendored
@@ -7,9 +7,14 @@ import (
|
||||
"os"
|
||||
)
|
||||
|
||||
var defaultGetCh = func() (byte, error) {
|
||||
type FdReader interface {
|
||||
io.Reader
|
||||
Fd() uintptr
|
||||
}
|
||||
|
||||
var defaultGetCh = func(r io.Reader) (byte, error) {
|
||||
buf := make([]byte, 1)
|
||||
if n, err := os.Stdin.Read(buf); n == 0 || err != nil {
|
||||
if n, err := r.Read(buf); n == 0 || err != nil {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -28,9 +33,10 @@ var (
|
||||
)
|
||||
|
||||
// getPasswd returns the input read from terminal.
|
||||
// If prompt is not empty, it will be output as a prompt to the user
|
||||
// If masked is true, typing will be matched by asterisks on the screen.
|
||||
// Otherwise, typing will echo nothing.
|
||||
func getPasswd(masked bool) ([]byte, error) {
|
||||
func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) {
|
||||
var err error
|
||||
var pass, bs, mask []byte
|
||||
if masked {
|
||||
@@ -38,26 +44,33 @@ func getPasswd(masked bool) ([]byte, error) {
|
||||
mask = []byte("*")
|
||||
}
|
||||
|
||||
if isTerminal(os.Stdin.Fd()) {
|
||||
if oldState, err := makeRaw(os.Stdin.Fd()); err != nil {
|
||||
if isTerminal(r.Fd()) {
|
||||
if oldState, err := makeRaw(r.Fd()); err != nil {
|
||||
return pass, err
|
||||
} else {
|
||||
defer restore(os.Stdin.Fd(), oldState)
|
||||
defer func() {
|
||||
restore(r.Fd(), oldState)
|
||||
fmt.Fprintln(w)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
if prompt != "" {
|
||||
fmt.Fprint(w, prompt)
|
||||
}
|
||||
|
||||
// Track total bytes read, not just bytes in the password. This ensures any
|
||||
// errors that might flood the console with nil or -1 bytes infinitely are
|
||||
// capped.
|
||||
var counter int
|
||||
for counter = 0; counter <= maxLength; counter++ {
|
||||
if v, e := getch(); e != nil {
|
||||
if v, e := getch(r); e != nil {
|
||||
err = e
|
||||
break
|
||||
} else if v == 127 || v == 8 {
|
||||
if l := len(pass); l > 0 {
|
||||
pass = pass[:l-1]
|
||||
fmt.Print(string(bs))
|
||||
fmt.Fprint(w, string(bs))
|
||||
}
|
||||
} else if v == 13 || v == 10 {
|
||||
break
|
||||
@@ -66,7 +79,7 @@ func getPasswd(masked bool) ([]byte, error) {
|
||||
break
|
||||
} else if v != 0 {
|
||||
pass = append(pass, v)
|
||||
fmt.Print(string(mask))
|
||||
fmt.Fprint(w, string(mask))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,18 +87,24 @@ func getPasswd(masked bool) ([]byte, error) {
|
||||
err = ErrMaxLengthExceeded
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
return pass, err
|
||||
}
|
||||
|
||||
// GetPasswd returns the password read from the terminal without echoing input.
|
||||
// The returned byte array does not include end-of-line characters.
|
||||
func GetPasswd() ([]byte, error) {
|
||||
return getPasswd(false)
|
||||
return getPasswd("", false, os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
// GetPasswdMasked returns the password read from the terminal, echoing asterisks.
|
||||
// The returned byte array does not include end-of-line characters.
|
||||
func GetPasswdMasked() ([]byte, error) {
|
||||
return getPasswd(true)
|
||||
return getPasswd("", true, os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
// GetPasswdPrompt prompts the user and returns the password read from the terminal.
|
||||
// If mask is true, then asterisks are echoed.
|
||||
// The returned byte array does not include end-of-line characters.
|
||||
func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) {
|
||||
return getPasswd(prompt, mask, r, w)
|
||||
}
|
||||
|
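With the reworked signatures, callers that want a prompt pass the reader and writer explicitly; os.Stdin already satisfies the new FdReader interface because it has both Read and Fd. A minimal sketch, with an illustrative prompt string:

```go
package main

import (
	"fmt"
	"os"

	"github.com/howeyc/gopass"
)

func main() {
	// os.Stdin provides Read and Fd, so it can be passed as the FdReader.
	pass, err := gopass.GetPasswdPrompt("Password: ", true, os.Stdin, os.Stdout)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(pass))
}
```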
59
vendor/github.com/juju/ratelimit/ratelimit.go
generated
vendored
@@ -2,7 +2,7 @@
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
// The ratelimit package provides an efficient token bucket implementation
|
||||
// Package ratelimit provides an efficient token bucket implementation
|
||||
// that can be used to limit the rate of arbitrary things.
|
||||
// See http://en.wikipedia.org/wiki/Token_bucket.
|
||||
package ratelimit
|
||||
@@ -21,6 +21,7 @@ type Bucket struct {
|
||||
capacity int64
|
||||
quantum int64
|
||||
fillInterval time.Duration
|
||||
clock Clock
|
||||
|
||||
// The mutex guards the fields following it.
|
||||
mu sync.Mutex
|
||||
@@ -33,12 +34,37 @@ type Bucket struct {
|
||||
availTick int64
|
||||
}
|
||||
|
||||
// Clock is used to inject testable fakes.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
Sleep(d time.Duration)
|
||||
}
|
||||
|
||||
// realClock implements Clock in terms of standard time functions.
|
||||
type realClock struct{}
|
||||
|
||||
// Now is identical to time.Now.
|
||||
func (realClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Sleep is identical to time.Sleep.
|
||||
func (realClock) Sleep(d time.Duration) {
|
||||
time.Sleep(d)
|
||||
}
|
||||
|
||||
// NewBucket returns a new token bucket that fills at the
|
||||
// rate of one token every fillInterval, up to the given
|
||||
// maximum capacity. Both arguments must be
|
||||
// positive. The bucket is initially full.
|
||||
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
|
||||
return NewBucketWithQuantum(fillInterval, capacity, 1)
|
||||
return NewBucketWithClock(fillInterval, capacity, realClock{})
|
||||
}
|
||||
|
||||
// NewBucketWithClock is identical to NewBucket but injects a testable clock
|
||||
// interface.
|
||||
func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket {
|
||||
return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock)
|
||||
}
|
||||
|
||||
// rateMargin specifies the allowed variance of actual
|
||||
@@ -51,12 +77,18 @@ const rateMargin = 0.01
|
||||
// at high rates, the actual rate may be up to 1% different from the
|
||||
// specified rate.
|
||||
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
|
||||
return NewBucketWithRateAndClock(rate, capacity, realClock{})
|
||||
}
|
||||
|
||||
// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
|
||||
// testable clock interface.
|
||||
func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
|
||||
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
|
||||
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
|
||||
if fillInterval <= 0 {
|
||||
continue
|
||||
}
|
||||
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
|
||||
tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock)
|
||||
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
|
||||
return tb
|
||||
}
|
||||
@@ -79,6 +111,12 @@ func nextQuantum(q int64) int64 {
|
||||
// the specification of the quantum size - quantum tokens
|
||||
// are added every fillInterval.
|
||||
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
|
||||
return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{})
|
||||
}
|
||||
|
||||
// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects
|
||||
// a testable clock interface.
|
||||
func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
|
||||
if fillInterval <= 0 {
|
||||
panic("token bucket fill interval is not > 0")
|
||||
}
|
||||
@@ -89,7 +127,8 @@ func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *
|
||||
panic("token bucket quantum is not > 0")
|
||||
}
|
||||
return &Bucket{
|
||||
startTime: time.Now(),
|
||||
clock: clock,
|
||||
startTime: clock.Now(),
|
||||
capacity: capacity,
|
||||
quantum: quantum,
|
||||
avail: capacity,
|
||||
@@ -101,7 +140,7 @@ func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *
|
||||
// available.
|
||||
func (tb *Bucket) Wait(count int64) {
|
||||
if d := tb.Take(count); d > 0 {
|
||||
time.Sleep(d)
|
||||
tb.clock.Sleep(d)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,7 +152,7 @@ func (tb *Bucket) Wait(count int64) {
|
||||
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
|
||||
d, ok := tb.TakeMaxDuration(count, maxWait)
|
||||
if d > 0 {
|
||||
time.Sleep(d)
|
||||
tb.clock.Sleep(d)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
@@ -127,7 +166,7 @@ const infinityDuration time.Duration = 0x7fffffffffffffff
|
||||
// Note that if the request is irrevocable - there is no way to return
|
||||
// tokens to the bucket once this method commits us to taking them.
|
||||
func (tb *Bucket) Take(count int64) time.Duration {
|
||||
d, _ := tb.take(time.Now(), count, infinityDuration)
|
||||
d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
|
||||
return d
|
||||
}
|
||||
|
||||
@@ -141,14 +180,14 @@ func (tb *Bucket) Take(count int64) time.Duration {
|
||||
// wait until the tokens are actually available, and reports
|
||||
// true.
|
||||
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
return tb.take(time.Now(), count, maxWait)
|
||||
return tb.take(tb.clock.Now(), count, maxWait)
|
||||
}
|
||||
|
||||
// TakeAvailable takes up to count immediately available tokens from the
|
||||
// bucket. It returns the number of tokens removed, or zero if there are
|
||||
// no available tokens. It does not block.
|
||||
func (tb *Bucket) TakeAvailable(count int64) int64 {
|
||||
return tb.takeAvailable(time.Now(), count)
|
||||
return tb.takeAvailable(tb.clock.Now(), count)
|
||||
}
|
||||
|
||||
// takeAvailable is the internal version of TakeAvailable - it takes the
|
||||
@@ -178,7 +217,7 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
|
||||
// tokens could have changed in the meantime. This method is intended
|
||||
// primarily for metrics reporting and debugging.
|
||||
func (tb *Bucket) Available() int64 {
|
||||
return tb.available(time.Now())
|
||||
return tb.available(tb.clock.Now())
|
||||
}
|
||||
|
||||
// available is the internal version of available - it takes the current time as
|
||||
|
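The Clock indirection added here only changes how the bucket reads time and sleeps; ordinary callers are unaffected, while tests can inject a fake Clock via NewBucketWithClock. A minimal sketch of the common case, with illustrative fill rate and capacity:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// Up to 10 tokens, one new token every 100ms; the bucket starts full.
	bucket := ratelimit.NewBucket(100*time.Millisecond, 10)

	for i := 0; i < 3; i++ {
		bucket.Wait(1) // sleeps via the bucket's Clock until a token is free
		fmt.Println("request", i, "admitted")
	}

	// Non-blocking variant: take whatever is immediately available.
	fmt.Println("took without waiting:", bucket.TakeAvailable(5))
}
```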
7
vendor/github.com/mitchellh/mapstructure/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
|
||||
script:
|
||||
- go test
|
78
vendor/github.com/mitchellh/mapstructure/decode_hooks.go
generated
vendored
@@ -1,11 +1,59 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
|
||||
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
|
||||
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
|
||||
// Create variables here so we can reference them with the reflect pkg
|
||||
var f1 DecodeHookFuncType
|
||||
var f2 DecodeHookFuncKind
|
||||
|
||||
// Fill in the variables into this interface and the rest is done
|
||||
// automatically using the reflect package.
|
||||
potential := []interface{}{f1, f2}
|
||||
|
||||
v := reflect.ValueOf(h)
|
||||
vt := v.Type()
|
||||
for _, raw := range potential {
|
||||
pt := reflect.ValueOf(raw).Type()
|
||||
if vt.ConvertibleTo(pt) {
|
||||
return v.Convert(pt).Interface()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeHookExec executes the given decode hook. This should be used
|
||||
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
|
||||
// that took reflect.Kind instead of reflect.Type.
|
||||
func DecodeHookExec(
|
||||
raw DecodeHookFunc,
|
||||
from reflect.Type, to reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
// Build our arguments that reflect expects
|
||||
argVals := make([]reflect.Value, 3)
|
||||
argVals[0] = reflect.ValueOf(from)
|
||||
argVals[1] = reflect.ValueOf(to)
|
||||
argVals[2] = reflect.ValueOf(data)
|
||||
|
||||
switch f := typedDecodeHook(raw).(type) {
|
||||
case DecodeHookFuncType:
|
||||
return f(from, to, data)
|
||||
case DecodeHookFuncKind:
|
||||
return f(from.Kind(), to.Kind(), data)
|
||||
default:
|
||||
return nil, errors.New("invalid decode hook signature")
|
||||
}
|
||||
}
|
||||
|
||||
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
|
||||
// automatically composes multiple DecodeHookFuncs.
|
||||
//
|
||||
@@ -13,18 +61,21 @@ import (
|
||||
// previous transformation.
|
||||
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
var err error
|
||||
for _, f1 := range fs {
|
||||
data, err = f1(f, t, data)
|
||||
data, err = DecodeHookExec(f1, f, t, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Modify the from kind to be correct with the new data
|
||||
f = getKind(reflect.ValueOf(data))
|
||||
f = nil
|
||||
if val := reflect.ValueOf(data); val.IsValid() {
|
||||
f = val.Type()
|
||||
}
|
||||
}
|
||||
|
||||
return data, nil
|
||||
@@ -51,6 +102,25 @@ func StringToSliceHookFunc(sep string) DecodeHookFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
|
||||
// strings to time.Duration.
|
||||
func StringToTimeDurationHookFunc() DecodeHookFunc {
|
||||
return func(
|
||||
f reflect.Type,
|
||||
t reflect.Type,
|
||||
data interface{}) (interface{}, error) {
|
||||
if f.Kind() != reflect.String {
|
||||
return data, nil
|
||||
}
|
||||
if t != reflect.TypeOf(time.Duration(5)) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// Convert it by parsing
|
||||
return time.ParseDuration(data.(string))
|
||||
}
|
||||
}
|
||||
|
||||
func WeaklyTypedHook(
|
||||
f reflect.Kind,
|
||||
t reflect.Kind,
|
||||
|
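Because DecodeHookFunc is now an opaque interface, older Kind-based hooks and the new Type-based hooks can be mixed freely through ComposeDecodeHookFunc, which runs them in order and feeds each one the previous result. A minimal sketch, with a hypothetical Config struct and input values:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"Timeout": "1500ms",
		"Tags":    "a,b,c",
	}

	var out Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Hooks run in order; each one sees the output of the previous hook.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {Timeout:1.5s Tags:[a b c]}
}
```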
18
vendor/github.com/mitchellh/mapstructure/error.go
generated
vendored
@@ -1,7 +1,9 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -17,11 +19,27 @@ func (e *Error) Error() string {
|
||||
points[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
sort.Strings(points)
|
||||
return fmt.Sprintf(
|
||||
"%d error(s) decoding:\n\n%s",
|
||||
len(e.Errors), strings.Join(points, "\n"))
|
||||
}
|
||||
|
||||
// WrappedErrors implements the errwrap.Wrapper interface to make this
|
||||
// return value more useful with the errwrap and go-multierror libraries.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]error, len(e.Errors))
|
||||
for i, e := range e.Errors {
|
||||
result[i] = errors.New(e)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func appendErrors(errors []string, err error) []string {
|
||||
switch e := err.(type) {
|
||||
case *Error:
|
||||
|
227
vendor/github.com/mitchellh/mapstructure/mapstructure.go
generated
vendored
@@ -1,5 +1,5 @@
|
||||
// The mapstructure package exposes functionality to convert an
|
||||
// abitrary map[string]interface{} into a native Go structure.
|
||||
// arbitrary map[string]interface{} into a native Go structure.
|
||||
//
|
||||
// The Go structure can be arbitrarily complex, containing slices,
|
||||
// other structs, etc. and the decoder will properly decode nested
|
||||
@@ -8,6 +8,7 @@
|
||||
package mapstructure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@@ -19,10 +20,20 @@ import (
|
||||
// DecodeHookFunc is the callback function that can be used for
|
||||
// data transformations. See "DecodeHook" in the DecoderConfig
|
||||
// struct.
|
||||
type DecodeHookFunc func(
|
||||
from reflect.Kind,
|
||||
to reflect.Kind,
|
||||
data interface{}) (interface{}, error)
|
||||
//
|
||||
// The type should be DecodeHookFuncType or DecodeHookFuncKind.
|
||||
// Either is accepted. Types are a superset of Kinds (Types can return
|
||||
// Kinds) and are generally a richer thing to use, but Kinds are simpler
|
||||
// if you only need those.
|
||||
//
|
||||
// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
|
||||
// we started with Kinds and then realized Types were the better solution,
|
||||
// but have a promise to not break backwards compat so we now support
|
||||
// both.
|
||||
type DecodeHookFunc interface{}
|
||||
|
||||
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
|
||||
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
|
||||
|
||||
// DecoderConfig is the configuration that is used to create a new decoder
|
||||
// and allows customization of various aspects of decoding.
|
||||
@@ -40,6 +51,11 @@ type DecoderConfig struct {
|
||||
// (extra keys).
|
||||
ErrorUnused bool
|
||||
|
||||
// ZeroFields, if set to true, will zero fields before writing them.
|
||||
// For example, a map will be emptied before decoded values are put in
|
||||
// it. If this is false, a map will be merged.
|
||||
ZeroFields bool
|
||||
|
||||
// If WeaklyTypedInput is true, the decoder will make the following
|
||||
// "weak" conversions:
|
||||
//
|
||||
@@ -51,6 +67,11 @@ type DecoderConfig struct {
|
||||
// - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
|
||||
// FALSE, false, False. Anything else is an error)
|
||||
// - empty array = empty map and vice versa
|
||||
// - negative numbers to overflowed uint values (base 10)
|
||||
// - slice of maps to a merged map
|
||||
// - single values are converted to slices if required. Each
|
||||
// element is weakly decoded. For example: "4" can become []int{4}
|
||||
// if the target type is an int slice.
|
||||
//
|
||||
WeaklyTypedInput bool
|
||||
|
||||
@@ -180,9 +201,11 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
||||
if d.config.DecodeHook != nil {
|
||||
// We have a DecodeHook, so let's pre-process the data.
|
||||
var err error
|
||||
data, err = d.config.DecodeHook(getKind(dataVal), getKind(val), data)
|
||||
data, err = DecodeHookExec(
|
||||
d.config.DecodeHook,
|
||||
dataVal.Type(), val.Type(), data)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("error decoding '%s': %s", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,6 +232,8 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
||||
err = d.decodePtr(name, data, val)
|
||||
case reflect.Slice:
|
||||
err = d.decodeSlice(name, data, val)
|
||||
case reflect.Func:
|
||||
err = d.decodeFunc(name, data, val)
|
||||
default:
|
||||
// If we reached this point then we weren't able to decode it
|
||||
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
|
||||
@@ -227,6 +252,10 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
||||
// value to "data" of that type.
|
||||
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
if !dataVal.IsValid() {
|
||||
dataVal = reflect.Zero(val.Type())
|
||||
}
|
||||
|
||||
dataValType := dataVal.Type()
|
||||
if !dataValType.AssignableTo(val.Type()) {
|
||||
return fmt.Errorf(
|
||||
@@ -283,6 +312,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
|
||||
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
@@ -304,6 +334,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Int64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetInt(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
@@ -319,11 +357,21 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
val.SetUint(uint64(dataVal.Int()))
|
||||
i := dataVal.Int()
|
||||
if i < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %d overflows uint",
|
||||
name, i)
|
||||
}
|
||||
val.SetUint(uint64(i))
|
||||
case dataKind == reflect.Uint:
|
||||
val.SetUint(dataVal.Uint())
|
||||
case dataKind == reflect.Float32:
|
||||
val.SetUint(uint64(dataVal.Float()))
|
||||
f := dataVal.Float()
|
||||
if f < 0 && !d.config.WeaklyTypedInput {
|
||||
return fmt.Errorf("cannot parse '%s', %f overflows uint",
|
||||
name, f)
|
||||
}
|
||||
val.SetUint(uint64(f))
|
||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||
if dataVal.Bool() {
|
||||
val.SetUint(1)
|
||||
@@ -380,6 +428,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
|
||||
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.ValueOf(data)
|
||||
dataKind := getKind(dataVal)
|
||||
dataType := dataVal.Type()
|
||||
|
||||
switch {
|
||||
case dataKind == reflect.Int:
|
||||
@@ -401,6 +450,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
|
||||
} else {
|
||||
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
|
||||
}
|
||||
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||
jn := data.(json.Number)
|
||||
i, err := jn.Float64()
|
||||
if err != nil {
|
||||
return fmt.Errorf(
|
||||
"error decoding json.Number into %s: %s", name, err)
|
||||
}
|
||||
val.SetFloat(i)
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
@@ -415,22 +472,43 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
|
||||
valKeyType := valType.Key()
|
||||
valElemType := valType.Elem()
|
||||
|
||||
// Make a new map to hold our result
|
||||
mapType := reflect.MapOf(valKeyType, valElemType)
|
||||
valMap := reflect.MakeMap(mapType)
|
||||
// By default we overwrite keys in the current map
|
||||
valMap := val
|
||||
|
||||
// If the map is nil or we're purposely zeroing fields, make a new map
|
||||
if valMap.IsNil() || d.config.ZeroFields {
|
||||
// Make a new map to hold our result
|
||||
mapType := reflect.MapOf(valKeyType, valElemType)
|
||||
valMap = reflect.MakeMap(mapType)
|
||||
}
|
||||
|
||||
// Check input type
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
if dataVal.Kind() != reflect.Map {
|
||||
// Accept empty array/slice instead of an empty map in weakly typed mode
|
||||
if d.config.WeaklyTypedInput &&
|
||||
(dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) &&
|
||||
dataVal.Len() == 0 {
|
||||
val.Set(valMap)
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||
// In weak mode, we accept a slice of maps as an input...
|
||||
if d.config.WeaklyTypedInput {
|
||||
switch dataVal.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Special case for BC reasons (covered by tests)
|
||||
if dataVal.Len() == 0 {
|
||||
val.Set(valMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
err := d.decode(
|
||||
fmt.Sprintf("%s[%d]", name, i),
|
||||
dataVal.Index(i).Interface(), val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||
}
|
||||
|
||||
// Accumulate errors
|
||||
@@ -473,7 +551,12 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
valType := val.Type()
|
||||
valElemType := valType.Elem()
|
||||
realVal := reflect.New(valElemType)
|
||||
|
||||
realVal := val
|
||||
if realVal.IsNil() || d.config.ZeroFields {
|
||||
realVal = reflect.New(valElemType)
|
||||
}
|
||||
|
||||
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -482,6 +565,19 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
if val.Type() != dataVal.Type() {
|
||||
return fmt.Errorf(
|
||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||
name, val.Type(), dataVal.Type())
|
||||
}
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
dataValKind := dataVal.Kind()
|
||||
@@ -489,26 +585,44 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
||||
valElemType := valType.Elem()
|
||||
sliceType := reflect.SliceOf(valElemType)
|
||||
|
||||
// Check input type
|
||||
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||
// Accept empty map instead of array/slice in weakly typed mode
|
||||
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
|
||||
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||
return nil
|
||||
} else {
|
||||
valSlice := val
|
||||
if valSlice.IsNil() || d.config.ZeroFields {
|
||||
// Check input type
|
||||
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||
if d.config.WeaklyTypedInput {
|
||||
switch {
|
||||
// Empty maps turn into empty slices
|
||||
case dataValKind == reflect.Map:
|
||||
if dataVal.Len() == 0 {
|
||||
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||
return nil
|
||||
}
|
||||
|
||||
// All other types we try to convert to the slice type
|
||||
// and "lift" it into it. i.e. a string becomes a string slice.
|
||||
default:
|
||||
// Just re-try this function with data as a slice.
|
||||
return d.decodeSlice(name, []interface{}{data}, val)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf(
|
||||
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||
}
|
||||
}
|
||||
|
||||
// Make a new slice to hold our result, same size as the original data.
|
||||
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||
}
|
||||
|
||||
// Make a new slice to hold our result, same size as the original data.
|
||||
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||
}
|
||||
|
||||
// Accumulate any errors
|
||||
errors := make([]string, 0)
|
||||
|
||||
for i := 0; i < dataVal.Len(); i++ {
|
||||
currentData := dataVal.Index(i).Interface()
|
||||
for valSlice.Len() <= i {
|
||||
valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
|
||||
}
|
||||
currentField := valSlice.Index(i)
|
||||
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
@@ -530,6 +644,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
||||
|
||||
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
|
||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||
|
||||
// If the type of the value to write to and the data match directly,
|
||||
// then we just set it directly instead of recursing into the structure.
|
||||
if dataVal.Type() == val.Type() {
|
||||
val.Set(dataVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
dataValKind := dataVal.Kind()
|
||||
if dataValKind != reflect.Map {
|
||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
|
||||
@@ -565,32 +687,29 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
|
||||
if fieldType.Anonymous {
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
// If "squash" is specified in the tag, we squash the field down.
|
||||
squash := false
|
||||
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
|
||||
for _, tag := range tagParts[1:] {
|
||||
if tag == "squash" {
|
||||
squash = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if squash {
|
||||
if fieldKind != reflect.Struct {
|
||||
errors = appendErrors(errors,
|
||||
fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
|
||||
continue
|
||||
}
|
||||
|
||||
// We have an embedded field. We "squash" the fields down
|
||||
// if specified in the tag.
|
||||
squash := false
|
||||
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
|
||||
for _, tag := range tagParts[1:] {
|
||||
if tag == "squash" {
|
||||
squash = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if squash {
|
||||
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
|
||||
} else {
|
||||
structs = append(structs, val.FieldByName(fieldType.Name))
|
||||
continue
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Normal struct field, store it away
|
||||
@@ -612,7 +731,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||
if !rawMapVal.IsValid() {
|
||||
// Do a slower search by iterating over each key and
|
||||
// doing case-insensitive search.
|
||||
for dataValKey, _ := range dataValKeys {
|
||||
for dataValKey := range dataValKeys {
|
||||
mK, ok := dataValKey.Interface().(string)
|
||||
if !ok {
|
||||
// Not a string key
|
||||
@@ -660,7 +779,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||
|
||||
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
|
||||
keys := make([]string, 0, len(dataValKeysUnused))
|
||||
for rawKey, _ := range dataValKeysUnused {
|
||||
for rawKey := range dataValKeysUnused {
|
||||
keys = append(keys, rawKey.(string))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
@@ -675,7 +794,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||
|
||||
// Add the unused keys to the list of unused keys if we're tracking metadata
|
||||
if d.config.Metadata != nil {
|
||||
for rawKey, _ := range dataValKeysUnused {
|
||||
for rawKey := range dataValKeysUnused {
|
||||
key := rawKey.(string)
|
||||
if name != "" {
|
||||
key = fmt.Sprintf("%s.%s", name, key)
|
||||
|
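Two of the behaviours added in this revision are direct handling of json.Number values and, under WeaklyTypedInput, lifting a single value into a slice. A minimal sketch touching both; the Server struct and JSON payload are illustrative only:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Server struct {
	Port    int
	Aliases []string
}

func main() {
	// Decode JSON with UseNumber so numbers arrive as json.Number values.
	dec := json.NewDecoder(bytes.NewReader([]byte(`{"port": 8080, "aliases": "web"}`)))
	dec.UseNumber()
	var raw map[string]interface{}
	if err := dec.Decode(&raw); err != nil {
		log.Fatal(err)
	}

	var srv Server
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// WeaklyTypedInput lifts the single string "web" into []string{"web"}.
		WeaklyTypedInput: true,
		Result:           &srv,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", srv) // {Port:8080 Aliases:[web]}
}
```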
18
vendor/github.com/prometheus/client_golang/AUTHORS.md
generated
vendored
@@ -1,18 +0,0 @@
|
||||
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||
Julius Volz in 2012.
|
||||
|
||||
Maintainers of this repository:
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Bernerd Schaefer <bj.schaefer@gmail.com>
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Daniel Bornkessel <daniel@soundcloud.com>
|
||||
* Jeff Younker <jeff@drinktomi.com>
|
||||
* Julius Volz <julius@soundcloud.com>
|
||||
* Matt T. Proud <matt.proud@gmail.com>
|
||||
* Tobias Schmidt <ts@soundcloud.com>
|
||||
|
5
vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
@@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
|
||||
|
||||
The following components are included in this product:
|
||||
|
||||
goautoneg
|
||||
http://bitbucket.org/ww/goautoneg
|
||||
Copyright 2011, Open Knowledge Foundation Ltd.
|
||||
See README.txt for license details.
|
||||
|
||||
perks - a fork of https://github.com/bmizerany/perks
|
||||
https://github.com/beorn7/perks
|
||||
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
|
||||
|
12
vendor/github.com/prometheus/client_golang/prometheus/BUILD
generated
vendored
@@ -14,16 +14,18 @@ go_library(
"counter.go",
"desc.go",
"doc.go",
"expvar.go",
"expvar_collector.go",
"fnv.go",
"gauge.go",
"go_collector.go",
"histogram.go",
"http.go",
"metric.go",
"observer.go",
"process_collector.go",
"push.go",
"registry.go",
"summary.go",
"timer.go",
"untyped.go",
"value.go",
"vec.go",
@@ -34,6 +36,7 @@ go_library(
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/prometheus/client_model/go:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/github.com/prometheus/procfs:go_default_library",
],
)
@@ -47,6 +50,9 @@ filegroup(

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//vendor/github.com/prometheus/client_golang/prometheus/promhttp:all-srcs",
],
tags = ["automanaged"],
)
54
vendor/github.com/prometheus/client_golang/prometheus/README.md
generated
vendored
@@ -1,53 +1 @@
|
||||
# Overview
|
||||
This is the [Prometheus](http://www.prometheus.io) telemetric
|
||||
instrumentation client [Go](http://golang.org) client library. It
|
||||
enable authors to define process-space metrics for their servers and
|
||||
expose them through a web service interface for extraction,
|
||||
aggregation, and a whole slew of other post processing techniques.
|
||||
|
||||
# Installing
|
||||
$ go get github.com/prometheus/client_golang/prometheus
|
||||
|
||||
# Example
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
indexed = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "my_company",
|
||||
Subsystem: "indexer",
|
||||
Name: "documents_indexed",
|
||||
Help: "The number of documents indexed.",
|
||||
})
|
||||
size = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "my_company",
|
||||
Subsystem: "storage",
|
||||
Name: "documents_total_size_bytes",
|
||||
Help: "The total size of all documents in the storage.",
|
||||
})
|
||||
)
|
||||
|
||||
func main() {
|
||||
http.Handle("/metrics", prometheus.Handler())
|
||||
|
||||
indexed.Inc()
|
||||
size.Set(5)
|
||||
|
||||
http.ListenAndServe(":8080", nil)
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(indexed)
|
||||
prometheus.MustRegister(size)
|
||||
}
|
||||
```
|
||||
|
||||
# Documentation
|
||||
|
||||
[](https://godoc.org/github.com/prometheus/client_golang)
|
||||
See [](https://godoc.org/github.com/prometheus/client_golang/prometheus).
|
||||
|
52
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
@@ -15,15 +15,15 @@ package prometheus
|
||||
|
||||
// Collector is the interface implemented by anything that can be used by
|
||||
// Prometheus to collect metrics. A Collector has to be registered for
|
||||
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
|
||||
// collection. See Registerer.Register.
|
||||
//
|
||||
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
|
||||
// also Collectors (which only ever collect one metric, namely itself). An
|
||||
// implementer of Collector may, however, collect multiple metrics in a
|
||||
// coordinated fashion and/or create metrics on the fly. Examples for collectors
|
||||
// already implemented in this library are the metric vectors (i.e. collection
|
||||
// of multiple instances of the same Metric but with different label values)
|
||||
// like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
||||
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
||||
// namely itself). An implementer of Collector may, however, collect multiple
|
||||
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
||||
// for collectors already implemented in this library are the metric vectors
|
||||
// (i.e. collection of multiple instances of the same Metric but with different
|
||||
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||
type Collector interface {
|
||||
// Describe sends the super-set of all possible descriptors of metrics
|
||||
// collected by this Collector to the provided channel and returns once
|
||||
@@ -37,39 +37,39 @@ type Collector interface {
|
||||
// executing this method, it must send an invalid descriptor (created
|
||||
// with NewInvalidDesc) to signal the error to the registry.
|
||||
Describe(chan<- *Desc)
|
||||
// Collect is called by Prometheus when collecting metrics. The
|
||||
// implementation sends each collected metric via the provided channel
|
||||
// and returns once the last metric has been sent. The descriptor of
|
||||
// each sent metric is one of those returned by Describe. Returned
|
||||
// metrics that share the same descriptor must differ in their variable
|
||||
// label values. This method may be called concurrently and must
|
||||
// therefore be implemented in a concurrency safe way. Blocking occurs
|
||||
// at the expense of total performance of rendering all registered
|
||||
// metrics. Ideally, Collector implementations support concurrent
|
||||
// readers.
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent. The
|
||||
// descriptor of each sent metric is one of those returned by
|
||||
// Describe. Returned metrics that share the same descriptor must differ
|
||||
// in their variable label values. This method may be called
|
||||
// concurrently and must therefore be implemented in a concurrency safe
|
||||
// way. Blocking occurs at the expense of total performance of rendering
|
||||
// all registered metrics. Ideally, Collector implementations support
|
||||
// concurrent readers.
|
||||
Collect(chan<- Metric)
|
||||
}
|
||||
|
||||
// SelfCollector implements Collector for a single Metric so that that the
|
||||
// Metric collects itself. Add it as an anonymous field to a struct that
|
||||
// implements Metric, and call Init with the Metric itself as an argument.
|
||||
type SelfCollector struct {
|
||||
// selfCollector implements Collector for a single Metric so that the Metric
|
||||
// collects itself. Add it as an anonymous field to a struct that implements
|
||||
// Metric, and call init with the Metric itself as an argument.
|
||||
type selfCollector struct {
|
||||
self Metric
|
||||
}
|
||||
|
||||
// Init provides the SelfCollector with a reference to the metric it is supposed
|
||||
// init provides the selfCollector with a reference to the metric it is supposed
|
||||
// to collect. It is usually called within the factory function to create a
|
||||
// metric. See example.
|
||||
func (c *SelfCollector) Init(self Metric) {
|
||||
func (c *selfCollector) init(self Metric) {
|
||||
c.self = self
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (c *SelfCollector) Describe(ch chan<- *Desc) {
|
||||
func (c *selfCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.self.Desc()
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (c *SelfCollector) Collect(ch chan<- Metric) {
|
||||
func (c *selfCollector) Collect(ch chan<- Metric) {
|
||||
ch <- c.self
|
||||
}
|
||||
|
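The selfCollector change above is easier to see outside the diff: a metric embeds selfCollector and hands itself to init, so the single metric also satisfies Collector. Since selfCollector is unexported, the sketch below mimics the pattern with local stand-in types rather than the real prometheus interfaces; the `metric` interface, `counter` type, and metric name are illustrative only.

```go
package main

import "fmt"

// metric stands in for prometheus.Metric; the real interface also requires
// Desc() and Write(), which are omitted to keep the sketch short.
type metric interface {
	Name() string
}

// selfCollector mirrors the pattern from the hunk above: it keeps a reference
// to the metric that embeds it, so that metric can collect itself.
type selfCollector struct {
	self metric
}

// init wires up self-collection, as the real selfCollector.init does.
func (c *selfCollector) init(self metric) { c.self = self }

// collect sends the single wrapped metric, like selfCollector.Collect sending
// c.self on the channel.
func (c *selfCollector) collect(ch chan<- metric) { ch <- c.self }

// counter embeds selfCollector, exactly as prometheus' counter does, and its
// constructor calls init on the freshly built value (result.init(result)).
type counter struct {
	selfCollector
	name string
}

func (c *counter) Name() string { return c.name }

func main() {
	c := &counter{name: "requests_total"}
	c.init(c) // self-collection wiring, as in NewCounter

	ch := make(chan metric, 1)
	c.collect(ch)
	fmt.Println((<-ch).Name()) // requests_total
}
```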
37
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
@@ -15,7 +15,6 @@ package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"hash/fnv"
|
||||
)
|
||||
|
||||
// Counter is a Metric that represents a single numerical value that only ever
|
||||
@@ -31,13 +30,8 @@ type Counter interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Set is used to set the Counter to an arbitrary value. It is only used
|
||||
// if you have to transfer a value from an external counter into this
|
||||
// Prometheus metric. Do not use it for regular handling of a
|
||||
// Prometheus counter (as it can be used to break the contract of
|
||||
// monotonically increasing values).
|
||||
Set(float64)
|
||||
// Inc increments the counter by 1.
|
||||
// Inc increments the counter by 1. Use Add to increment it by arbitrary
|
||||
// non-negative values.
|
||||
Inc()
|
||||
// Add adds the given value to the counter. It panics if the value is <
|
||||
// 0.
|
||||
@@ -56,7 +50,7 @@ func NewCounter(opts CounterOpts) Counter {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
||||
result.Init(result) // Init self-collection.
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -80,7 +74,7 @@ func (c *counter) Add(v float64) {
|
||||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
||||
// detailed documentation.
|
||||
type CounterVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||
@@ -94,20 +88,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &CounterVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
result.Init(result) // Init self-collection.
|
||||
return result
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
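For context on the CounterVec constructor rewrite above, here is a minimal, hedged usage sketch of the exported API. The metric name and label are made up for illustration; the calls themselves (NewCounterVec, MustRegister, WithLabelValues, Inc, Add) are the ones shown or referenced in this diff.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A CounterVec bundles counters sharing one name but differing in label
	// values; NewCounterVec now builds it via newMetricVec, as in the hunk
	// above, but the exported API is unchanged.
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests, partitioned by status code.",
		},
		[]string{"code"},
	)
	prometheus.MustRegister(httpReqs)

	// Counter no longer exposes Set (removed in this diff); counters only
	// move up, via Inc or Add.
	httpReqs.WithLabelValues("200").Inc()
	httpReqs.WithLabelValues("500").Add(3)
}
```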
55
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
@@ -1,24 +1,30 @@
|
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
var (
|
||||
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
|
||||
labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
)
|
||||
|
||||
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
||||
// label names.
|
||||
const reservedLabelPrefix = "__"
|
||||
@@ -67,7 +73,7 @@ type Desc struct {
|
||||
// Help string. Each Desc with the same fqName must have the same
|
||||
// dimHash.
|
||||
dimHash uint64
|
||||
// err is an error that occured during construction. It is reported on
|
||||
// err is an error that occurred during construction. It is reported on
|
||||
// registration time.
|
||||
err error
|
||||
}
|
||||
@@ -92,7 +98,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
||||
d.err = errors.New("empty help string")
|
||||
return d
|
||||
}
|
||||
if !metricNameRE.MatchString(fqName) {
|
||||
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
||||
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
||||
return d
|
||||
}
|
||||
@@ -131,31 +137,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
||||
d.err = errors.New("duplicate label names")
|
||||
return d
|
||||
}
|
||||
h := fnv.New64a()
|
||||
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
|
||||
vh := hashNew()
|
||||
for _, val := range labelValues {
|
||||
b.Reset()
|
||||
b.WriteString(val)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
vh = hashAdd(vh, val)
|
||||
vh = hashAddByte(vh, separatorByte)
|
||||
}
|
||||
d.id = h.Sum64()
|
||||
d.id = vh
|
||||
// Sort labelNames so that order doesn't matter for the hash.
|
||||
sort.Strings(labelNames)
|
||||
// Now hash together (in this order) the help string and the sorted
|
||||
// label names.
|
||||
h.Reset()
|
||||
b.Reset()
|
||||
b.WriteString(help)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
lh := hashNew()
|
||||
lh = hashAdd(lh, help)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
for _, labelName := range labelNames {
|
||||
b.Reset()
|
||||
b.WriteString(labelName)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
lh = hashAdd(lh, labelName)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
}
|
||||
d.dimHash = h.Sum64()
|
||||
d.dimHash = lh
|
||||
|
||||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
||||
for n, v := range constLabels {
|
||||
@@ -196,6 +195,6 @@ func (d *Desc) String() string {
|
||||
}
|
||||
|
||||
func checkLabelName(l string) bool {
|
||||
return labelNameRE.MatchString(l) &&
|
||||
return model.LabelName(l).IsValid() &&
|
||||
!strings.HasPrefix(l, reservedLabelPrefix)
|
||||
}
|
||||
|
183
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
@@ -11,25 +11,26 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package prometheus provides embeddable metric primitives for servers and
|
||||
// standardized exposition of telemetry through a web services interface.
|
||||
// Package prometheus provides metrics primitives to instrument code for
|
||||
// monitoring. It also offers a registry for metrics. Sub-packages allow to
|
||||
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
||||
// Pushgateway (package push).
|
||||
//
|
||||
// All exported functions and methods are safe to be used concurrently unless
|
||||
// specified otherwise.
|
||||
//
|
||||
// To expose metrics registered with the Prometheus registry, an HTTP server
|
||||
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
|
||||
// A Basic Example
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
//
|
||||
// As a starting point a very basic usage example:
|
||||
// As a starting point, a very basic usage example:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "log"
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
// )
|
||||
//
|
||||
// var (
|
||||
@@ -37,73 +38,149 @@
|
||||
// Name: "cpu_temperature_celsius",
|
||||
// Help: "Current temperature of the CPU.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// },
|
||||
// []string{"device"},
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// func init() {
|
||||
// // Metrics have to be registered to be exposed:
|
||||
// prometheus.MustRegister(cpuTemp)
|
||||
// prometheus.MustRegister(hdFailures)
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// cpuTemp.Set(65.3)
|
||||
// hdFailures.Inc()
|
||||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
// http.ListenAndServe(":8080", nil)
|
||||
// // The Handler function provides a default handler to expose metrics
|
||||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter.
|
||||
// It also exports some stats about the HTTP usage of the /metrics
|
||||
// endpoint. (See the Handler function for more detail.)
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
||||
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
||||
//
|
||||
// Two more advanced metric types are the Summary and Histogram.
|
||||
// Metrics
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary, and
|
||||
// Histogram, a very important part of the Prometheus data model is the
|
||||
// partitioning of samples along dimensions called labels, which results in
|
||||
// The number of exported identifiers in this package might appear a bit
|
||||
// overwhelming. However, in addition to the basic plumbing shown in the example
|
||||
// above, you only need to understand the different metric types and their
|
||||
// vector versions for basic usage.
|
||||
//
|
||||
// Above, you have already touched the Counter and the Gauge. There are two more
|
||||
// advanced metric types: the Summary and Histogram. A more thorough description
|
||||
// of those four metric types can be found in the Prometheus docs:
|
||||
// https://prometheus.io/docs/concepts/metric_types/
|
||||
//
|
||||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
||||
// Prometheus server not to assume anything about its type.
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
||||
// the partitioning of samples along dimensions called labels, which results in
|
||||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
||||
// and HistogramVec.
|
||||
// HistogramVec, and UntypedVec.
|
||||
//
|
||||
// Those are all the parts needed for basic usage. Detailed documentation and
|
||||
// examples are provided below.
|
||||
// While only the fundamental metric types implement the Metric interface, both
|
||||
// the metrics and their vector versions implement the Collector interface. A
|
||||
// Collector manages the collection of a number of Metrics, but for convenience,
|
||||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
||||
// SummaryVec, HistogramVec, and UntypedVec are not.
|
||||
//
|
||||
// Everything else this package offers is essentially for "power users" only. A
|
||||
// few pointers to "power user features":
|
||||
// To create instances of Metrics and their vector versions, you need a suitable
|
||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
|
||||
// UntypedOpts.
|
||||
//
|
||||
// All the various ...Opts structs have a ConstLabels field for labels that
|
||||
// never change their value (which is only useful under special circumstances,
|
||||
// see documentation of the Opts type).
|
||||
// Custom Collectors and constant Metrics
|
||||
//
|
||||
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
|
||||
// not to assume anything about its type.
|
||||
// While you could create your own implementations of Metric, most likely you
|
||||
// will only ever implement the Collector interface on your own. At a first
|
||||
// glance, a custom Collector seems handy to bundle Metrics for common
|
||||
// registration (with the prime example of the different metric vectors above,
|
||||
// which bundle all the metrics of the same name but with different labels).
|
||||
//
|
||||
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
|
||||
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
|
||||
// There is a more involved use case, too: If you already have metrics
|
||||
// available, created outside of the Prometheus context, you don't need the
|
||||
// interface of the various Metric types. You essentially want to mirror the
|
||||
// existing numbers into Prometheus Metrics during collection. An own
|
||||
// implementation of the Collector interface is perfect for that. You can create
|
||||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
||||
// NewConstSummary (and their respective Must… versions). That will happen in
|
||||
// the Collect method. The Describe method has to return separate Desc
|
||||
// instances, representative of the “throw-away” metrics to be created later.
|
||||
// NewDesc comes in handy to create those Desc instances.
|
||||
//
|
||||
// For custom metric collection, there are two entry points: Custom Metric
|
||||
// implementations and custom Collector implementations. A Metric is the
|
||||
// fundamental unit in the Prometheus data model: a sample at a point in time
|
||||
// together with its meta-data (like its fully-qualified name and any number of
|
||||
// pairs of label name and label value) that knows how to marshal itself into a
|
||||
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
|
||||
// gets registered with the Prometheus registry and manages the collection of
|
||||
// one or more Metrics. Many parts of this package are building blocks for
|
||||
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
|
||||
// metrics under the hood, and by Collectors to describe the Metrics to be
|
||||
// collected, but only to be dealt with by users if they implement their own
|
||||
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
|
||||
// in handy. Other useful components for Metric and Collector implementation
|
||||
// include: LabelPairSorter to sort the DTO version of label pairs,
|
||||
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
|
||||
// collection time, MetricVec to bundle custom Metrics into a metric vector
|
||||
// Collector, SelfCollector to make a custom Metric collect itself.
|
||||
// The Collector example illustrates the use case. You can also look at the
|
||||
// source code of the processCollector (mirroring process metrics), the
|
||||
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
|
||||
// metrics) as examples that are used in this package itself.
|
||||
//
|
||||
// A good example for a custom Collector is the ExpVarCollector included in this
|
||||
// package, which exports variables exported via the "expvar" package as
|
||||
// Prometheus metrics.
|
||||
// If you just need to call a function to get a single float value to collect as
|
||||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
||||
// shortcuts.
|
||||
//
|
||||
// Advanced Uses of the Registry
|
||||
//
|
||||
// While MustRegister is the by far most common way of registering a Collector,
|
||||
// sometimes you might want to handle the errors the registration might cause.
|
||||
// As suggested by the name, MustRegister panics if an error occurs. With the
|
||||
// Register function, the error is returned and can be handled.
|
||||
//
|
||||
// An error is returned if the registered Collector is incompatible or
|
||||
// inconsistent with already registered metrics. The registry aims for
|
||||
// consistency of the collected metrics according to the Prometheus data model.
|
||||
// Inconsistencies are ideally detected at registration time, not at collect
|
||||
// time. The former will usually be detected at start-up time of a program,
|
||||
// while the latter will only happen at scrape time, possibly not even on the
|
||||
// first scrape if the inconsistency only becomes relevant later. That is the
|
||||
// main reason why a Collector and a Metric have to describe themselves to the
|
||||
// registry.
|
||||
//
|
||||
// So far, everything we did operated on the so-called default registry, as it
|
||||
// can be found in the global DefaultRegistry variable. With NewRegistry, you
|
||||
// can create a custom registry, or you can even implement the Registerer or
|
||||
// Gatherer interfaces yourself. The methods Register and Unregister work in the
|
||||
// same way on a custom registry as the global functions Register and Unregister
|
||||
// on the default registry.
|
||||
//
|
||||
// There are a number of uses for custom registries: You can use registries with
|
||||
// special properties, see NewPedanticRegistry. You can avoid global state, as
|
||||
// it is imposed by the DefaultRegistry. You can use multiple registries at the
|
||||
// same time to expose different metrics in different ways. You can use separate
|
||||
// registries for testing purposes.
|
||||
//
|
||||
// Also note that the DefaultRegistry comes registered with a Collector for Go
|
||||
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
|
||||
// NewProcessCollector). With a custom registry, you are in control and decide
|
||||
// yourself about the Collectors to register.
|
||||
//
|
||||
// HTTP Exposition
|
||||
//
|
||||
// The Registry implements the Gatherer interface. The caller of the Gather
|
||||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
||||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
||||
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
|
||||
// (The top-level functions in the prometheus package are deprecated.)
|
||||
//
|
||||
// Pushing to the Pushgateway
|
||||
//
|
||||
// Function for pushing to the Pushgateway can be found in the push sub-package.
|
||||
//
|
||||
// Graphite Bridge
|
||||
//
|
||||
// Functions and examples to push metrics from a Gatherer to Graphite can be
|
||||
// found in the graphite sub-package.
|
||||
//
|
||||
// Other Means of Exposition
|
||||
//
|
||||
// More ways of exposing metrics can easily be added by following the approaches
|
||||
// of the existing implementations.
|
||||
package prometheus
|
||||
|
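The rewritten package documentation above describes creating "throw-away" metrics with NewConstMetric inside a custom Collector during Collect, with Describe returning the matching Desc instances. A small hedged sketch of that use case follows; the metric name and the hard-coded data source are invented for illustration.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueLengthDesc describes the throw-away metrics created in Collect.
var queueLengthDesc = prometheus.NewDesc(
	"worker_queue_length",
	"Current number of jobs in the worker queue.",
	[]string{"queue"}, nil,
)

// queueCollector mirrors the custom-Collector use case described above: it
// keeps no metric state of its own and builds const metrics on every scrape.
type queueCollector struct {
	// lengths would normally be read from the instrumented system; a fixed
	// map keeps the sketch self-contained.
	lengths map[string]float64
}

func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- queueLengthDesc
}

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	for q, l := range c.lengths {
		// MustNewConstMetric builds a Metric "on the fly", as the package
		// documentation recommends for mirroring existing numbers.
		ch <- prometheus.MustNewConstMetric(queueLengthDesc, prometheus.GaugeValue, l, q)
	}
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(queueCollector{lengths: map[string]float64{"default": 7}})
}
```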
@@ -18,21 +18,21 @@ import (
|
||||
"expvar"
|
||||
)
|
||||
|
||||
// ExpvarCollector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the ExpvarCollector is inherently
|
||||
// slow. Thus, the ExpvarCollector is probably great for experiments and
|
||||
// prototying, but you should seriously consider a more direct implementation of
|
||||
// Prometheus metrics for monitoring production systems.
|
||||
//
|
||||
// Use NewExpvarCollector to create new instances.
|
||||
type ExpvarCollector struct {
|
||||
type expvarCollector struct {
|
||||
exports map[string]*Desc
|
||||
}
|
||||
|
||||
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
|
||||
// to be registered with the Prometheus registry.
|
||||
// NewExpvarCollector returns a newly allocated expvar Collector that still has
|
||||
// to be registered with a Prometheus registry.
|
||||
//
|
||||
// An expvar Collector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the expvar Collector is inherently slower
|
||||
// than native Prometheus metrics. Thus, the expvar Collector is probably great
|
||||
// for experiments and prototying, but you should seriously consider a more
|
||||
// direct implementation of Prometheus metrics for monitoring production
|
||||
// systems.
|
||||
//
|
||||
// The exports map has the following meaning:
|
||||
//
|
||||
@@ -59,21 +59,21 @@ type ExpvarCollector struct {
|
||||
// sample values.
|
||||
//
|
||||
// Anything that does not fit into the scheme above is silently ignored.
|
||||
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
|
||||
return &ExpvarCollector{
|
||||
func NewExpvarCollector(exports map[string]*Desc) Collector {
|
||||
return &expvarCollector{
|
||||
exports: exports,
|
||||
}
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
|
||||
func (e *expvarCollector) Describe(ch chan<- *Desc) {
|
||||
for _, desc := range e.exports {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
|
||||
func (e *expvarCollector) Collect(ch chan<- Metric) {
|
||||
for name, desc := range e.exports {
|
||||
var m Metric
|
||||
expVar := expvar.Get(name)
|
29
vendor/github.com/prometheus/client_golang/prometheus/fnv.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
package prometheus

// Inline and byte-free variant of hash/fnv's fnv64a.

const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)

// hashNew initializies a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}

// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}

// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
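The desc.go hunk earlier switches label hashing from hash/fnv plus a bytes.Buffer to these allocation-free helpers. The sketch below copies the helpers from the new file and checks, for one illustrative label value, that they produce the same FNV-1a sum as the standard library; the separator value 255 matches separatorByte in metric.go.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// Constants and helpers copied from the fnv.go hunk above: FNV-1a without
// allocations, which is why desc.go no longer needs a bytes.Buffer.
const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func hashNew() uint64 { return offset64 }

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}

func main() {
	const sep = byte(255) // separatorByte in the prometheus package

	// Inline, allocation-free variant, as now used in NewDesc.
	h := hashNew()
	h = hashAdd(h, "get")
	h = hashAddByte(h, sep)

	// Standard-library equivalent that the old code used.
	std := fnv.New64a()
	std.Write([]byte("get"))
	std.Write([]byte{sep})

	fmt.Println(h == std.Sum64()) // true: both are FNV-1a over the same bytes
}
```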
28
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
@@ -13,8 +13,6 @@
|
||||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Gauge is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
@@ -29,16 +27,21 @@ type Gauge interface {
|
||||
|
||||
// Set sets the Gauge to an arbitrary value.
|
||||
Set(float64)
|
||||
// Inc increments the Gauge by 1.
|
||||
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
|
||||
// values.
|
||||
Inc()
|
||||
// Dec decrements the Gauge by 1.
|
||||
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
|
||||
// values.
|
||||
Dec()
|
||||
// Add adds the given value to the Gauge. (The value can be
|
||||
// negative, resulting in a decrease of the Gauge.)
|
||||
// Add adds the given value to the Gauge. (The value can be negative,
|
||||
// resulting in a decrease of the Gauge.)
|
||||
Add(float64)
|
||||
// Sub subtracts the given value from the Gauge. (The value can be
|
||||
// negative, resulting in an increase of the Gauge.)
|
||||
Sub(float64)
|
||||
|
||||
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
|
||||
SetToCurrentTime()
|
||||
}
|
||||
|
||||
// GaugeOpts is an alias for Opts. See there for doc comments.
|
||||
@@ -60,7 +63,7 @@ func NewGauge(opts GaugeOpts) Gauge {
|
||||
// (e.g. number of operations queued, partitioned by user and operation
|
||||
// type). Create instances with NewGaugeVec.
|
||||
type GaugeVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||
@@ -74,14 +77,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &GaugeVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
47
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
@@ -8,8 +8,9 @@ import (
|
||||
)
|
||||
|
||||
type goCollector struct {
|
||||
goroutines Gauge
|
||||
gcDesc *Desc
|
||||
goroutinesDesc *Desc
|
||||
threadsDesc *Desc
|
||||
gcDesc *Desc
|
||||
|
||||
// metrics to describe and collect
|
||||
metrics memStatsMetrics
|
||||
@@ -17,13 +18,16 @@ type goCollector struct {
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
func NewGoCollector() *goCollector {
|
||||
func NewGoCollector() Collector {
|
||||
return &goCollector{
|
||||
goroutines: NewGauge(GaugeOpts{
|
||||
Namespace: "go",
|
||||
Name: "goroutines",
|
||||
Help: "Number of goroutines that currently exist.",
|
||||
}),
|
||||
goroutinesDesc: NewDesc(
|
||||
"go_goroutines",
|
||||
"Number of goroutines that currently exist.",
|
||||
nil, nil),
|
||||
threadsDesc: NewDesc(
|
||||
"go_threads",
|
||||
"Number of OS threads created",
|
||||
nil, nil),
|
||||
gcDesc: NewDesc(
|
||||
"go_gc_duration_seconds",
|
||||
"A summary of the GC invocation durations.",
|
||||
@@ -48,7 +52,7 @@ func NewGoCollector() *goCollector {
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("sys_bytes"),
|
||||
"Number of bytes obtained by system. Sum of all system allocations.",
|
||||
"Number of bytes obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
||||
@@ -111,12 +115,12 @@ func NewGoCollector() *goCollector {
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_released_bytes_total"),
|
||||
"Total number of heap bytes released to OS.",
|
||||
memstatNamespace("heap_released_bytes"),
|
||||
"Number of heap bytes released to OS.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
||||
valType: CounterValue,
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_objects"),
|
||||
@@ -211,7 +215,15 @@ func NewGoCollector() *goCollector {
|
||||
"Number of seconds since 1970 of last garbage collection.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) },
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("gc_cpu_fraction"),
|
||||
"The fraction of this program's available CPU time used by the GC since the program started.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
|
||||
valType: GaugeValue,
|
||||
},
|
||||
},
|
||||
@@ -224,9 +236,9 @@ func memstatNamespace(s string) string {
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.goroutines.Desc()
|
||||
ch <- c.goroutinesDesc
|
||||
ch <- c.threadsDesc
|
||||
ch <- c.gcDesc
|
||||
|
||||
for _, i := range c.metrics {
|
||||
ch <- i.desc
|
||||
}
|
||||
@@ -234,8 +246,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||
c.goroutines.Set(float64(runtime.NumGoroutine()))
|
||||
ch <- c.goroutines
|
||||
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
|
||||
n, _ := runtime.ThreadCreateProfile(nil)
|
||||
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
|
||||
|
||||
var stats debug.GCStats
|
||||
stats.PauseQuantiles = make([]time.Duration, 5)
|
||||
|
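One fix in the go_collector.go hunk above deserves a note: in Go, ^ is bitwise XOR, not exponentiation, so the old expression evaluated to (ms.LastGC*10) XOR 9 rather than anything involving 10⁹; and since runtime.MemStats.LastGC is nanoseconds since the Unix epoch, the conversion the metric's help text promises is a division by 1e9. A tiny standalone illustration:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Buggy expression from the old go_collector.go: ^ is XOR and binds
	// more loosely than *, so this is (LastGC*10) XOR 9.
	buggy := float64(ms.LastGC*10 ^ 9)

	// Fixed expression: LastGC is nanoseconds since the Unix epoch, so
	// dividing by 1e9 yields seconds.
	fixed := float64(ms.LastGC) / 1e9

	fmt.Println(buggy, fixed)
}
```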
50
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
@@ -15,7 +15,6 @@ package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
@@ -52,11 +51,11 @@ type Histogram interface {
|
||||
// bucket of a histogram ("le" -> "less or equal").
|
||||
const bucketLabel = "le"
|
||||
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a network
|
||||
// service. Most likely, however, you will be required to define buckets
|
||||
// customized to your use case.
|
||||
var (
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a
|
||||
// network service. Most likely, however, you will be required to define
|
||||
// buckets customized to your use case.
|
||||
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||
|
||||
errBucketLabelNotAllowed = fmt.Errorf(
|
||||
@@ -211,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
||||
// Finally we know the final length of h.upperBounds and can make counts.
|
||||
h.counts = make([]uint64, len(h.upperBounds))
|
||||
|
||||
h.Init(h) // Init self-collection.
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
}
|
||||
|
||||
@@ -223,7 +222,7 @@ type histogram struct {
|
||||
sumBits uint64
|
||||
count uint64
|
||||
|
||||
SelfCollector
|
||||
selfCollector
|
||||
// Note that there is no mutex required.
|
||||
|
||||
desc *Desc
|
||||
@@ -288,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
|
||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewHistogramVec.
|
||||
type HistogramVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||
@@ -302,35 +301,30 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &HistogramVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Histogram and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
|
||||
// MetricVec. The difference is that this method returns an Observer and not a
|
||||
// Metric so that no type conversion to an Observer is required.
|
||||
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Histogram), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Histogram and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
|
||||
// difference is that this method returns an Observer and not a Metric so that no
|
||||
// type conversion to an Observer is required.
|
||||
func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Histogram), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -339,15 +333,15 @@ func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||
func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Histogram)
|
||||
func (m *HistogramVec) WithLabelValues(lvs ...string) Observer {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Observer)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (m *HistogramVec) With(labels Labels) Histogram {
|
||||
return m.MetricVec.With(labels).(Histogram)
|
||||
func (m *HistogramVec) With(labels Labels) Observer {
|
||||
return m.MetricVec.With(labels).(Observer)
|
||||
}
|
||||
|
||||
type constHistogram struct {
|
||||
|
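The histogram.go changes above make HistogramVec's lookup methods return the new Observer interface instead of Histogram. One practical effect, sketched below with an illustrative metric name, is that helper code can accept an Observer and work unchanged with either histograms or summaries.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// observeDuration depends only on the Observer interface introduced in this
// change, so it does not care whether the underlying metric is a Histogram
// or a Summary.
func observeDuration(obs prometheus.Observer, start time.Time) {
	obs.Observe(time.Since(start).Seconds())
}

func main() {
	latencies := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "request_duration_seconds",
			Help:    "Request latency distribution.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(latencies)

	start := time.Now()
	// WithLabelValues now returns an Observer rather than a Histogram,
	// as shown in the hunk above.
	observeDuration(latencies.WithLabelValues("GET"), start)
}
```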
243
vendor/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
@@ -15,14 +15,115 @@ package prometheus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
)
|
||||
|
||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||
// related should live. The functions here are just for avoiding
|
||||
// breakage. Everything is deprecated.
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var bufPool sync.Pool
|
||||
|
||||
func getBuf() *bytes.Buffer {
|
||||
buf := bufPool.Get()
|
||||
if buf == nil {
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
return buf.(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
|
||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
||||
// (which is not instrumented, but can be instrumented with the tooling provided
|
||||
// in package promhttp).
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||
}
|
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := DefaultGatherer.Gather()
|
||||
if err != nil {
|
||||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
buf := getBuf()
|
||||
defer giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf)
|
||||
enc := expfmt.NewEncoder(writer, contentType)
|
||||
var lastErr error
|
||||
for _, mf := range mfs {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
lastErr = err
|
||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if lastErr != nil && buf.Len() == 0 {
|
||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
||||
|
||||
var instLabels = []string{"method", "code"}
|
||||
|
||||
type nower interface {
|
||||
@@ -57,29 +158,52 @@ func nowSeries(t ...time.Time) nower {
|
||||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
|
||||
// package promhttp instead. The issues are the following:
|
||||
//
|
||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||
// aggregation across multiple instances is required.
|
||||
//
|
||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
||||
// seconds.
|
||||
//
|
||||
// - The size of the request is calculated in a separate goroutine. Since this
|
||||
// calculator requires access to the request header, it creates a race with
|
||||
// any writes to the header performed during request handling.
|
||||
// httputil.ReverseProxy is a prominent example for a handler
|
||||
// performing such writes.
|
||||
//
|
||||
// - It has additional issues with HTTP/2, cf.
|
||||
// https://github.com/prometheus/client_golang/issues/272.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler.
|
||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||
// issues).
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(
|
||||
SummaryOpts{
|
||||
Subsystem: "http",
|
||||
ConstLabels: Labels{"handler": handlerName},
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
},
|
||||
handlerFunc,
|
||||
)
|
||||
}
|
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
|
||||
// flexibility (at the cost of a more complex call syntax). As
|
||||
// InstrumentHandler, this function registers four metric collectors, but it
|
||||
// uses the provided SummaryOpts to create them. However, the fields "Name" and
|
||||
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
|
||||
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
|
||||
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
||||
// issues) but provides more flexibility (at the cost of a more complex call
|
||||
// syntax). As InstrumentHandler, this function registers four metric
|
||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
@@ -98,13 +222,20 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
|
||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||
// InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
|
||||
// more flexibility (at the cost of a more complex call syntax). See
|
||||
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||
// SummaryOpts are used.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||
// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
reqCnt := NewCounterVec(
|
||||
CounterOpts{
|
||||
@@ -116,34 +247,52 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
|
||||
},
|
||||
instLabels,
|
||||
)
|
||||
if err := Register(reqCnt); err != nil {
|
||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
||||
reqCnt = are.ExistingCollector.(*CounterVec)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
opts.Name = "request_duration_microseconds"
|
||||
opts.Help = "The HTTP request latencies in microseconds."
|
||||
reqDur := NewSummary(opts)
|
||||
if err := Register(reqDur); err != nil {
|
||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
||||
reqDur = are.ExistingCollector.(Summary)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
opts.Name = "request_size_bytes"
|
||||
opts.Help = "The HTTP request sizes in bytes."
|
||||
reqSz := NewSummary(opts)
|
||||
if err := Register(reqSz); err != nil {
|
||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
||||
reqSz = are.ExistingCollector.(Summary)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
opts.Name = "response_size_bytes"
|
||||
opts.Help = "The HTTP response sizes in bytes."
|
||||
resSz := NewSummary(opts)
|
||||
|
||||
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
|
||||
regReqDur := MustRegisterOrGet(reqDur).(Summary)
|
||||
regReqSz := MustRegisterOrGet(reqSz).(Summary)
|
||||
regResSz := MustRegisterOrGet(resSz).(Summary)
|
||||
if err := Register(resSz); err != nil {
|
||||
if are, ok := err.(AlreadyRegisteredError); ok {
|
||||
resSz = are.ExistingCollector.(Summary)
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
|
||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
||||
out := make(chan int)
|
||||
urlLen := 0
|
||||
if r.URL != nil {
|
||||
urlLen = len(r.URL.String())
|
||||
}
|
||||
go computeApproximateRequestSize(r, out, urlLen)
|
||||
out := computeApproximateRequestSize(r)
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
@@ -161,30 +310,44 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
|
||||
|
||||
method := sanitizeMethod(r.Method)
|
||||
code := sanitizeCode(delegate.status)
|
||||
regReqCnt.WithLabelValues(method, code).Inc()
|
||||
regReqDur.Observe(elapsed)
|
||||
regResSz.Observe(float64(delegate.written))
|
||||
regReqSz.Observe(float64(<-out))
|
||||
reqCnt.WithLabelValues(method, code).Inc()
|
||||
reqDur.Observe(elapsed)
|
||||
resSz.Observe(float64(delegate.written))
|
||||
reqSz.Observe(float64(<-out))
|
||||
})
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
func computeApproximateRequestSize(r *http.Request) <-chan int {
|
||||
// Get URL length in current go routine for avoiding a race condition.
|
||||
// HandlerFunc that runs in parallel may modify the URL.
|
||||
s := 0
|
||||
if r.URL != nil {
|
||||
s += len(r.URL.String())
|
||||
}
|
||||
|
||||
out := make(chan int, 1)
|
||||
|
||||
go func() {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
close(out)
|
||||
}()
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
|
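The http.go hunk above replaces MustRegisterOrGet with an explicit Register call that inspects AlreadyRegisteredError and reuses the existing collector. A minimal sketch of that idiom, using an invented metric name:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// registerOrReuse shows the registration idiom used throughout the hunk
// above: try to register, and if an identical collector already exists,
// fall back to the registered instance instead of panicking.
func registerOrReuse(c *prometheus.CounterVec) *prometheus.CounterVec {
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// Reuse the collector already registered under the same Desc.
			return are.ExistingCollector.(*prometheus.CounterVec)
		}
		panic(err)
	}
	return c
}

func main() {
	reqs := registerOrReuse(prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_requests_total", Help: "Demo counter."},
		[]string{"code"},
	))
	// Registering an identical collector a second time now yields the
	// original instead of a panic.
	reqs = registerOrReuse(prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_requests_total", Help: "Demo counter."},
		[]string{"code"},
	))
	reqs.WithLabelValues("200").Inc()
}
```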
34
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
@@ -22,10 +22,8 @@ import (
|
||||
const separatorByte byte = 255
|
||||
|
||||
// A Metric models a single sample value with its meta data being exported to
|
||||
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
|
||||
// Untyped, and Summary. Users can implement their own Metric types, but that
|
||||
// should be rarely needed. See the example for SelfCollector, which is also an
|
||||
// example for a user-implemented Metric.
|
||||
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
||||
// Histogram, Summary, and Untyped.
|
||||
type Metric interface {
|
||||
// Desc returns the descriptor for the Metric. This method idempotently
|
||||
// returns the same descriptor throughout the lifetime of the
|
||||
@@ -36,21 +34,23 @@ type Metric interface {
|
||||
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
||||
// transmission object.
|
||||
//
|
||||
// Implementers of custom Metric types must observe concurrency safety
|
||||
// as reads of this metric may occur at any time, and any blocking
|
||||
// occurs at the expense of total performance of rendering all
|
||||
// registered metrics. Ideally Metric implementations should support
|
||||
// concurrent readers.
|
||||
// Metric implementations must observe concurrency safety as reads of
|
||||
// this metric may occur at any time, and any blocking occurs at the
|
||||
// expense of total performance of rendering all registered
|
||||
// metrics. Ideally, Metric implementations should support concurrent
|
||||
// readers.
|
||||
//
|
||||
// The Prometheus client library attempts to minimize memory allocations
|
||||
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
|
||||
// may recycle the dto.Metric proto message, so Metric implementations
|
||||
// should just populate the provided dto.Metric and then should not keep
|
||||
// any reference to it.
|
||||
//
|
||||
// While populating dto.Metric, labels must be sorted lexicographically.
|
||||
// (Implementers may find LabelPairSorter useful for that.)
|
||||
// While populating dto.Metric, it is the responsibility of the
|
||||
// implementation to ensure validity of the Metric protobuf (like valid
|
||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||
// recommended to sort labels lexicographically. (Implementers may find
|
||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
||||
// sure of sorting if they depend on it.
|
||||
Write(*dto.Metric) error
|
||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||
// dto.Metric protobuf to save allocations has disappeared. The
|
||||
// signature of this method should be changed to "Write() (*dto.Metric,
|
||||
// error)".
|
||||
}
|
||||
|
||||
// Opts bundles the options for creating most Metric types. Each metric
|
||||
|
50
vendor/github.com/prometheus/client_golang/prometheus/observer.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Observer is the interface that wraps the Observe method, which is used by
|
||||
// Histogram and Summary to add observations.
|
||||
type Observer interface {
|
||||
Observe(float64)
|
||||
}
|
||||
|
||||
// The ObserverFunc type is an adapter to allow the use of ordinary
|
||||
// functions as Observers. If f is a function with the appropriate
|
||||
// signature, ObserverFunc(f) is an Observer that calls f.
|
||||
//
|
||||
// This adapter is usually used in connection with the Timer type, and there are
|
||||
// two general use cases:
|
||||
//
|
||||
// The most common one is to use a Gauge as the Observer for a Timer.
|
||||
// See the "Gauge" Timer example.
|
||||
//
|
||||
// The more advanced use case is to create a function that dynamically decides
|
||||
// which Observer to use for observing the duration. See the "Complex" Timer
|
||||
// example.
|
||||
type ObserverFunc func(float64)
|
||||
|
||||
// Observe calls f(value). It implements Observer.
|
||||
func (f ObserverFunc) Observe(value float64) {
|
||||
f(value)
|
||||
}
|
||||
|
||||
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
|
||||
type ObserverVec interface {
|
||||
GetMetricWith(Labels) (Observer, error)
|
||||
GetMetricWithLabelValues(lvs ...string) (Observer, error)
|
||||
With(Labels) Observer
|
||||
WithLabelValues(...string) Observer
|
||||
|
||||
Collector
|
||||
}
|
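The Observer and ObserverFunc types added above are small, so a hedged usage sketch may help; the printed message and the observed value are illustrative only and not part of the vendored code.

```
// Hedged sketch: ObserverFunc adapts an ordinary func(float64) into an
// Observer, the interface shared by Histogram and Summary.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	var obs prometheus.Observer = prometheus.ObserverFunc(func(v float64) {
		fmt.Printf("observed %g seconds\n", v) // illustrative sink for observations
	})
	obs.Observe(0.42)
}
```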
108
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
@@ -19,16 +19,16 @@ type processCollector struct {
|
||||
pid int
|
||||
collectFn func(chan<- Metric)
|
||||
pidFn func() (int, error)
|
||||
cpuTotal Counter
|
||||
openFDs, maxFDs Gauge
|
||||
vsize, rss Gauge
|
||||
startTime Gauge
|
||||
cpuTotal *Desc
|
||||
openFDs, maxFDs *Desc
|
||||
vsize, rss *Desc
|
||||
startTime *Desc
|
||||
}
|
||||
|
||||
// NewProcessCollector returns a collector which exports the current state of
|
||||
// process metrics including cpu, memory and file descriptor usage as well as
|
||||
// the process start time for the given process id under the given namespace.
|
||||
func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||
func NewProcessCollector(pid int, namespace string) Collector {
|
||||
return NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return pid, nil },
|
||||
namespace,
|
||||
@@ -43,41 +43,46 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||
func NewProcessCollectorPIDFn(
|
||||
pidFn func() (int, error),
|
||||
namespace string,
|
||||
) *processCollector {
|
||||
) Collector {
|
||||
ns := ""
|
||||
if len(namespace) > 0 {
|
||||
ns = namespace + "_"
|
||||
}
|
||||
|
||||
c := processCollector{
|
||||
pidFn: pidFn,
|
||||
collectFn: func(chan<- Metric) {},
|
||||
|
||||
cpuTotal: NewCounter(CounterOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_cpu_seconds_total",
|
||||
Help: "Total user and system CPU time spent in seconds.",
|
||||
}),
|
||||
openFDs: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_open_fds",
|
||||
Help: "Number of open file descriptors.",
|
||||
}),
|
||||
maxFDs: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_max_fds",
|
||||
Help: "Maximum number of open file descriptors.",
|
||||
}),
|
||||
vsize: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_virtual_memory_bytes",
|
||||
Help: "Virtual memory size in bytes.",
|
||||
}),
|
||||
rss: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_resident_memory_bytes",
|
||||
Help: "Resident memory size in bytes.",
|
||||
}),
|
||||
startTime: NewGauge(GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Name: "process_start_time_seconds",
|
||||
Help: "Start time of the process since unix epoch in seconds.",
|
||||
}),
|
||||
cpuTotal: NewDesc(
|
||||
ns+"process_cpu_seconds_total",
|
||||
"Total user and system CPU time spent in seconds.",
|
||||
nil, nil,
|
||||
),
|
||||
openFDs: NewDesc(
|
||||
ns+"process_open_fds",
|
||||
"Number of open file descriptors.",
|
||||
nil, nil,
|
||||
),
|
||||
maxFDs: NewDesc(
|
||||
ns+"process_max_fds",
|
||||
"Maximum number of open file descriptors.",
|
||||
nil, nil,
|
||||
),
|
||||
vsize: NewDesc(
|
||||
ns+"process_virtual_memory_bytes",
|
||||
"Virtual memory size in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
rss: NewDesc(
|
||||
ns+"process_resident_memory_bytes",
|
||||
"Resident memory size in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
startTime: NewDesc(
|
||||
ns+"process_start_time_seconds",
|
||||
"Start time of the process since unix epoch in seconds.",
|
||||
nil, nil,
|
||||
),
|
||||
}
|
||||
|
||||
// Set up process metric collection if supported by the runtime.
|
||||
@@ -90,12 +95,12 @@ func NewProcessCollectorPIDFn(
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *processCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.cpuTotal.Desc()
|
||||
ch <- c.openFDs.Desc()
|
||||
ch <- c.maxFDs.Desc()
|
||||
ch <- c.vsize.Desc()
|
||||
ch <- c.rss.Desc()
|
||||
ch <- c.startTime.Desc()
|
||||
ch <- c.cpuTotal
|
||||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.vsize
|
||||
ch <- c.rss
|
||||
ch <- c.startTime
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
@@ -117,26 +122,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
}
|
||||
|
||||
if stat, err := p.NewStat(); err == nil {
|
||||
c.cpuTotal.Set(stat.CPUTime())
|
||||
ch <- c.cpuTotal
|
||||
c.vsize.Set(float64(stat.VirtualMemory()))
|
||||
ch <- c.vsize
|
||||
c.rss.Set(float64(stat.ResidentMemory()))
|
||||
ch <- c.rss
|
||||
|
||||
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
|
||||
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
|
||||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
||||
if startTime, err := stat.StartTime(); err == nil {
|
||||
c.startTime.Set(startTime)
|
||||
ch <- c.startTime
|
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
||||
}
|
||||
}
|
||||
|
||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
||||
c.openFDs.Set(float64(fds))
|
||||
ch <- c.openFDs
|
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
||||
}
|
||||
|
||||
if limits, err := p.NewLimits(); err == nil {
|
||||
c.maxFDs.Set(float64(limits.OpenFiles))
|
||||
ch <- c.maxFDs
|
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
||||
}
|
||||
}
|
||||
|
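Since NewProcessCollector now returns a Collector backed by const metrics, here is a minimal, hedged sketch of registering it for the current process; the "myapp" namespace is an arbitrary example, not taken from this change.

```
// Hedged sketch: register process metrics for the current PID.
package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	pc := prometheus.NewProcessCollector(os.Getpid(), "myapp")
	prometheus.MustRegister(pc) // exposes myapp_process_cpu_seconds_total, myapp_process_open_fds, etc.
}
```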
38
vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"delegator_1_8.go",
|
||||
"http.go",
|
||||
"instrument_client.go",
|
||||
"instrument_client_1_8.go",
|
||||
"instrument_server.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_model/go:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
38
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_7.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||
d := &responseWriterDelegator{
|
||||
ResponseWriter: w,
|
||||
observeWriteHeader: observeWriteHeaderFunc,
|
||||
}
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
_, hj := w.(http.Hijacker)
|
||||
_, rf := w.(io.ReaderFrom)
|
||||
if cn && fl && hj && rf {
|
||||
return &fancyDelegator{d}
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
73
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// newDelegator handles the four different methods of upgrading a
|
||||
// http.ResponseWriter to delegator.
|
||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||
d := &responseWriterDelegator{
|
||||
ResponseWriter: w,
|
||||
observeWriteHeader: observeWriteHeaderFunc,
|
||||
}
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
_, hj := w.(http.Hijacker)
|
||||
_, ps := w.(http.Pusher)
|
||||
_, rf := w.(io.ReaderFrom)
|
||||
|
||||
// Check for the four most common combination of interfaces a
|
||||
// http.ResponseWriter might implement.
|
||||
switch {
|
||||
case cn && fl && hj && rf && ps:
|
||||
// All interfaces.
|
||||
return &fancyPushDelegator{
|
||||
fancyDelegator: &fancyDelegator{d},
|
||||
p: &pushDelegator{d},
|
||||
}
|
||||
case cn && fl && hj && rf:
|
||||
// All interfaces, except http.Pusher.
|
||||
return &fancyDelegator{d}
|
||||
case ps:
|
||||
// Just http.Pusher.
|
||||
return &pushDelegator{d}
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
type fancyPushDelegator struct {
|
||||
p *pushDelegator
|
||||
|
||||
*fancyDelegator
|
||||
}
|
||||
|
||||
func (f *fancyPushDelegator) Push(target string, opts *http.PushOptions) error {
|
||||
return f.p.Push(target, opts)
|
||||
}
|
||||
|
||||
type pushDelegator struct {
|
||||
*responseWriterDelegator
|
||||
}
|
||||
|
||||
func (f *pushDelegator) Push(target string, opts *http.PushOptions) error {
|
||||
return f.ResponseWriter.(http.Pusher).Push(target, opts)
|
||||
}
|
204
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package promhttp provides tooling around HTTP servers and clients.
|
||||
//
|
||||
// First, the package allows the creation of http.Handler instances to expose
|
||||
// Prometheus metrics via HTTP. promhttp.Handler acts on the
|
||||
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
|
||||
// custom registry or anything that implements the Gatherer interface. It also
|
||||
// allows the creation of handlers that act differently on errors or allow to
|
||||
// log errors.
|
||||
//
|
||||
// Second, the package provides tooling to instrument instances of http.Handler
|
||||
// via middleware. Middleware wrappers follow the naming scheme
|
||||
// InstrumentHandlerX, where X describes the intended use of the middleware.
|
||||
// See each function's doc comment for specific details.
|
||||
//
|
||||
// Finally, the package allows for an http.RoundTripper to be instrumented via
|
||||
// middleware. Middleware wrappers follow the naming scheme
|
||||
// InstrumentRoundTripperX, where X describes the intended use of the
|
||||
// middleware. See each function's doc comment for specific details.
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var bufPool sync.Pool
|
||||
|
||||
func getBuf() *bytes.Buffer {
|
||||
buf := bufPool.Get()
|
||||
if buf == nil {
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
return buf.(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
|
||||
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
|
||||
// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
|
||||
// error, no error logging, and compression if requested by the client.
|
||||
//
|
||||
// If you want to create a Handler for the DefaultGatherer with different
|
||||
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
|
||||
// your desired HandlerOpts.
|
||||
func Handler() http.Handler {
|
||||
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
|
||||
}
|
||||
|
||||
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
|
||||
// of the Handler is defined by the provided HandlerOpts.
|
||||
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := reg.Gather()
|
||||
if err != nil {
|
||||
if opts.ErrorLog != nil {
|
||||
opts.ErrorLog.Println("error gathering metrics:", err)
|
||||
}
|
||||
switch opts.ErrorHandling {
|
||||
case PanicOnError:
|
||||
panic(err)
|
||||
case ContinueOnError:
|
||||
if len(mfs) == 0 {
|
||||
http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
case HTTPErrorOnError:
|
||||
http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
buf := getBuf()
|
||||
defer giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
|
||||
enc := expfmt.NewEncoder(writer, contentType)
|
||||
var lastErr error
|
||||
for _, mf := range mfs {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
lastErr = err
|
||||
if opts.ErrorLog != nil {
|
||||
opts.ErrorLog.Println("error encoding metric family:", err)
|
||||
}
|
||||
switch opts.ErrorHandling {
|
||||
case PanicOnError:
|
||||
panic(err)
|
||||
case ContinueOnError:
|
||||
// Handled later.
|
||||
case HTTPErrorOnError:
|
||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if lastErr != nil && buf.Len() == 0 {
|
||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
// TODO(beorn7): Consider streaming serving of metrics.
|
||||
})
|
||||
}
|
||||
|
||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
||||
// errors.
|
||||
type HandlerErrorHandling int
|
||||
|
||||
// These constants cause handlers serving metrics to behave as described if
|
||||
// errors are encountered.
|
||||
const (
|
||||
// Serve an HTTP status code 500 upon the first error
|
||||
// encountered. Report the error message in the body.
|
||||
HTTPErrorOnError HandlerErrorHandling = iota
|
||||
// Ignore errors and try to serve as many metrics as possible. However,
|
||||
// if no metrics can be served, serve an HTTP status code 500 and the
|
||||
// last error message in the body. Only use this in deliberate "best
|
||||
// effort" metrics collection scenarios. It is recommended to at least
|
||||
// log errors (by providing an ErrorLog in HandlerOpts) to not mask
|
||||
// errors completely.
|
||||
ContinueOnError
|
||||
// Panic upon the first error encountered (useful for "crash only" apps).
|
||||
PanicOnError
|
||||
)
|
||||
|
||||
// Logger is the minimal interface HandlerOpts needs for logging. Note that
|
||||
// log.Logger from the standard library implements this interface, and it is
|
||||
// easy to implement by custom loggers, if they don't do so already anyway.
|
||||
type Logger interface {
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
|
||||
// zero value of HandlerOpts is a reasonable default.
|
||||
type HandlerOpts struct {
|
||||
// ErrorLog specifies an optional logger for errors collecting and
|
||||
// serving metrics. If nil, errors are not logged at all.
|
||||
ErrorLog Logger
|
||||
// ErrorHandling defines how errors are handled. Note that errors are
|
||||
// logged regardless of the configured ErrorHandling provided ErrorLog
|
||||
// is not nil.
|
||||
ErrorHandling HandlerErrorHandling
|
||||
// If DisableCompression is true, the handler will never compress the
|
||||
// response, even if requested by the client.
|
||||
DisableCompression bool
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
|
||||
if compressionDisabled {
|
||||
return writer, ""
|
||||
}
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
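To make the new promhttp handler concrete, here is a minimal, hedged sketch of serving a custom registry with HandlerFor; the route and listen address are arbitrary examples.

```
// Hedged sketch: expose a custom registry via promhttp.HandlerFor.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry() // any prometheus.Gatherer works here
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorHandling: promhttp.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```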
95
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// The RoundTripperFunc type is an adapter to allow the use of ordinary
|
||||
// functions as RoundTrippers. If f is a function with the appropriate
|
||||
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
|
||||
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
|
||||
|
||||
// RoundTrip implements the RoundTripper interface.
|
||||
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
return rt(r)
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
|
||||
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
|
||||
// requests currently handled by the wrapped http.RoundTripper.
|
||||
//
|
||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
gauge.Inc()
|
||||
defer gauge.Dec()
|
||||
return next.RoundTrip(r)
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperCounter is a middleware that wraps the provided
|
||||
// http.RoundTripper to observe the request result with the provided CounterVec.
|
||||
// The CounterVec must have zero, one, or two labels. The only allowed label
|
||||
// names are "code" and "method". The function panics if any other instance
|
||||
// labels are provided. Partitioning of the CounterVec happens by HTTP status
|
||||
// code and/or HTTP method if the respective instance label names are present
|
||||
// in the CounterVec. For unpartitioned counting, use a CounterVec with
|
||||
// zero labels.
|
||||
//
|
||||
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
|
||||
// is not incremented.
|
||||
//
|
||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
|
||||
code, method := checkLabels(counter)
|
||||
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
resp, err := next.RoundTrip(r)
|
||||
if err == nil {
|
||||
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperDuration is a middleware that wraps the provided
|
||||
// http.RoundTripper to observe the request duration with the provided ObserverVec.
|
||||
// The ObserverVec must have zero, one, or two labels. The only allowed label
|
||||
// names are "code" and "method". The function panics if any other instance
|
||||
// labels are provided. The Observe method of the Observer in the ObserverVec
|
||||
// is called with the request duration in seconds. Partitioning happens by HTTP
|
||||
// status code and/or HTTP method if the respective instance label names are
|
||||
// present in the ObserverVec. For unpartitioned observations, use an
|
||||
// ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||
// expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
|
||||
// reported.
|
||||
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
start := time.Now()
|
||||
resp, err := next.RoundTrip(r)
|
||||
if err == nil {
|
||||
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
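A hedged sketch of chaining the client middleware above; the metric names are arbitrary examples rather than anything defined in this change.

```
// Hedged sketch: wrap http.DefaultTransport with the in-flight gauge and
// the request counter middleware.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight requests of the instrumented client.",
	})
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_requests_total",
		Help: "Requests partitioned by status code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(inFlight, requests)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(requests, http.DefaultTransport)),
	}
	client.Get("http://example.com") // response and error handling omitted for brevity
}
```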
142
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"time"
|
||||
)
|
||||
|
||||
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
||||
// httptrace.ClientTrace hook functions. Each function is passed a float64
|
||||
// representing the time in seconds since the start of the http request. A user
|
||||
// may choose to use separately bucketed Histograms, or implement custom
|
||||
// instance labels on a per function basis.
|
||||
type InstrumentTrace struct {
|
||||
GotConn func(float64)
|
||||
PutIdleConn func(float64)
|
||||
GotFirstResponseByte func(float64)
|
||||
Got100Continue func(float64)
|
||||
DNSStart func(float64)
|
||||
DNSDone func(float64)
|
||||
ConnectStart func(float64)
|
||||
ConnectDone func(float64)
|
||||
TLSHandshakeStart func(float64)
|
||||
TLSHandshakeDone func(float64)
|
||||
WroteHeaders func(float64)
|
||||
Wait100Continue func(float64)
|
||||
WroteRequest func(float64)
|
||||
}
|
||||
|
||||
// InstrumentRoundTripperTrace is a middleware that wraps the provided
|
||||
// RoundTripper and reports times to hook functions provided in the
|
||||
// InstrumentTrace struct. Hook functions that are not present in the provided
|
||||
// InstrumentTrace struct are ignored. Times reported to the hook functions are
|
||||
// time since the start of the request. Note that partitioning of Histograms
|
||||
// is expensive and should be used judiciously.
|
||||
//
|
||||
// For hook functions that receive an error as an argument, no observations are
|
||||
// made in the event of a non-nil error value.
|
||||
//
|
||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
||||
start := time.Now()
|
||||
|
||||
trace := &httptrace.ClientTrace{
|
||||
GotConn: func(_ httptrace.GotConnInfo) {
|
||||
if it.GotConn != nil {
|
||||
it.GotConn(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
PutIdleConn: func(err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if it.PutIdleConn != nil {
|
||||
it.PutIdleConn(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
DNSStart: func(_ httptrace.DNSStartInfo) {
|
||||
if it.DNSStart != nil {
|
||||
it.DNSStart(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
DNSDone: func(_ httptrace.DNSDoneInfo) {
|
||||
if it.DNSDone != nil {
|
||||
it.DNSDone(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
ConnectStart: func(_, _ string) {
|
||||
if it.ConnectStart != nil {
|
||||
it.ConnectStart(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
ConnectDone: func(_, _ string, err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if it.ConnectDone != nil {
|
||||
it.ConnectDone(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
GotFirstResponseByte: func() {
|
||||
if it.GotFirstResponseByte != nil {
|
||||
it.GotFirstResponseByte(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
Got100Continue: func() {
|
||||
if it.Got100Continue != nil {
|
||||
it.Got100Continue(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
TLSHandshakeStart: func() {
|
||||
if it.TLSHandshakeStart != nil {
|
||||
it.TLSHandshakeStart(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if it.TLSHandshakeDone != nil {
|
||||
it.TLSHandshakeDone(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
WroteHeaders: func() {
|
||||
if it.WroteHeaders != nil {
|
||||
it.WroteHeaders(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
Wait100Continue: func() {
|
||||
if it.Wait100Continue != nil {
|
||||
it.Wait100Continue(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
WroteRequest: func(_ httptrace.WroteRequestInfo) {
|
||||
if it.WroteRequest != nil {
|
||||
it.WroteRequest(time.Since(start).Seconds())
|
||||
}
|
||||
},
|
||||
}
|
||||
r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
|
||||
|
||||
return next.RoundTrip(r)
|
||||
})
|
||||
}
|
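A hedged sketch of wiring one of the httptrace hooks above into a Histogram (requires Go 1.8+ per the build tag); the metric name and target URL are arbitrary examples.

```
// Hedged sketch: observe time-to-DNS-start via InstrumentRoundTripperTrace.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsStart := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "http_client_dns_start_seconds",
		Help: "Time from request start until DNS lookup begins.",
	})
	prometheus.MustRegister(dnsStart)

	trace := &promhttp.InstrumentTrace{
		DNSStart: func(t float64) { dnsStart.Observe(t) },
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	client.Get("http://example.com") // response and error handling omitted for brevity
}
```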
505
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
generated
vendored
Normal file
@@ -0,0 +1,505 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promhttp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
|
||||
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
|
||||
|
||||
// InstrumentHandlerInFlight is a middleware that wraps the provided
|
||||
// http.Handler. It sets the provided prometheus.Gauge to the number of
|
||||
// requests currently handled by the wrapped http.Handler.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
g.Inc()
|
||||
defer g.Dec()
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerDuration is a middleware that wraps the provided
|
||||
// http.Handler to observe the request duration with the provided ObserverVec.
|
||||
// The ObserverVec must have zero, one, or two labels. The only allowed label
|
||||
// names are "code" and "method". The function panics if any other instance
|
||||
// labels are provided. The Observe method of the Observer in the ObserverVec
|
||||
// is called with the request duration in seconds. Partitioning happens by HTTP
|
||||
// status code and/or HTTP method if the respective instance label names are
|
||||
// present in the ObserverVec. For unpartitioned observations, use an
|
||||
// ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||
// expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, no values are reported.
|
||||
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerCounter is a middleware that wraps the provided
|
||||
// http.Handler to observe the request result with the provided CounterVec.
|
||||
// The CounterVec must have zero, one, or two labels. The only allowed label
|
||||
// names are "code" and "method". The function panics if any other instance
|
||||
// labels are provided. Partitioning of the CounterVec happens by HTTP status
|
||||
// code and/or HTTP method if the respective instance label names are present
|
||||
// in the CounterVec. For unpartitioned counting, use a CounterVec with
|
||||
// zero labels.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, the Counter is not incremented.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(counter)
|
||||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
counter.With(labels(code, method, r.Method, d.Status())).Inc()
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
counter.With(labels(code, method, r.Method, 0)).Inc()
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
|
||||
// http.Handler to observe with the provided ObserverVec the request duration
|
||||
// until the response headers are written. The ObserverVec must have zero, one,
|
||||
// or two labels. The only allowed label names are "code" and "method". The
|
||||
// function panics if any other instance labels are provided. The Observe
|
||||
// method of the Observer in the ObserverVec is called with the request
|
||||
// duration in seconds. Partitioning happens by HTTP status code and/or HTTP
|
||||
// method if the respective instance label names are present in the
|
||||
// ObserverVec. For unpartitioned observations, use an ObserverVec with zero
|
||||
// labels. Note that partitioning of Histograms is expensive and should be used
|
||||
// judiciously.
|
||||
//
|
||||
// If the wrapped Handler panics before calling WriteHeader, no value is
|
||||
// reported.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w, func(status int) {
|
||||
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
|
||||
})
|
||||
next.ServeHTTP(d, r)
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerRequestSize is a middleware that wraps the provided
|
||||
// http.Handler to observe the request size with the provided ObserverVec.
|
||||
// The ObserverVec must have zero, one, or two labels. The only allowed label
|
||||
// names are "code" and "method". The function panics if any other instance
|
||||
// labels are provided. The Observe method of the Observer in the ObserverVec
|
||||
// is called with the request size in bytes. Partitioning happens by HTTP
|
||||
// status code and/or HTTP method if the respective instance label names are
|
||||
// present in the ObserverVec. For unpartitioned observations, use an
|
||||
// ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||
// expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, no values are reported.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
|
||||
code, method := checkLabels(obs)
|
||||
|
||||
if code {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
|
||||
})
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerResponseSize is a middleware that wraps the provided
|
||||
// http.Handler to observe the response size with the provided ObserverVec.
|
||||
// The ObserverVec must have zero, one, or two labels. The only allowed label
|
||||
// names are "code" and "method". The function panics if any other instance
|
||||
// labels are provided. The Observe method of the Observer in the ObserverVec
|
||||
// is called with the response size in bytes. Partitioning happens by HTTP
|
||||
// status code and/or HTTP method if the respective instance label names are
|
||||
// present in the ObserverVec. For unpartitioned observations, use an
|
||||
// ObserverVec with zero labels. Note that partitioning of Histograms is
|
||||
// expensive and should be used judiciously.
|
||||
//
|
||||
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
|
||||
//
|
||||
// If the wrapped Handler panics, no values are reported.
|
||||
//
|
||||
// See the example for InstrumentHandlerDuration for example usage.
|
||||
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
|
||||
code, method := checkLabels(obs)
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
|
||||
})
|
||||
}
|
||||
|
||||
func checkLabels(c prometheus.Collector) (code bool, method bool) {
|
||||
// TODO(beorn7): Remove this hacky way to check for instance labels
|
||||
// once Descriptors can have their dimensionality queried.
|
||||
var (
|
||||
desc *prometheus.Desc
|
||||
pm dto.Metric
|
||||
)
|
||||
|
||||
descc := make(chan *prometheus.Desc, 1)
|
||||
c.Describe(descc)
|
||||
|
||||
select {
|
||||
case desc = <-descc:
|
||||
default:
|
||||
panic("no description provided by collector")
|
||||
}
|
||||
select {
|
||||
case <-descc:
|
||||
panic("more than one description provided by collector")
|
||||
default:
|
||||
}
|
||||
|
||||
close(descc)
|
||||
|
||||
if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil {
|
||||
return
|
||||
}
|
||||
if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil {
|
||||
if err := m.Write(&pm); err != nil {
|
||||
panic("error checking metric for labels")
|
||||
}
|
||||
for _, label := range pm.Label {
|
||||
name, value := label.GetName(), label.GetValue()
|
||||
if value != magicString {
|
||||
continue
|
||||
}
|
||||
switch name {
|
||||
case "code":
|
||||
code = true
|
||||
case "method":
|
||||
method = true
|
||||
default:
|
||||
panic("metric partitioned with non-supported labels")
|
||||
}
|
||||
return
|
||||
}
|
||||
panic("previously set label not found – this must never happen")
|
||||
}
|
||||
if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil {
|
||||
if err := m.Write(&pm); err != nil {
|
||||
panic("error checking metric for labels")
|
||||
}
|
||||
for _, label := range pm.Label {
|
||||
name, value := label.GetName(), label.GetValue()
|
||||
if value != magicString {
|
||||
continue
|
||||
}
|
||||
if name == "code" || name == "method" {
|
||||
continue
|
||||
}
|
||||
panic("metric partitioned with non-supported labels")
|
||||
}
|
||||
code = true
|
||||
method = true
|
||||
return
|
||||
}
|
||||
panic("metric partitioned with non-supported labels")
|
||||
}
|
||||
|
||||
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
|
||||
// unnecessary allocations on each request.
|
||||
var emptyLabels = prometheus.Labels{}
|
||||
|
||||
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
|
||||
if !(code || method) {
|
||||
return emptyLabels
|
||||
}
|
||||
labels := prometheus.Labels{}
|
||||
|
||||
if code {
|
||||
labels["code"] = sanitizeCode(status)
|
||||
}
|
||||
if method {
|
||||
labels["method"] = sanitizeMethod(reqMethod)
|
||||
}
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request) int {
|
||||
s := 0
|
||||
if r.URL != nil {
|
||||
s += len(r.URL.String())
|
||||
}
|
||||
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func sanitizeMethod(m string) string {
|
||||
switch m {
|
||||
case "GET", "get":
|
||||
return "get"
|
||||
case "PUT", "put":
|
||||
return "put"
|
||||
case "HEAD", "head":
|
||||
return "head"
|
||||
case "POST", "post":
|
||||
return "post"
|
||||
case "DELETE", "delete":
|
||||
return "delete"
|
||||
case "CONNECT", "connect":
|
||||
return "connect"
|
||||
case "OPTIONS", "options":
|
||||
return "options"
|
||||
case "NOTIFY", "notify":
|
||||
return "notify"
|
||||
default:
|
||||
return strings.ToLower(m)
|
||||
}
|
||||
}
|
||||
|
||||
// If the wrapped http.Handler has not set a status code, i.e. the value is
|
||||
// currently 0, sanitizeCode will return 200, for consistency with behavior in
|
||||
// the stdlib.
|
||||
func sanitizeCode(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200, 0:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
|
||||
|
||||
type delegator interface {
|
||||
Status() int
|
||||
Written() int64
|
||||
|
||||
http.ResponseWriter
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
handler, method string
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
observeWriteHeader func(int)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Status() int {
|
||||
return r.status
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Written() int64 {
|
||||
return r.written
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
if r.observeWriteHeader != nil {
|
||||
r.observeWriteHeader(code)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.Write(b)
|
||||
r.written += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
type fancyDelegator struct {
|
||||
*responseWriterDelegator
|
||||
}
|
||||
|
||||
func (r *fancyDelegator) CloseNotify() <-chan bool {
|
||||
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
func (r *fancyDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return r.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
|
||||
func (r *fancyDelegator) Flush() {
|
||||
r.ResponseWriter.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
func (r *fancyDelegator) ReadFrom(re io.Reader) (int64, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
|
||||
r.written += n
|
||||
return n, err
|
||||
}
|
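A hedged sketch of the server-side middleware above chained around a plain handler, assuming HistogramVec satisfies ObserverVec as observer.go states; the histogram name and routes are arbitrary examples.

```
// Hedged sketch: record request durations with InstrumentHandlerDuration.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request duration partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	http.Handle("/", promhttp.InstrumentHandlerDuration(duration, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```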
65
vendor/github.com/prometheus/client_golang/prometheus/push.go
generated
vendored
@@ -1,65 +0,0 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Copyright (c) 2013, The Prometheus Authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Push triggers a metric collection by the default registry and pushes all
|
||||
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
|
||||
// documentation for detailed implications of the job and instance
|
||||
// parameter. instance can be left empty. You can use just host:port or ip:port
|
||||
// as url, in which case 'http://' is added automatically. You can also include
|
||||
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
|
||||
//
|
||||
// Note that all previously pushed metrics with the same job and instance will
|
||||
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
|
||||
// to push to the Pushgateway.)
|
||||
func Push(job, instance, url string) error {
|
||||
return defRegistry.Push(job, instance, url, "PUT")
|
||||
}
|
||||
|
||||
// PushAdd works like Push, but only previously pushed metrics with the same
|
||||
// name (and the same job and instance) will be replaced. (It uses HTTP method
|
||||
// 'POST' to push to the Pushgateway.)
|
||||
func PushAdd(job, instance, url string) error {
|
||||
return defRegistry.Push(job, instance, url, "POST")
|
||||
}
|
||||
|
||||
// PushCollectors works like Push, but it does not collect from the default
|
||||
// registry. Instead, it collects from the provided collectors. It is a
|
||||
// convenient way to push only a few metrics.
|
||||
func PushCollectors(job, instance, url string, collectors ...Collector) error {
|
||||
return pushCollectors(job, instance, url, "PUT", collectors...)
|
||||
}
|
||||
|
||||
// PushAddCollectors works like PushAdd, but it does not collect from the
|
||||
// default registry. Instead, it collects from the provided collectors. It is a
|
||||
// convenient way to push only a few metrics.
|
||||
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
|
||||
return pushCollectors(job, instance, url, "POST", collectors...)
|
||||
}
|
||||
|
||||
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
|
||||
r := newRegistry()
|
||||
for _, collector := range collectors {
|
||||
if _, err := r.Register(collector); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return r.Push(job, instance, url, method)
|
||||
}
|
945
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
File diff suppressed because it is too large
73
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
@@ -15,7 +15,6 @@ package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
@@ -54,8 +53,11 @@ type Summary interface {
|
||||
Observe(float64)
|
||||
}
|
||||
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
//
|
||||
// Deprecated: DefObjectives will not be used as the default objectives in
|
||||
// v0.10 of the library. The default Summary will have no quantiles then.
|
||||
var (
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||
|
||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
||||
@@ -114,9 +116,15 @@ type SummaryOpts struct {
|
||||
ConstLabels Labels
|
||||
|
||||
// Objectives defines the quantile rank estimates with their respective
|
||||
// absolute error. If Objectives[q] = e, then the value reported
|
||||
// for q will be the φ-quantile value for some φ between q-e and q+e.
|
||||
// The default value is DefObjectives.
|
||||
// absolute error. If Objectives[q] = e, then the value reported for q
|
||||
// will be the φ-quantile value for some φ between q-e and q+e. The
|
||||
// default value is DefObjectives. It is used if Objectives is left at
|
||||
// its zero value (i.e. nil). To create a Summary without Objectives,
|
||||
// set it to an empty map (i.e. map[float64]float64{}).
|
||||
//
|
||||
// Deprecated: Note that the current value of DefObjectives is
|
||||
// deprecated. It will be replaced by an empty map in v0.10 of the
|
||||
// library. Please explicitly set Objectives to the desired value.
|
||||
Objectives map[float64]float64
|
||||
|
||||
// MaxAge defines the duration for which an observation stays relevant
|
||||
@@ -140,11 +148,11 @@ type SummaryOpts struct {
|
||||
BufCap uint32
|
||||
}
|
||||
|
||||
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
|
||||
// method of perk/quantile is actually not working as advertised - and it might
|
||||
// be unfixable, as the underlying algorithm is apparently not capable of
|
||||
// merging summaries in the first place. To avoid using Merge, we are currently
|
||||
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
||||
// perk/quantile is actually not working as advertised - and it might be
|
||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||
// observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// essentially multiplied by the number of age buckets. When rotating age
|
||||
// buckets, we empty the previous head stream. On scrape time, we simply take
|
||||
// the quantiles from the head stream (no merging required). Result: More effort
|
||||
@@ -184,7 +192,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||
}
|
||||
}
|
||||
|
||||
if len(opts.Objectives) == 0 {
|
||||
if opts.Objectives == nil {
|
||||
opts.Objectives = DefObjectives
|
||||
}
|
||||
|
||||
@@ -228,12 +236,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||
}
|
||||
sort.Float64s(s.sortedObjectives)
|
||||
|
||||
s.Init(s) // Init self-collection.
|
||||
s.init(s) // Init self-collection.
|
||||
return s
|
||||
}
|
||||
|
||||
type summary struct {
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
||||
mtx sync.Mutex // Protects every other moving part.
|
||||
@@ -391,7 +399,7 @@ func (s quantSort) Less(i, j int) bool {
|
||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewSummaryVec.
|
||||
type SummaryVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
@@ -405,35 +413,30 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &SummaryVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Summary and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
|
||||
// GetMetricWithLabelValues replaces the method of the same name in MetricVec.
|
||||
// The difference is that this method returns an Observer and not a Metric so
|
||||
// that no type conversion to an Observer is required.
|
||||
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Summary), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Summary and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
|
||||
// difference is that this method returns an Observer and not a Metric so that
|
||||
// no type conversion to an Observer is required.
|
||||
func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Summary), err
|
||||
return metric.(Observer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -442,15 +445,15 @@ func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Summary)
|
||||
func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Observer)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (m *SummaryVec) With(labels Labels) Summary {
|
||||
return m.MetricVec.With(labels).(Summary)
|
||||
func (m *SummaryVec) With(labels Labels) Observer {
|
||||
return m.MetricVec.With(labels).(Observer)
|
||||
}
|
||||
|
||||
type constSummary struct {
|
||||
|
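Taken together, the summary.go changes above mean two things for callers: Objectives should now be set explicitly (relying on DefObjectives as the implicit default is deprecated, and an empty map yields a Summary without quantiles), and the SummaryVec accessors now return Observer rather than Summary. A minimal sketch; the metric name, objectives, and label values are assumptions:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var requestLatency = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name: "http_request_duration_seconds",
		Help: "Request latency by handler.",
		// Set Objectives explicitly; leaving it nil still falls back to the
		// (deprecated) DefObjectives, while an empty map disables quantiles.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	},
	[]string{"handler"},
)

func recordLatency(seconds float64) {
	// WithLabelValues now returns an Observer instead of a Summary, but call
	// sites that only call Observe compile unchanged.
	requestLatency.WithLabelValues("/api/v1/items").Observe(seconds)
}
```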
48
vendor/github.com/prometheus/client_golang/prometheus/timer.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "time"
|
||||
|
||||
// Timer is a helper type to time functions. Use NewTimer to create new
|
||||
// instances.
|
||||
type Timer struct {
|
||||
begin time.Time
|
||||
observer Observer
|
||||
}
|
||||
|
||||
// NewTimer creates a new Timer. The provided Observer is used to observe a
|
||||
// duration in seconds. Timer is usually used to time a function call in the
|
||||
// following way:
|
||||
// func TimeMe() {
|
||||
// timer := NewTimer(myHistogram)
|
||||
// defer timer.ObserveDuration()
|
||||
// // Do actual work.
|
||||
// }
|
||||
func NewTimer(o Observer) *Timer {
|
||||
return &Timer{
|
||||
begin: time.Now(),
|
||||
observer: o,
|
||||
}
|
||||
}
|
||||
|
||||
// ObserveDuration records the duration passed since the Timer was created with
|
||||
// NewTimer. It calls the Observe method of the Observer provided during
|
||||
// construction with the duration in seconds as an argument. ObserveDuration is
|
||||
// usually called with a defer statement.
|
||||
func (t *Timer) ObserveDuration() {
|
||||
if t.observer != nil {
|
||||
t.observer.Observe(time.Since(t.begin).Seconds())
|
||||
}
|
||||
}
|
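The new timer.go above adds a small helper for timing code. A minimal usage sketch, assuming a hypothetical histogram backs the timer (anything satisfying Observer works, since a Histogram's Observe method makes it an Observer):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

// Illustrative: the histogram name and default buckets are assumptions.
var handlerDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "handler_duration_seconds",
	Help:    "Time spent in the handler.",
	Buckets: prometheus.DefBuckets,
})

func handle(w http.ResponseWriter, r *http.Request) {
	// Start the timer; ObserveDuration records the elapsed seconds on return.
	timer := prometheus.NewTimer(handlerDuration)
	defer timer.ObserveDuration()
	// ... actual work ...
	w.WriteHeader(http.StatusOK)
}
```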
20
vendor/github.com/prometheus/client_golang/prometheus/untyped.go
generated
vendored
@@ -13,8 +13,6 @@
|
||||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Untyped is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
@@ -22,6 +20,11 @@ import "hash/fnv"
|
||||
// no type information is implied.
|
||||
//
|
||||
// To create Untyped instances, use NewUntyped.
|
||||
//
|
||||
// Deprecated: The Untyped type is deprecated because it doesn't make sense in
|
||||
// direct instrumentation. If you need to mirror an external metric of unknown
|
||||
// type (usually while writing exporters), Use MustNewConstMetric to create an
|
||||
// untyped metric instance on the fly.
|
||||
type Untyped interface {
|
||||
Metric
|
||||
Collector
|
||||
@@ -58,7 +61,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
|
||||
// labels. This is used if you want to count the same thing partitioned by
|
||||
// various dimensions. Create instances with NewUntypedVec.
|
||||
type UntypedVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
||||
@@ -72,14 +75,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &UntypedVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
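The deprecation note above points users of Untyped toward MustNewConstMetric for mirroring an external metric of unknown type. A minimal exporter sketch along those lines; the descriptor name and the mirrored value are assumptions:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// Illustrative descriptor for a value mirrored from an external system.
var mirroredDesc = prometheus.NewDesc(
	"external_queue_depth",
	"Queue depth as reported by an external system.",
	nil, nil,
)

type externalExporter struct{}

func (e externalExporter) Describe(ch chan<- *prometheus.Desc) { ch <- mirroredDesc }

func (e externalExporter) Collect(ch chan<- prometheus.Metric) {
	v := 42.0 // value scraped from the external system
	// Create an untyped metric on the fly instead of using the deprecated Untyped type.
	ch <- prometheus.MustNewConstMetric(mirroredDesc, prometheus.UntypedValue, v)
}
```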
15
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
@@ -19,6 +19,7 @@ import (
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
@@ -43,12 +44,12 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|
||||
// ValueType. This is a low-level building block used by the library to back the
|
||||
// implementations of Counter, Gauge, and Untyped.
|
||||
type value struct {
|
||||
// valBits containst the bits of the represented float64 value. It has
|
||||
// valBits contains the bits of the represented float64 value. It has
|
||||
// to go first in the struct to guarantee alignment for atomic
|
||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
@@ -68,7 +69,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
|
||||
valBits: math.Float64bits(val),
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
result.Init(result)
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -80,6 +81,10 @@ func (v *value) Set(val float64) {
|
||||
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
|
||||
}
|
||||
|
||||
func (v *value) SetToCurrentTime() {
|
||||
v.Set(float64(time.Now().UnixNano()) / 1e9)
|
||||
}
|
||||
|
||||
func (v *value) Inc() {
|
||||
v.Add(1)
|
||||
}
|
||||
@@ -113,7 +118,7 @@ func (v *value) Write(out *dto.Metric) error {
|
||||
// library to back the implementations of CounterFunc, GaugeFunc, and
|
||||
// UntypedFunc.
|
||||
type valueFunc struct {
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
@@ -134,7 +139,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
|
||||
function: function,
|
||||
labelPairs: makeLabelPairs(desc, nil),
|
||||
}
|
||||
result.Init(result)
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
|
253
vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
@@ -14,10 +14,10 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// MetricVec is a Collector to bundle metrics of the same name that
|
||||
@@ -26,17 +26,32 @@ import (
|
||||
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
|
||||
// provided in this package.
|
||||
type MetricVec struct {
|
||||
mtx sync.RWMutex // Protects not only children, but also hash and buf.
|
||||
children map[uint64]Metric
|
||||
mtx sync.RWMutex // Protects the children.
|
||||
children map[uint64][]metricWithLabelValues
|
||||
desc *Desc
|
||||
|
||||
// hash is our own hash instance to avoid repeated allocations.
|
||||
hash hash.Hash64
|
||||
// buf is used to copy string contents into it for hashing,
|
||||
// again to avoid allocations.
|
||||
buf bytes.Buffer
|
||||
newMetric func(labelValues ...string) Metric
|
||||
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
|
||||
hashAddByte func(h uint64, b byte) uint64
|
||||
}
|
||||
|
||||
newMetric func(labelValues ...string) Metric
|
||||
// newMetricVec returns an initialized MetricVec. The concrete value is
|
||||
// returned for embedding into another struct.
|
||||
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
|
||||
return &MetricVec{
|
||||
children: map[uint64][]metricWithLabelValues{},
|
||||
desc: desc,
|
||||
newMetric: newMetric,
|
||||
hashAdd: hashAdd,
|
||||
hashAddByte: hashAddByte,
|
||||
}
|
||||
}
|
||||
|
||||
// metricWithLabelValues provides the metric and its label values for
|
||||
// disambiguation on hash collision.
|
||||
type metricWithLabelValues struct {
|
||||
values []string
|
||||
metric Metric
|
||||
}
|
||||
|
||||
// Describe implements Collector. The length of the returned slice
|
||||
@@ -50,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
|
||||
for _, metric := range m.children {
|
||||
ch <- metric
|
||||
for _, metrics := range m.children {
|
||||
for _, metric := range metrics {
|
||||
ch <- metric.metric
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,14 +97,12 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
|
||||
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
|
||||
}
|
||||
|
||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||
@@ -103,18 +118,12 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lvs := make([]string, len(labels))
|
||||
for i, label := range m.desc.variableLabels {
|
||||
lvs[i] = labels[label]
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
|
||||
return m.getOrCreateMetricWithLabels(h, labels), nil
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
||||
@@ -162,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
return true
|
||||
return m.deleteByHashWithLabelValues(h, lvs)
|
||||
}
|
||||
|
||||
// Delete deletes the metric where the variable labels are the same as those
|
||||
@@ -187,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
|
||||
return m.deleteByHashWithLabels(h, labels)
|
||||
}
|
||||
|
||||
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
|
||||
// there are multiple matches in the bucket, use lvs to select a metric and
|
||||
// remove only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
|
||||
metrics, ok := m.children[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
|
||||
i := m.findMetricWithLabelValues(metrics, lvs)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
|
||||
// are multiple matches in the bucket, use lvs to select a metric and remove
|
||||
// only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
|
||||
metrics, ok := m.children[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
i := m.findMetricWithLabels(metrics, labels)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -208,40 +253,152 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||
if len(vals) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
h := hashNew()
|
||||
for _, val := range vals {
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
||||
if len(labels) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
h := hashNew()
|
||||
for _, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||
}
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
|
||||
metric, ok := m.children[hash]
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabelValues(hash, lvs)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabelValues(hash, lvs)
|
||||
if !ok {
|
||||
// Copy labelValues. Otherwise, they would be allocated even if we don't go
|
||||
// down this code path.
|
||||
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
|
||||
metric = m.newMetric(copiedLabelValues...)
|
||||
m.children[hash] = metric
|
||||
// Copy to avoid allocation in case wo don't go down this code path.
|
||||
copiedLVs := make([]string, len(lvs))
|
||||
copy(copiedLVs, lvs)
|
||||
metric = m.newMetric(copiedLVs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabels(hash, labels)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabels(hash, labels)
|
||||
if !ok {
|
||||
lvs := m.extractLabelValues(labels)
|
||||
metric = m.newMetric(lvs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getMetricWithLabelValues gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getMetricWithLabels gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// findMetricWithLabelValues returns the index of the matching metric or
|
||||
// len(metrics) if not found.
|
||||
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabelValues(metric.values, lvs) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
// findMetricWithLabels returns the index of the matching metric or len(metrics)
|
||||
// if not found.
|
||||
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabels(metric.values, labels) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
|
||||
if len(values) != len(lvs) {
|
||||
return false
|
||||
}
|
||||
for i, v := range values {
|
||||
if v != lvs[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
|
||||
if len(labels) != len(values) {
|
||||
return false
|
||||
}
|
||||
for i, k := range m.desc.variableLabels {
|
||||
if values[i] != labels[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) extractLabelValues(labels Labels) []string {
|
||||
labelValues := make([]string, len(labels))
|
||||
for i, k := range m.desc.variableLabels {
|
||||
labelValues[i] = labels[k]
|
||||
}
|
||||
return labelValues
|
||||
}
|
||||
|
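The MetricVec rework above (pointer embedding, per-hash buckets with label-value disambiguation, allocation-free hashing, double-checked locking in the getOrCreate* helpers) is internal; callers of the typed vectors keep the same surface. A small sketch of the two lookup styles that ride on MetricVec, using a hypothetical CounterVec with assumed metric and label names:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

var requests = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "http_requests_total", Help: "Requests by code and method."},
	[]string{"code", "method"},
)

func countChecked(code, method string) {
	// Error-returning form: surfaces label-cardinality mistakes instead of panicking.
	c, err := requests.GetMetricWithLabelValues(code, method)
	if err != nil {
		log.Printf("bad labels: %v", err)
		return
	}
	c.Inc()
}

func countTerse(code, method string) {
	// Shortcut form: panics on bad labels but allows one-line chaining.
	requests.WithLabelValues(code, method).Inc()
}
```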
11
vendor/github.com/prometheus/common/AUTHORS.md
generated
vendored
@@ -1,11 +0,0 @@
|
||||
Maintainers of this repository:
|
||||
|
||||
* Fabian Reinartz <fabian@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Fabian Reinartz <fabian@soundcloud.com>
|
||||
* Julius Volz <julius@soundcloud.com>
|
||||
* Miguel Molina <hi@mvader.me>
|
3
vendor/github.com/prometheus/common/expfmt/BUILD
generated
vendored
@@ -13,16 +13,15 @@ go_library(
|
||||
"decode.go",
|
||||
"encode.go",
|
||||
"expfmt.go",
|
||||
"json_decode.go",
|
||||
"text_create.go",
|
||||
"text_parse.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//vendor/bitbucket.org/ww/goautoneg:go_default_library",
|
||||
"//vendor/github.com/golang/protobuf/proto:go_default_library",
|
||||
"//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_model/go:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
],
|
||||
)
|
||||
|
96
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
@@ -31,6 +31,7 @@ type Decoder interface {
|
||||
Decode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
// DecodeOptions contains options used by the Decoder and in sample extraction.
|
||||
type DecodeOptions struct {
|
||||
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
||||
Timestamp model.Time
|
||||
@@ -46,10 +47,7 @@ func ResponseFormat(h http.Header) Format {
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
const (
|
||||
textType = "text/plain"
|
||||
jsonType = "application/json"
|
||||
)
|
||||
const textType = "text/plain"
|
||||
|
||||
switch mediatype {
|
||||
case ProtoType:
|
||||
@@ -66,22 +64,6 @@ func ResponseFormat(h http.Header) Format {
|
||||
return FmtUnknown
|
||||
}
|
||||
return FmtText
|
||||
|
||||
case jsonType:
|
||||
var prometheusAPIVersion string
|
||||
|
||||
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
|
||||
prometheusAPIVersion = params["version"]
|
||||
} else {
|
||||
prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
|
||||
}
|
||||
|
||||
switch prometheusAPIVersion {
|
||||
case "0.0.2", "":
|
||||
return fmtJSON2
|
||||
default:
|
||||
return FmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
return FmtUnknown
|
||||
@@ -93,8 +75,6 @@ func NewDecoder(r io.Reader, format Format) Decoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return &protoDecoder{r: r}
|
||||
case fmtJSON2:
|
||||
return newJSON2Decoder(r)
|
||||
}
|
||||
return &textDecoder{r: r}
|
||||
}
|
||||
@@ -107,10 +87,32 @@ type protoDecoder struct {
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.ReadDelimited(d.r, v)
|
||||
return err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||
return fmt.Errorf("invalid metric name %q", v.GetName())
|
||||
}
|
||||
for _, m := range v.GetMetric() {
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
for _, l := range m.GetLabel() {
|
||||
if l == nil {
|
||||
continue
|
||||
}
|
||||
if !model.LabelValue(l.GetValue()).IsValid() {
|
||||
return fmt.Errorf("invalid label value %q", l.GetValue())
|
||||
}
|
||||
if !model.LabelName(l.GetName()).IsValid() {
|
||||
return fmt.Errorf("invalid label name %q", l.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// textDecoder implements the Decoder interface for the text protcol.
|
||||
// textDecoder implements the Decoder interface for the text protocol.
|
||||
type textDecoder struct {
|
||||
r io.Reader
|
||||
p TextParser
|
||||
@@ -141,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SampleDecoder wraps a Decoder to extract samples from the metric families
|
||||
// decoded by the wrapped Decoder.
|
||||
type SampleDecoder struct {
|
||||
Dec Decoder
|
||||
Opts *DecodeOptions
|
||||
@@ -148,37 +152,51 @@ type SampleDecoder struct {
|
||||
f dto.MetricFamily
|
||||
}
|
||||
|
||||
// Decode calls the Decode method of the wrapped Decoder and then extracts the
|
||||
// samples from the decoded MetricFamily into the provided model.Vector.
|
||||
func (sd *SampleDecoder) Decode(s *model.Vector) error {
|
||||
if err := sd.Dec.Decode(&sd.f); err != nil {
|
||||
err := sd.Dec.Decode(&sd.f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*s = extractSamples(&sd.f, sd.Opts)
|
||||
return nil
|
||||
*s, err = extractSamples(&sd.f, sd.Opts)
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract samples builds a slice of samples from the provided metric families.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
|
||||
var all model.Vector
|
||||
// ExtractSamples builds a slice of samples from the provided metric
|
||||
// families. If an error occurs during sample extraction, it continues to
|
||||
// extract from the remaining metric families. The returned error is the last
|
||||
// error that has occured.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
|
||||
var (
|
||||
all model.Vector
|
||||
lastErr error
|
||||
)
|
||||
for _, f := range fams {
|
||||
all = append(all, extractSamples(f, o)...)
|
||||
some, err := extractSamples(f, o)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
all = append(all, some...)
|
||||
}
|
||||
return all
|
||||
return all, lastErr
|
||||
}
|
||||
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
|
||||
switch f.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
return extractCounter(o, f)
|
||||
return extractCounter(o, f), nil
|
||||
case dto.MetricType_GAUGE:
|
||||
return extractGauge(o, f)
|
||||
return extractGauge(o, f), nil
|
||||
case dto.MetricType_SUMMARY:
|
||||
return extractSummary(o, f)
|
||||
return extractSummary(o, f), nil
|
||||
case dto.MetricType_UNTYPED:
|
||||
return extractUntyped(o, f)
|
||||
return extractUntyped(o, f), nil
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return extractHistogram(o, f)
|
||||
return extractHistogram(o, f), nil
|
||||
}
|
||||
panic("expfmt.extractSamples: unknown metric family type")
|
||||
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
|
||||
}
|
||||
|
||||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
|
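With the decode.go changes above, ExtractSamples no longer panics on an unknown metric family type; it returns the extracted samples plus the last error encountered. A sketch of a call site adapted to the new signature; reading text-format families from stdin is an assumption:

```go
package main

import (
	"fmt"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func printSamples() error {
	dec := expfmt.NewDecoder(os.Stdin, expfmt.FmtText)
	var fams []*dto.MetricFamily
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			break // io.EOF ends the stream; other errors are also terminal here
		}
		fams = append(fams, &mf)
	}
	// ExtractSamples keeps extracting from the remaining families and reports
	// the last error it hit, so partial results are still returned.
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, fams...)
	for _, s := range samples {
		fmt.Println(s)
	}
	return err
}
```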
2
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
@@ -18,9 +18,9 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"bitbucket.org/ww/goautoneg"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
Some files were not shown because too many files have changed in this diff