Update dependencies
Signed-off-by: Markus Lehtonen <markus.lehtonen@intel.com>
This commit is contained in:
8
vendor/github.com/cespare/xxhash/v2/.travis.yml
generated
vendored
8
vendor/github.com/cespare/xxhash/v2/.travis.yml
generated
vendored
@@ -1,8 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- "1.x"
|
||||
- master
|
||||
env:
|
||||
- TAGS=""
|
||||
- TAGS="-tags purego"
|
||||
script: go test $TAGS -v ./...
|
||||
6
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
6
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@@ -1,7 +1,7 @@
|
||||
# xxhash
|
||||
|
||||
[](https://godoc.org/github.com/cespare/xxhash)
|
||||
[](https://travis-ci.org/cespare/xxhash)
|
||||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
@@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
|
||||
1
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
1
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
b = b[len(d.mem):]
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
62
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
62
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@@ -6,7 +6,7 @@
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// SI pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
@@ -16,39 +16,39 @@
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
// DI prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
ADDQ DI, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
MOVQ ·prime4v(SB), DI
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
// Loop until SI > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
@@ -100,16 +100,16 @@ noBlocks:
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
@@ -117,18 +117,18 @@ wordLoop:
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
ADDQ DI, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
@@ -138,19 +138,19 @@ fourByte:
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
@@ -199,7 +199,7 @@ blockLoop:
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
CMPQ SI, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
@@ -208,8 +208,8 @@ blockLoop:
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is CX minus the old base pointer.
|
||||
SUBQ b_base+8(FP), CX
|
||||
MOVQ CX, ret+32(FP)
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
||||
55
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
55
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@@ -6,41 +6,52 @@
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Notes:
|
||||
//
|
||||
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
|
||||
// for some discussion about these unsafe conversions.
|
||||
//
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// unsafe operations unnecessary: https://golang.org/issue/2205.
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
||||
//
|
||||
// Both of these wrapper functions still incur function call overhead since they
|
||||
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
|
||||
// for strings to squeeze out a bit more speed. Mid-stack inlining should
|
||||
// eventually fix this.
|
||||
// var b []byte
|
||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
// bh.Len = len(s)
|
||||
// bh.Cap = len(s)
|
||||
//
|
||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
||||
// weight to this sequence of expressions that any function that uses it will
|
||||
// not be inlined. Instead, the functions below use a different unsafe
|
||||
// conversion designed to minimize the inliner weight and allow both to be
|
||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
||||
// inlined.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||
return Sum64(b)
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return d.Write(b)
|
||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
||||
// d.Write always returns len(s), nil.
|
||||
// Ignoring the return output and returning these fixed values buys a
|
||||
// savings of 6 in the inliner's cost model.
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
||||
// of the first two words is the same as the layout of a string.
|
||||
type sliceHeader struct {
|
||||
s string
|
||||
cap int
|
||||
}
|
||||
|
||||
11
vendor/github.com/google/gofuzz/.travis.yml
generated
vendored
11
vendor/github.com/google/gofuzz/.travis.yml
generated
vendored
@@ -1,13 +1,10 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.3
|
||||
- 1.2
|
||||
- tip
|
||||
|
||||
install:
|
||||
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- master
|
||||
|
||||
script:
|
||||
- go test -cover
|
||||
|
||||
2
vendor/github.com/google/gofuzz/CONTRIBUTING.md
generated
vendored
2
vendor/github.com/google/gofuzz/CONTRIBUTING.md
generated
vendored
@@ -1,7 +1,7 @@
|
||||
# How to contribute #
|
||||
|
||||
We'd love to accept your patches and contributions to this project. There are
|
||||
a just a few small guidelines you need to follow.
|
||||
just a few small guidelines you need to follow.
|
||||
|
||||
|
||||
## Contributor License Agreement ##
|
||||
|
||||
18
vendor/github.com/google/gofuzz/README.md
generated
vendored
18
vendor/github.com/google/gofuzz/README.md
generated
vendored
@@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
|
||||
|
||||
See more examples in ```example_test.go```.
|
||||
|
||||
You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
|
||||
go-fuzz provides the user a byte-slice, which should be converted to different inputs
|
||||
for the tested function. This library can help convert the byte slice. Consider for
|
||||
example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments:
|
||||
```go
|
||||
// +build gofuzz
|
||||
package mypackage
|
||||
|
||||
import fuzz "github.com/google/gofuzz"
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
var i int
|
||||
fuzz.NewFromGoFuzz(data).Fuzz(&i)
|
||||
MyFunc(i)
|
||||
return 0
|
||||
}
|
||||
```
|
||||
|
||||
Happy testing!
|
||||
|
||||
81
vendor/github.com/google/gofuzz/bytesource/bytesource.go
generated
vendored
Normal file
81
vendor/github.com/google/gofuzz/bytesource/bytesource.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
/*
|
||||
Copyright 2014 Google Inc. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
|
||||
package bytesource
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are
|
||||
// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a
|
||||
// fallback pseudo random source is created in case more random numbers are required.
|
||||
// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
|
||||
type ByteSource struct {
|
||||
*bytes.Reader
|
||||
fallback rand.Source
|
||||
}
|
||||
|
||||
// New returns a new ByteSource from a given slice of bytes.
|
||||
func New(input []byte) *ByteSource {
|
||||
s := &ByteSource{
|
||||
Reader: bytes.NewReader(input),
|
||||
fallback: rand.NewSource(0),
|
||||
}
|
||||
if len(input) > 0 {
|
||||
s.fallback = rand.NewSource(int64(s.consumeUint64()))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *ByteSource) Uint64() uint64 {
|
||||
// Return from input if it was not exhausted.
|
||||
if s.Len() > 0 {
|
||||
return s.consumeUint64()
|
||||
}
|
||||
|
||||
// Input was exhausted, return random number from fallback (in this case fallback should not be
|
||||
// nil). Try first having a Uint64 output (Should work in current rand implementation),
|
||||
// otherwise return a conversion of Int63.
|
||||
if s64, ok := s.fallback.(rand.Source64); ok {
|
||||
return s64.Uint64()
|
||||
}
|
||||
return uint64(s.fallback.Int63())
|
||||
}
|
||||
|
||||
func (s *ByteSource) Int63() int64 {
|
||||
return int64(s.Uint64() >> 1)
|
||||
}
|
||||
|
||||
func (s *ByteSource) Seed(seed int64) {
|
||||
s.fallback = rand.NewSource(seed)
|
||||
s.Reader = bytes.NewReader(nil)
|
||||
}
|
||||
|
||||
// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the
|
||||
// bytes reader is not empty.
|
||||
func (s *ByteSource) consumeUint64() uint64 {
|
||||
var bytes [8]byte
|
||||
_, err := s.Read(bytes[:])
|
||||
if err != nil && err != io.EOF {
|
||||
panic("failed reading source") // Should not happen.
|
||||
}
|
||||
return binary.BigEndian.Uint64(bytes[:])
|
||||
}
|
||||
137
vendor/github.com/google/gofuzz/fuzz.go
generated
vendored
137
vendor/github.com/google/gofuzz/fuzz.go
generated
vendored
@@ -22,6 +22,9 @@ import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/google/gofuzz/bytesource"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
|
||||
@@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer {
|
||||
return f
|
||||
}
|
||||
|
||||
// NewFromGoFuzz is a helper function that enables using gofuzz (this
|
||||
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
|
||||
// fuzzing. Essentially, it enables translating the fuzzing bytes from
|
||||
// go-fuzz to any Go object using this library.
|
||||
//
|
||||
// This implementation promises a constant translation from a given slice of
|
||||
// bytes to the fuzzed objects. This promise will remain over future
|
||||
// versions of Go and of this library.
|
||||
//
|
||||
// Note: the returned Fuzzer should not be shared between multiple goroutines,
|
||||
// as its deterministic output will no longer be available.
|
||||
//
|
||||
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
|
||||
// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content:
|
||||
//
|
||||
// // +build gofuzz
|
||||
// package mypacakge
|
||||
// import fuzz "github.com/google/gofuzz"
|
||||
// func Fuzz(data []byte) int {
|
||||
// var i int
|
||||
// fuzz.NewFromGoFuzz(data).Fuzz(&i)
|
||||
// MyFunc(i)
|
||||
// return 0
|
||||
// }
|
||||
func NewFromGoFuzz(data []byte) *Fuzzer {
|
||||
return New().RandSource(bytesource.New(data))
|
||||
}
|
||||
|
||||
// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
|
||||
//
|
||||
// Each entry in fuzzFuncs must be a function taking two parameters.
|
||||
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int {
|
||||
}
|
||||
|
||||
func (f *Fuzzer) genShouldFill() bool {
|
||||
return f.r.Float64() > f.nilChance
|
||||
return f.r.Float64() >= f.nilChance
|
||||
}
|
||||
|
||||
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
|
||||
@@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
|
||||
fn(v, fc.fuzzer.r)
|
||||
return
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Map:
|
||||
if fc.fuzzer.genShouldFill() {
|
||||
@@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
|
||||
v.SetFloat(r.Float64())
|
||||
},
|
||||
reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
|
||||
panic("unimplemented")
|
||||
v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
|
||||
},
|
||||
reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
|
||||
panic("unimplemented")
|
||||
v.SetComplex(complex(r.Float64(), r.Float64()))
|
||||
},
|
||||
reflect.String: func(v reflect.Value, r *rand.Rand) {
|
||||
v.SetString(randString(r))
|
||||
@@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
|
||||
|
||||
// randBool returns true or false randomly.
|
||||
func randBool(r *rand.Rand) bool {
|
||||
if r.Int()&1 == 1 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return r.Int31()&(1<<30) == 0
|
||||
}
|
||||
|
||||
type charRange struct {
|
||||
first, last rune
|
||||
type int63nPicker interface {
|
||||
Int63n(int64) int64
|
||||
}
|
||||
|
||||
// UnicodeRange describes a sequential range of unicode characters.
|
||||
// Last must be numerically greater than First.
|
||||
type UnicodeRange struct {
|
||||
First, Last rune
|
||||
}
|
||||
|
||||
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
|
||||
// To be useful, each range must have at least one character (First <= Last) and
|
||||
// there must be at least one range.
|
||||
type UnicodeRanges []UnicodeRange
|
||||
|
||||
// choose returns a random unicode character from the given range, using the
|
||||
// given randomness source.
|
||||
func (r *charRange) choose(rand *rand.Rand) rune {
|
||||
count := int64(r.last - r.first)
|
||||
return r.first + rune(rand.Int63n(count))
|
||||
func (ur UnicodeRange) choose(r int63nPicker) rune {
|
||||
count := int64(ur.Last - ur.First + 1)
|
||||
return ur.First + rune(r.Int63n(count))
|
||||
}
|
||||
|
||||
var unicodeRanges = []charRange{
|
||||
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
|
||||
// Each character is selected from the range ur. If there are no characters
|
||||
// in the range (cr.Last < cr.First), this will panic.
|
||||
func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
|
||||
ur.check()
|
||||
return func(s *string, c Continue) {
|
||||
*s = ur.randString(c.Rand)
|
||||
}
|
||||
}
|
||||
|
||||
// check is a function that used to check whether the first of ur(UnicodeRange)
|
||||
// is greater than the last one.
|
||||
func (ur UnicodeRange) check() {
|
||||
if ur.Last < ur.First {
|
||||
panic("The last encoding must be greater than the first one.")
|
||||
}
|
||||
}
|
||||
|
||||
// randString of UnicodeRange makes a random string up to 20 characters long.
|
||||
// Each character is selected form ur(UnicodeRange).
|
||||
func (ur UnicodeRange) randString(r *rand.Rand) string {
|
||||
n := r.Intn(20)
|
||||
sb := strings.Builder{}
|
||||
sb.Grow(n)
|
||||
for i := 0; i < n; i++ {
|
||||
sb.WriteRune(ur.choose(r))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// defaultUnicodeRanges sets a default unicode range when user do not set
|
||||
// CustomStringFuzzFunc() but wants fuzz string.
|
||||
var defaultUnicodeRanges = UnicodeRanges{
|
||||
{' ', '~'}, // ASCII characters
|
||||
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
|
||||
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
|
||||
}
|
||||
|
||||
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
|
||||
// Each character is selected from one of the ranges of ur(UnicodeRanges).
|
||||
// Each range has an equal probability of being chosen. If there are no ranges,
|
||||
// or a selected range has no characters (.Last < .First), this will panic.
|
||||
// Do not modify any of the ranges in ur after calling this function.
|
||||
func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
|
||||
// Check unicode ranges slice is empty.
|
||||
if len(ur) == 0 {
|
||||
panic("UnicodeRanges is empty.")
|
||||
}
|
||||
// if not empty, each range should be checked.
|
||||
for i := range ur {
|
||||
ur[i].check()
|
||||
}
|
||||
return func(s *string, c Continue) {
|
||||
*s = ur.randString(c.Rand)
|
||||
}
|
||||
}
|
||||
|
||||
// randString of UnicodeRanges makes a random string up to 20 characters long.
|
||||
// Each character is selected form one of the ranges of ur(UnicodeRanges),
|
||||
// and each range has an equal probability of being chosen.
|
||||
func (ur UnicodeRanges) randString(r *rand.Rand) string {
|
||||
n := r.Intn(20)
|
||||
sb := strings.Builder{}
|
||||
sb.Grow(n)
|
||||
for i := 0; i < n; i++ {
|
||||
sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// randString makes a random string up to 20 characters long. The returned string
|
||||
// may include a variety of (valid) UTF-8 encodings.
|
||||
func randString(r *rand.Rand) string {
|
||||
n := r.Intn(20)
|
||||
runes := make([]rune, n)
|
||||
for i := range runes {
|
||||
runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
|
||||
}
|
||||
return string(runes)
|
||||
return defaultUnicodeRanges.randString(r)
|
||||
}
|
||||
|
||||
// randUint64 makes random 64 bit numbers.
|
||||
|
||||
9
vendor/github.com/hashicorp/errwrap/errwrap.go
generated
vendored
9
vendor/github.com/hashicorp/errwrap/errwrap.go
generated
vendored
@@ -44,6 +44,8 @@ func Wrap(outer, inner error) error {
|
||||
//
|
||||
// format is the format of the error message. The string '{{err}}' will
|
||||
// be replaced with the original error message.
|
||||
//
|
||||
// Deprecated: Use fmt.Errorf()
|
||||
func Wrapf(format string, err error) error {
|
||||
outerMsg := "<nil>"
|
||||
if err != nil {
|
||||
@@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) {
|
||||
for _, err := range e.WrappedErrors() {
|
||||
Walk(err, cb)
|
||||
}
|
||||
case interface{ Unwrap() error }:
|
||||
cb(err)
|
||||
Walk(e.Unwrap(), cb)
|
||||
default:
|
||||
cb(err)
|
||||
}
|
||||
@@ -167,3 +172,7 @@ func (w *wrappedError) Error() string {
|
||||
func (w *wrappedError) WrappedErrors() []error {
|
||||
return []error{w.Outer, w.Inner}
|
||||
}
|
||||
|
||||
func (w *wrappedError) Unwrap() error {
|
||||
return w.Inner
|
||||
}
|
||||
|
||||
12
vendor/github.com/hashicorp/go-multierror/.travis.yml
generated
vendored
12
vendor/github.com/hashicorp/go-multierror/.travis.yml
generated
vendored
@@ -1,12 +0,0 @@
|
||||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make test testrace
|
||||
69
vendor/github.com/hashicorp/go-multierror/README.md
generated
vendored
69
vendor/github.com/hashicorp/go-multierror/README.md
generated
vendored
@@ -1,10 +1,11 @@
|
||||
# go-multierror
|
||||
|
||||
[][travis]
|
||||
[][godocs]
|
||||
[](https://circleci.com/gh/hashicorp/go-multierror)
|
||||
[](https://pkg.go.dev/github.com/hashicorp/go-multierror)
|
||||

|
||||
|
||||
[travis]: https://travis-ci.org/hashicorp/go-multierror
|
||||
[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
|
||||
[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror
|
||||
[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror
|
||||
|
||||
`go-multierror` is a package for Go that provides a mechanism for
|
||||
representing a list of `error` values as a single `error`.
|
||||
@@ -14,16 +15,35 @@ be a list of errors. If the caller knows this, they can unwrap the
|
||||
list and access the errors. If the caller doesn't know, the error
|
||||
formats to a nice human-readable format.
|
||||
|
||||
`go-multierror` implements the
|
||||
[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
|
||||
be used with that library, as well.
|
||||
`go-multierror` is fully compatible with the Go standard library
|
||||
[errors](https://golang.org/pkg/errors/) package, including the
|
||||
functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
|
||||
for introspecting on error values.
|
||||
|
||||
## Installation and Docs
|
||||
|
||||
Install using `go get github.com/hashicorp/go-multierror`.
|
||||
|
||||
Full documentation is available at
|
||||
http://godoc.org/github.com/hashicorp/go-multierror
|
||||
https://pkg.go.dev/github.com/hashicorp/go-multierror
|
||||
|
||||
### Requires go version 1.13 or newer
|
||||
|
||||
`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced
|
||||
[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which
|
||||
this library takes advantage of.
|
||||
|
||||
If you need to use an earlier version of go, you can use the
|
||||
[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
|
||||
tag, which doesn't rely on features in go 1.13.
|
||||
|
||||
If you see compile errors that look like the below, it's likely that
|
||||
you're on an older version of go:
|
||||
|
||||
```
|
||||
/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As
|
||||
/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -81,6 +101,39 @@ if err := something(); err != nil {
|
||||
}
|
||||
```
|
||||
|
||||
You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
|
||||
function. This will continue to unwrap into subsequent errors until none exist.
|
||||
|
||||
**Extracting an error**
|
||||
|
||||
The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
|
||||
function can be used directly with a multierror to extract a specific error:
|
||||
|
||||
```go
|
||||
// Assume err is a multierror value
|
||||
err := somefunc()
|
||||
|
||||
// We want to know if "err" has a "RichErrorType" in it and extract it.
|
||||
var errRich RichErrorType
|
||||
if errors.As(err, &errRich) {
|
||||
// It has it, and now errRich is populated.
|
||||
}
|
||||
```
|
||||
|
||||
**Checking for an exact error value**
|
||||
|
||||
Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
|
||||
error in the `os` package. You can check if this error is present by using
|
||||
the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
|
||||
|
||||
```go
|
||||
// Assume err is a multierror value
|
||||
err := somefunc()
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// err contains os.ErrNotExist
|
||||
}
|
||||
```
|
||||
|
||||
**Returning a multierror only if there are errors**
|
||||
|
||||
If you build a `multierror.Error`, you can use the `ErrorOrNil` function
|
||||
|
||||
2
vendor/github.com/hashicorp/go-multierror/append.go
generated
vendored
2
vendor/github.com/hashicorp/go-multierror/append.go
generated
vendored
@@ -6,6 +6,8 @@ package multierror
|
||||
// If err is not a multierror.Error, then it will be turned into
|
||||
// one. If any of the errs are multierr.Error, they will be flattened
|
||||
// one level into err.
|
||||
// Any nil errors within errs will be ignored. If err is nil, a new
|
||||
// *Error will be returned.
|
||||
func Append(err error, errs ...error) *Error {
|
||||
switch err := err.(type) {
|
||||
case *Error:
|
||||
|
||||
2
vendor/github.com/hashicorp/go-multierror/go.mod
generated
vendored
2
vendor/github.com/hashicorp/go-multierror/go.mod
generated
vendored
@@ -1,3 +1,5 @@
|
||||
module github.com/hashicorp/go-multierror
|
||||
|
||||
go 1.13
|
||||
|
||||
require github.com/hashicorp/errwrap v1.0.0
|
||||
|
||||
2
vendor/github.com/hashicorp/go-multierror/go.sum
generated
vendored
2
vendor/github.com/hashicorp/go-multierror/go.sum
generated
vendored
@@ -1,4 +1,2 @@
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
|
||||
38
vendor/github.com/hashicorp/go-multierror/group.go
generated
vendored
Normal file
38
vendor/github.com/hashicorp/go-multierror/group.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package multierror
|
||||
|
||||
import "sync"
|
||||
|
||||
// Group is a collection of goroutines which return errors that need to be
|
||||
// coalesced.
|
||||
type Group struct {
|
||||
mutex sync.Mutex
|
||||
err *Error
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// Go calls the given function in a new goroutine.
|
||||
//
|
||||
// If the function returns an error it is added to the group multierror which
|
||||
// is returned by Wait.
|
||||
func (g *Group) Go(f func() error) {
|
||||
g.wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer g.wg.Done()
|
||||
|
||||
if err := f(); err != nil {
|
||||
g.mutex.Lock()
|
||||
g.err = Append(g.err, err)
|
||||
g.mutex.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait blocks until all function calls from the Go method have returned, then
|
||||
// returns the multierror.
|
||||
func (g *Group) Wait() *Error {
|
||||
g.wg.Wait()
|
||||
g.mutex.Lock()
|
||||
defer g.mutex.Unlock()
|
||||
return g.err
|
||||
}
|
||||
82
vendor/github.com/hashicorp/go-multierror/multierror.go
generated
vendored
82
vendor/github.com/hashicorp/go-multierror/multierror.go
generated
vendored
@@ -1,6 +1,7 @@
|
||||
package multierror
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
@@ -39,13 +40,82 @@ func (e *Error) GoString() string {
|
||||
return fmt.Sprintf("*%#v", *e)
|
||||
}
|
||||
|
||||
// WrappedErrors returns the list of errors that this Error is wrapping.
|
||||
// It is an implementation of the errwrap.Wrapper interface so that
|
||||
// multierror.Error can be used with that library.
|
||||
// WrappedErrors returns the list of errors that this Error is wrapping. It is
|
||||
// an implementation of the errwrap.Wrapper interface so that multierror.Error
|
||||
// can be used with that library.
|
||||
//
|
||||
// This method is not safe to be called concurrently and is no different
|
||||
// than accessing the Errors field directly. It is implemented only to
|
||||
// satisfy the errwrap.Wrapper interface.
|
||||
// This method is not safe to be called concurrently. Unlike accessing the
|
||||
// Errors field directly, this function also checks if the multierror is nil to
|
||||
// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
|
||||
func (e *Error) WrappedErrors() []error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
return e.Errors
|
||||
}
|
||||
|
||||
// Unwrap returns an error from Error (or nil if there are no errors).
|
||||
// This error returned will further support Unwrap to get the next error,
|
||||
// etc. The order will match the order of Errors in the multierror.Error
|
||||
// at the time of calling.
|
||||
//
|
||||
// The resulting error supports errors.As/Is/Unwrap so you can continue
|
||||
// to use the stdlib errors package to introspect further.
|
||||
//
|
||||
// This will perform a shallow copy of the errors slice. Any errors appended
|
||||
// to this error after calling Unwrap will not be available until a new
|
||||
// Unwrap is called on the multierror.Error.
|
||||
func (e *Error) Unwrap() error {
|
||||
// If we have no errors then we do nothing
|
||||
if e == nil || len(e.Errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we have exactly one error, we can just return that directly.
|
||||
if len(e.Errors) == 1 {
|
||||
return e.Errors[0]
|
||||
}
|
||||
|
||||
// Shallow copy the slice
|
||||
errs := make([]error, len(e.Errors))
|
||||
copy(errs, e.Errors)
|
||||
return chain(errs)
|
||||
}
|
||||
|
||||
// chain implements the interfaces necessary for errors.Is/As/Unwrap to
|
||||
// work in a deterministic way with multierror. A chain tracks a list of
|
||||
// errors while accounting for the current represented error. This lets
|
||||
// Is/As be meaningful.
|
||||
//
|
||||
// Unwrap returns the next error. In the cleanest form, Unwrap would return
|
||||
// the wrapped error here but we can't do that if we want to properly
|
||||
// get access to all the errors. Instead, users are recommended to use
|
||||
// Is/As to get the correct error type out.
|
||||
//
|
||||
// Precondition: []error is non-empty (len > 0)
|
||||
type chain []error
|
||||
|
||||
// Error implements the error interface
|
||||
func (e chain) Error() string {
|
||||
return e[0].Error()
|
||||
}
|
||||
|
||||
// Unwrap implements errors.Unwrap by returning the next error in the
|
||||
// chain or nil if there are no more errors.
|
||||
func (e chain) Unwrap() error {
|
||||
if len(e) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e[1:]
|
||||
}
|
||||
|
||||
// As implements errors.As by attempting to map to the current value.
|
||||
func (e chain) As(target interface{}) bool {
|
||||
return errors.As(e[0], target)
|
||||
}
|
||||
|
||||
// Is implements errors.Is by comparing the current value directly.
|
||||
func (e chain) Is(target error) bool {
|
||||
return errors.Is(e[0], target)
|
||||
}
|
||||
|
||||
201
vendor/github.com/intel/goresctrl/LICENSE
generated
vendored
Normal file
201
vendor/github.com/intel/goresctrl/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
58
vendor/github.com/intel/goresctrl/pkg/kubernetes/annotations.go
generated
vendored
Normal file
58
vendor/github.com/intel/goresctrl/pkg/kubernetes/annotations.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
Copyright 2021 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubernetes
|
||||
|
||||
// ClassOrigin type indicates the source of container's class
|
||||
// information: whether it is found from CRI level container
|
||||
// annotations, Kubernetes' pod annotations, or it has not been found
|
||||
// at all.
|
||||
type ClassOrigin int
|
||||
|
||||
const (
|
||||
ClassOriginNotFound ClassOrigin = iota
|
||||
ClassOriginContainerAnnotation
|
||||
ClassOriginPodAnnotation
|
||||
)
|
||||
|
||||
func (c ClassOrigin) String() string {
|
||||
switch c {
|
||||
case ClassOriginNotFound:
|
||||
return "<not found>"
|
||||
case ClassOriginContainerAnnotation:
|
||||
return "container annotations"
|
||||
case ClassOriginPodAnnotation:
|
||||
return "pod annotations"
|
||||
default:
|
||||
return "<unknown>"
|
||||
}
|
||||
}
|
||||
|
||||
// ContainerClassFromAnnotations determines the effective class of a
|
||||
// container from the Pod annotations and CRI level container
|
||||
// annotations of a container.
|
||||
func ContainerClassFromAnnotations(containerAnnotation, podAnnotation, podAnnotationContainerPrefix string, containerName string, containerAnnotations, podAnnotations map[string]string) (string, ClassOrigin) {
|
||||
if clsName, ok := containerAnnotations[containerAnnotation]; ok {
|
||||
return clsName, ClassOriginContainerAnnotation
|
||||
}
|
||||
if clsName, ok := podAnnotations[podAnnotationContainerPrefix+containerName]; ok {
|
||||
return clsName, ClassOriginPodAnnotation
|
||||
}
|
||||
if clsName, ok := podAnnotations[podAnnotation]; ok {
|
||||
return clsName, ClassOriginPodAnnotation
|
||||
}
|
||||
return "", ClassOriginNotFound
|
||||
}
|
||||
85
vendor/github.com/intel/goresctrl/pkg/log/log.go
generated
vendored
Normal file
85
vendor/github.com/intel/goresctrl/pkg/log/log.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2019-2021 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
stdlog "log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Logger is the logging interface for goresctl
|
||||
type Logger interface {
|
||||
Debugf(format string, v ...interface{})
|
||||
Infof(format string, v ...interface{})
|
||||
Warnf(format string, v ...interface{})
|
||||
Errorf(format string, v ...interface{})
|
||||
Panicf(format string, v ...interface{})
|
||||
Fatalf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
*stdlog.Logger
|
||||
}
|
||||
|
||||
// NewLoggerWrapper wraps an implementation of the golang standard intreface
|
||||
// into a goresctl specific compatible logger interface
|
||||
func NewLoggerWrapper(l *stdlog.Logger) Logger {
|
||||
return &logger{Logger: l}
|
||||
}
|
||||
|
||||
func (l *logger) Debugf(format string, v ...interface{}) {
|
||||
l.Logger.Printf("DEBUG: "+format, v...)
|
||||
}
|
||||
|
||||
func (l *logger) Infof(format string, v ...interface{}) {
|
||||
l.Logger.Printf("INFO: "+format, v...)
|
||||
}
|
||||
|
||||
func (l *logger) Warnf(format string, v ...interface{}) {
|
||||
l.Logger.Printf("WARN: "+format, v...)
|
||||
}
|
||||
|
||||
func (l *logger) Errorf(format string, v ...interface{}) {
|
||||
l.Logger.Printf("ERROR: "+format, v...)
|
||||
}
|
||||
|
||||
func (l *logger) Panicf(format string, v ...interface{}) {
|
||||
l.Logger.Panicf(format, v...)
|
||||
}
|
||||
|
||||
func (l *logger) Fatalf(format string, v ...interface{}) {
|
||||
l.Logger.Fatalf(format, v...)
|
||||
}
|
||||
|
||||
func InfoBlock(l Logger, heading, linePrefix, format string, v ...interface{}) {
|
||||
l.Infof("%s", heading)
|
||||
|
||||
lines := strings.Split(fmt.Sprintf(format, v...), "\n")
|
||||
for _, line := range lines {
|
||||
l.Infof("%s%s", linePrefix, line)
|
||||
}
|
||||
}
|
||||
|
||||
func DebugBlock(l Logger, heading, linePrefix, format string, v ...interface{}) {
|
||||
l.Debugf("%s", heading)
|
||||
|
||||
lines := strings.Split(fmt.Sprintf(format, v...), "\n")
|
||||
for _, line := range lines {
|
||||
l.Debugf("%s%s", linePrefix, line)
|
||||
}
|
||||
}
|
||||
118
vendor/github.com/intel/goresctrl/pkg/rdt/bitmask.go
generated
vendored
Normal file
118
vendor/github.com/intel/goresctrl/pkg/rdt/bitmask.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
Copyright 2019 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rdt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// bitmask represents a generic 64 bit wide bitmask
|
||||
type bitmask uint64
|
||||
|
||||
// MarshalJSON implements the Marshaler interface of "encoding/json"
|
||||
func (b bitmask) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("\"%#x\"", b)), nil
|
||||
}
|
||||
|
||||
// listStr prints the bitmask in human-readable format, similar to e.g. the
|
||||
// cpuset format of the Linux kernel
|
||||
func (b bitmask) listStr() string {
|
||||
str := ""
|
||||
sep := ""
|
||||
|
||||
shift := int(0)
|
||||
lsbOne := b.lsbOne()
|
||||
|
||||
// Process "ranges of ones"
|
||||
for lsbOne != -1 {
|
||||
b >>= uint(lsbOne)
|
||||
|
||||
// Get range lenght from the position of the first zero
|
||||
numOnes := b.lsbZero()
|
||||
|
||||
if numOnes == 1 {
|
||||
str += sep + strconv.Itoa(lsbOne+shift)
|
||||
} else {
|
||||
str += sep + strconv.Itoa(lsbOne+shift) + "-" + strconv.Itoa(lsbOne+numOnes-1+shift)
|
||||
}
|
||||
|
||||
// Shift away the bits that have been processed
|
||||
b >>= uint(numOnes)
|
||||
shift += lsbOne + numOnes
|
||||
|
||||
// Get next bit that is set (if any)
|
||||
lsbOne = b.lsbOne()
|
||||
|
||||
sep = ","
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
// listStrToBitmask parses a string containing a human-readable list of bit
|
||||
// numbers into a bitmask
|
||||
func listStrToBitmask(str string) (bitmask, error) {
|
||||
b := bitmask(0)
|
||||
|
||||
// Empty bitmask
|
||||
if len(str) == 0 {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
ranges := strings.Split(str, ",")
|
||||
for _, ran := range ranges {
|
||||
split := strings.SplitN(ran, "-", 2)
|
||||
|
||||
bitNum, err := strconv.ParseUint(split[0], 10, 6)
|
||||
if err != nil {
|
||||
return b, fmt.Errorf("invalid bitmask %q: %v", str, err)
|
||||
}
|
||||
|
||||
if len(split) == 1 {
|
||||
b |= 1 << bitNum
|
||||
} else {
|
||||
endNum, err := strconv.ParseUint(split[1], 10, 6)
|
||||
if err != nil {
|
||||
return b, fmt.Errorf("invalid bitmask %q: %v", str, err)
|
||||
}
|
||||
if endNum <= bitNum {
|
||||
return b, fmt.Errorf("invalid range %q in bitmask %q", ran, str)
|
||||
}
|
||||
b |= (1<<(endNum-bitNum+1) - 1) << bitNum
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (b bitmask) lsbOne() int {
|
||||
if b == 0 {
|
||||
return -1
|
||||
}
|
||||
return bits.TrailingZeros64(uint64(b))
|
||||
}
|
||||
|
||||
func (b bitmask) msbOne() int {
|
||||
// Returns -1 for b == 0
|
||||
return 63 - bits.LeadingZeros64(uint64(b))
|
||||
}
|
||||
|
||||
func (b bitmask) lsbZero() int {
|
||||
return bits.TrailingZeros64(^uint64(b))
|
||||
}
|
||||
1193
vendor/github.com/intel/goresctrl/pkg/rdt/config.go
generated
vendored
Normal file
1193
vendor/github.com/intel/goresctrl/pkg/rdt/config.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
341
vendor/github.com/intel/goresctrl/pkg/rdt/info.go
generated
vendored
Normal file
341
vendor/github.com/intel/goresctrl/pkg/rdt/info.go
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
||||
/*
|
||||
Copyright 2019-2021 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rdt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// resctrlInfo contains information about the RDT support in the system
|
||||
type resctrlInfo struct {
|
||||
resctrlPath string
|
||||
resctrlMountOpts map[string]struct{}
|
||||
numClosids uint64
|
||||
cat map[cacheLevel]catInfoAll
|
||||
l3mon l3MonInfo
|
||||
mb mbInfo
|
||||
}
|
||||
|
||||
type cacheLevel string
|
||||
|
||||
const (
|
||||
L2 cacheLevel = "L2"
|
||||
L3 cacheLevel = "L3"
|
||||
)
|
||||
|
||||
type catInfoAll struct {
|
||||
cacheIds []uint64
|
||||
unified catInfo
|
||||
code catInfo
|
||||
data catInfo
|
||||
}
|
||||
|
||||
type catInfo struct {
|
||||
cbmMask bitmask
|
||||
minCbmBits uint64
|
||||
shareableBits bitmask
|
||||
}
|
||||
|
||||
type l3MonInfo struct {
|
||||
numRmids uint64
|
||||
monFeatures []string
|
||||
}
|
||||
|
||||
type mbInfo struct {
|
||||
cacheIds []uint64
|
||||
bandwidthGran uint64
|
||||
delayLinear uint64
|
||||
minBandwidth uint64
|
||||
mbpsEnabled bool // true if MBA_MBps is enabled
|
||||
}
|
||||
|
||||
var mountInfoPath string = "/proc/mounts"
|
||||
|
||||
// getInfo is a helper method for a "unified API" for getting L3 information
|
||||
func (i catInfoAll) getInfo() catInfo {
|
||||
switch {
|
||||
case i.code.Supported():
|
||||
return i.code
|
||||
case i.data.Supported():
|
||||
return i.data
|
||||
}
|
||||
return i.unified
|
||||
}
|
||||
|
||||
func (i catInfoAll) cbmMask() bitmask {
|
||||
mask := i.getInfo().cbmMask
|
||||
if mask != 0 {
|
||||
return mask
|
||||
}
|
||||
return bitmask(^uint64(0))
|
||||
}
|
||||
|
||||
func (i catInfoAll) minCbmBits() uint64 {
|
||||
return i.getInfo().minCbmBits
|
||||
}
|
||||
|
||||
func getRdtInfo() (*resctrlInfo, error) {
|
||||
var err error
|
||||
info := &resctrlInfo{cat: make(map[cacheLevel]catInfoAll)}
|
||||
|
||||
info.resctrlPath, info.resctrlMountOpts, err = getResctrlMountInfo()
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("failed to detect resctrl mount point: %v", err)
|
||||
}
|
||||
log.Infof("detected resctrl filesystem at %q", info.resctrlPath)
|
||||
|
||||
// Check that RDT is available
|
||||
infopath := filepath.Join(info.resctrlPath, "info")
|
||||
if _, err := os.Stat(infopath); err != nil {
|
||||
return info, fmt.Errorf("failed to read RDT info from %q: %v", infopath, err)
|
||||
}
|
||||
|
||||
// Check CAT feature available
|
||||
for _, cl := range []cacheLevel{L2, L3} {
|
||||
cat := catInfoAll{}
|
||||
catFeatures := map[string]*catInfo{
|
||||
"": &cat.unified,
|
||||
"CODE": &cat.code,
|
||||
"DATA": &cat.data,
|
||||
}
|
||||
for suffix, i := range catFeatures {
|
||||
dir := string(cl) + suffix
|
||||
subpath := filepath.Join(infopath, dir)
|
||||
if _, err = os.Stat(subpath); err == nil {
|
||||
*i, info.numClosids, err = getCatInfo(subpath)
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("failed to get %s info from %q: %v", dir, subpath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if cat.getInfo().Supported() {
|
||||
cat.cacheIds, err = getCacheIds(info.resctrlPath, string(cl))
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("failed to get %s CAT cache IDs: %v", cl, err)
|
||||
}
|
||||
}
|
||||
info.cat[cl] = cat
|
||||
}
|
||||
|
||||
// Check MON features available
|
||||
subpath := filepath.Join(infopath, "L3_MON")
|
||||
if _, err = os.Stat(subpath); err == nil {
|
||||
info.l3mon, err = getL3MonInfo(subpath)
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("failed to get L3_MON info from %q: %v", subpath, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check MBA feature available
|
||||
subpath = filepath.Join(infopath, "MB")
|
||||
if _, err = os.Stat(subpath); err == nil {
|
||||
info.mb, info.numClosids, err = getMBInfo(subpath)
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("failed to get MBA info from %q: %v", subpath, err)
|
||||
}
|
||||
|
||||
info.mb.cacheIds, err = getCacheIds(info.resctrlPath, "MB")
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("failed to get MBA cache IDs: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func getCatInfo(basepath string) (catInfo, uint64, error) {
|
||||
var err error
|
||||
var numClosids uint64
|
||||
info := catInfo{}
|
||||
|
||||
info.cbmMask, err = readFileBitmask(filepath.Join(basepath, "cbm_mask"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
info.minCbmBits, err = readFileUint64(filepath.Join(basepath, "min_cbm_bits"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
info.shareableBits, err = readFileBitmask(filepath.Join(basepath, "shareable_bits"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
numClosids, err = readFileUint64(filepath.Join(basepath, "num_closids"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
|
||||
return info, numClosids, nil
|
||||
}
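For orientation, getCatInfo reads the per-resource resctrl info files named in the code above; on a typical system they might hold values like these (illustrative only, not part of this change):

//   /sys/fs/resctrl/info/L3/cbm_mask       -> "7ff"  (parsed as a hex bitmask)
//   /sys/fs/resctrl/info/L3/min_cbm_bits   -> "1"
//   /sys/fs/resctrl/info/L3/shareable_bits -> "600"
//   /sys/fs/resctrl/info/L3/num_closids    -> "16"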
|
||||
|
||||
// Supported returns true if L3 cache allocation is supported and enabled in the system
|
||||
func (i catInfo) Supported() bool {
|
||||
return i.cbmMask != 0
|
||||
}
|
||||
|
||||
func getL3MonInfo(basepath string) (l3MonInfo, error) {
|
||||
var err error
|
||||
info := l3MonInfo{}
|
||||
|
||||
info.numRmids, err = readFileUint64(filepath.Join(basepath, "num_rmids"))
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
lines, err := readFileString(filepath.Join(basepath, "mon_features"))
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
info.monFeatures = strings.Split(lines, "\n")
|
||||
sort.Strings(info.monFeatures)
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Supported returns true if L3 monitoring is supported and enabled in the system
|
||||
func (i l3MonInfo) Supported() bool {
|
||||
return i.numRmids != 0 && len(i.monFeatures) > 0
|
||||
}
|
||||
|
||||
func getMBInfo(basepath string) (mbInfo, uint64, error) {
|
||||
var err error
|
||||
var numClosids uint64
|
||||
info := mbInfo{}
|
||||
|
||||
info.bandwidthGran, err = readFileUint64(filepath.Join(basepath, "bandwidth_gran"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
info.delayLinear, err = readFileUint64(filepath.Join(basepath, "delay_linear"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
info.minBandwidth, err = readFileUint64(filepath.Join(basepath, "min_bandwidth"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
numClosids, err = readFileUint64(filepath.Join(basepath, "num_closids"))
|
||||
if err != nil {
|
||||
return info, numClosids, err
|
||||
}
|
||||
|
||||
// Detect MBps mode directly from mount options as it's not visible in MB
|
||||
// info directory
|
||||
_, mountOpts, err := getResctrlMountInfo()
|
||||
if err != nil {
|
||||
return info, numClosids, fmt.Errorf("failed to get resctrl mount options: %v", err)
|
||||
}
|
||||
if _, ok := mountOpts["mba_MBps"]; ok {
|
||||
info.mbpsEnabled = true
|
||||
}
|
||||
|
||||
return info, numClosids, nil
|
||||
}
|
||||
|
||||
// Supported returns true if memory bandwidth allocation is supported and enabled in the system
|
||||
func (i mbInfo) Supported() bool {
|
||||
return i.minBandwidth != 0
|
||||
}
|
||||
|
||||
func getCacheIds(basepath string, prefix string) ([]uint64, error) {
|
||||
var ids []uint64
|
||||
|
||||
// Parse cache IDs from the root schemata
|
||||
data, err := readFileString(filepath.Join(basepath, "schemata"))
|
||||
if err != nil {
|
||||
return ids, fmt.Errorf("failed to read root schemata: %v", err)
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(data, "\n") {
|
||||
trimmed := strings.TrimSpace(line)
|
||||
lineSplit := strings.SplitN(trimmed, ":", 2)
|
||||
|
||||
// Find line with given resource prefix
|
||||
if len(lineSplit) == 2 && strings.HasPrefix(lineSplit[0], prefix) {
|
||||
schema := strings.Split(lineSplit[1], ";")
|
||||
ids = make([]uint64, len(schema))
|
||||
|
||||
// Get individual cache configurations from the schema
|
||||
for idx, definition := range schema {
|
||||
split := strings.Split(definition, "=")
|
||||
if len(split) != 2 {
|
||||
return ids, fmt.Errorf("looks like an invalid schema %q", trimmed)
|
||||
}
|
||||
ids[idx], err = strconv.ParseUint(split[0], 10, 64)
|
||||
if err != nil {
|
||||
return ids, fmt.Errorf("failed to parse cache id in %q: %v", trimmed, err)
|
||||
}
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
}
|
||||
return ids, fmt.Errorf("no %s resources in root schemata", prefix)
|
||||
}
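getCacheIds pulls the domain IDs out of the root schemata; a hedged example of the input it expects (actual contents vary per system):

// Root schemata such as (illustrative):
//   L3:0=7ff;1=7ff
//   MB:0=100;1=100
// yields getCacheIds(path, "L3") == []uint64{0, 1}: each "<id>=<value>"
// element contributes its numeric id, the value part is ignored here.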
|
||||
|
||||
func getResctrlMountInfo() (string, map[string]struct{}, error) {
|
||||
mountOptions := map[string]struct{}{}
|
||||
|
||||
f, err := os.Open(mountInfoPath)
|
||||
if err != nil {
|
||||
return "", mountOptions, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
split := strings.Split(s.Text(), " ")
|
||||
if len(split) > 3 && split[2] == "resctrl" {
|
||||
opts := strings.Split(split[3], ",")
|
||||
for _, opt := range opts {
|
||||
mountOptions[opt] = struct{}{}
|
||||
}
|
||||
return split[1], mountOptions, nil
|
||||
}
|
||||
}
|
||||
return "", mountOptions, fmt.Errorf("resctrl not found in " + mountInfoPath)
|
||||
}
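A hedged example of the /proc/mounts entry that getResctrlMountInfo matches (the exact options depend on how resctrl was mounted):

//   resctrl /sys/fs/resctrl resctrl rw,relatime,mba_MBps 0 0
// -> mount point "/sys/fs/resctrl" and option set {"rw","relatime","mba_MBps"},
//    which is what getMBInfo uses above to detect MBA_MBps mode.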
|
||||
|
||||
func readFileUint64(path string) (uint64, error) {
|
||||
data, err := readFileString(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return strconv.ParseUint(data, 10, 64)
|
||||
}
|
||||
|
||||
func readFileBitmask(path string) (bitmask, error) {
|
||||
data, err := readFileString(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
value, err := strconv.ParseUint(data, 16, 64)
|
||||
return bitmask(value), err
|
||||
}
|
||||
|
||||
func readFileString(path string) (string, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
return strings.TrimSpace(string(data)), err
|
||||
}
|
||||
74
vendor/github.com/intel/goresctrl/pkg/rdt/kubernetes.go
generated
vendored
Normal file
74
vendor/github.com/intel/goresctrl/pkg/rdt/kubernetes.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2021 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rdt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/intel/goresctrl/pkg/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
// RdtContainerAnnotation is the CRI level container annotation for setting
|
||||
// the RDT class (CLOS) of a container
|
||||
RdtContainerAnnotation = "io.kubernetes.cri.rdt-class"
|
||||
|
||||
// RdtPodAnnotation is a Pod annotation for setting the RDT class (CLOS) of
|
||||
// all containers of the pod
|
||||
RdtPodAnnotation = "rdt.resources.beta.kubernetes.io/pod"
|
||||
|
||||
// RdtPodAnnotationContainerPrefix is the prefix of the per-container Pod annotation
|
||||
// for setting the RDT class (CLOS) of one container of the pod
|
||||
RdtPodAnnotationContainerPrefix = "rdt.resources.beta.kubernetes.io/container."
|
||||
)
|
||||
|
||||
// ContainerClassFromAnnotations determines the effective RDT class of a
|
||||
// container from its Pod annotations and the CRI level container annotations. It
|
||||
// verifies that the class exists in the goresctrl configuration and that
|
||||
// it is allowed to be used.
|
||||
func ContainerClassFromAnnotations(containerName string, containerAnnotations, podAnnotations map[string]string) (string, error) {
|
||||
clsName, clsOrigin := kubernetes.ContainerClassFromAnnotations(
|
||||
RdtContainerAnnotation, RdtPodAnnotation, RdtPodAnnotationContainerPrefix,
|
||||
containerName, containerAnnotations, podAnnotations)
|
||||
|
||||
if clsOrigin != kubernetes.ClassOriginNotFound {
|
||||
if rdt == nil {
|
||||
return "", fmt.Errorf("RDT not initialized, class %q not available", clsName)
|
||||
}
|
||||
|
||||
// Verify validity of class name
|
||||
if !IsQualifiedClassName(clsName) {
|
||||
return "", fmt.Errorf("unqualified RDT class name %q", clsName)
|
||||
}
|
||||
|
||||
// If RDT has been initialized we check that the class exists
|
||||
if _, ok := rdt.getClass(clsName); !ok {
|
||||
return "", fmt.Errorf("RDT class %q does not exist in configuration", clsName)
|
||||
}
|
||||
|
||||
// If classes have been configured by goresctrl
|
||||
if clsConf, ok := rdt.conf.Classes[unaliasClassName(clsName)]; ok {
|
||||
// Check that the class is allowed
|
||||
if clsOrigin == kubernetes.ClassOriginPodAnnotation && clsConf.Kubernetes.DenyPodAnnotation {
|
||||
return "", fmt.Errorf("RDT class %q not allowed from Pod annotations", clsName)
|
||||
} else if clsOrigin == kubernetes.ClassOriginContainerAnnotation && clsConf.Kubernetes.DenyContainerAnnotation {
|
||||
return "", fmt.Errorf("RDT class %q not allowed from Container annotation", clsName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return clsName, nil
|
||||
}
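A minimal caller sketch for ContainerClassFromAnnotations; the container name and class value are hypothetical, and it presumes rdt.Initialize and SetConfig have already run as shown in the package documentation.

package main

import (
	"fmt"

	"github.com/intel/goresctrl/pkg/rdt"
)

func main() {
	// Hypothetical Pod annotation assigning the whole pod to class "gold".
	podAnnotations := map[string]string{
		"rdt.resources.beta.kubernetes.io/pod": "gold",
	}
	cls, err := rdt.ContainerClassFromAnnotations("my-container", nil, podAnnotations)
	if err != nil {
		fmt.Println("class lookup failed:", err) // e.g. RDT not initialized or class denied
		return
	}
	fmt.Println("effective RDT class:", cls) // "gold" if the class exists and is allowed
}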
|
||||
124
vendor/github.com/intel/goresctrl/pkg/rdt/prometheus.go
generated
vendored
Normal file
124
vendor/github.com/intel/goresctrl/pkg/rdt/prometheus.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2020 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rdt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var customLabels []string = []string{}
|
||||
|
||||
// collector implements prometheus.Collector interface
|
||||
type collector struct {
|
||||
descriptors map[string]*prometheus.Desc
|
||||
}
|
||||
|
||||
// NewCollector creates new Prometheus collector of RDT metrics
|
||||
func NewCollector() (prometheus.Collector, error) {
|
||||
c := &collector{descriptors: make(map[string]*prometheus.Desc)}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// RegisterCustomPrometheusLabels registers monitor group annotations to be
|
||||
// exported as Prometheus metrics labels
|
||||
func RegisterCustomPrometheusLabels(names ...string) {
|
||||
Names:
|
||||
for _, n := range names {
|
||||
for _, c := range customLabels {
|
||||
if n == c {
|
||||
break Names
|
||||
}
|
||||
}
|
||||
customLabels = append(customLabels, n)
|
||||
}
|
||||
}
|
||||
|
||||
// Describe method of the prometheus.Collector interface
|
||||
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
||||
for resource, features := range GetMonFeatures() {
|
||||
switch resource {
|
||||
case MonResourceL3:
|
||||
for _, f := range features {
|
||||
ch <- c.describeL3(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect method of the prometheus.Collector interface
|
||||
func (c collector) Collect(ch chan<- prometheus.Metric) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, cls := range GetClasses() {
|
||||
for _, monGrp := range cls.GetMonGroups() {
|
||||
wg.Add(1)
|
||||
g := monGrp
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
c.collectGroupMetrics(ch, g)
|
||||
}()
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (c *collector) describeL3(feature string) *prometheus.Desc {
|
||||
d, ok := c.descriptors[feature]
|
||||
if !ok {
|
||||
name := "l3_" + feature
|
||||
help := "L3 " + feature
|
||||
|
||||
switch feature {
|
||||
case "llc_occupancy":
|
||||
help = "L3 (LLC) occupancy"
|
||||
case "mbm_local_bytes":
|
||||
help = "bytes transferred to/from local memory through LLC"
|
||||
case "mbm_total_bytes":
|
||||
help = "total bytes transferred to/from memory through LLC"
|
||||
}
|
||||
labels := append([]string{"rdt_class", "rdt_mon_group", "cache_id"}, customLabels...)
|
||||
d = prometheus.NewDesc(name, help, labels, nil)
|
||||
c.descriptors[feature] = d
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func (c *collector) collectGroupMetrics(ch chan<- prometheus.Metric, mg MonGroup) {
|
||||
allData := mg.GetMonData()
|
||||
|
||||
annotations := mg.GetAnnotations()
|
||||
customLabelValues := make([]string, len(customLabels))
|
||||
for i, name := range customLabels {
|
||||
customLabelValues[i] = annotations[name]
|
||||
}
|
||||
|
||||
for cacheID, data := range allData.L3 {
|
||||
for feature, value := range data {
|
||||
labels := append([]string{mg.Parent().Name(), mg.Name(), fmt.Sprint(cacheID)}, customLabelValues...)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.describeL3(feature),
|
||||
prometheus.CounterValue,
|
||||
float64(value),
|
||||
labels...,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
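A minimal wiring sketch for the collector above: register it with a Prometheus registry and serve /metrics. Error handling is trimmed and it assumes rdt.Initialize succeeds on the host, so it is an illustration rather than production code.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/intel/goresctrl/pkg/rdt"
)

func main() {
	_ = rdt.Initialize("")                    // detect resctrl; errors ignored only in this sketch
	rdt.RegisterCustomPrometheusLabels("pod") // optional extra label taken from mon group annotations

	collector, _ := rdt.NewCollector()
	prometheus.MustRegister(collector)

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}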
|
||||
859
vendor/github.com/intel/goresctrl/pkg/rdt/rdt.go
generated
vendored
Normal file
859
vendor/github.com/intel/goresctrl/pkg/rdt/rdt.go
generated
vendored
Normal file
@@ -0,0 +1,859 @@
|
||||
/*
|
||||
Copyright 2019 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package rdt implements an API for managing Intel® RDT technologies via the
|
||||
// resctrl pseudo-filesystem of the Linux kernel. It provides flexible
|
||||
// configuration with a hierarchical approach for easy management of exclusive
|
||||
// cache allocations.
|
||||
//
|
||||
// Goresctrl supports all available RDT technologies, i.e. L2 and L3 Cache
|
||||
// Allocation (CAT) with Code and Data Prioritization (CDP) and Memory
|
||||
// Bandwidth Allocation (MBA) plus Cache Monitoring (CMT) and Memory Bandwidth
|
||||
// Monitoring (MBM).
|
||||
//
|
||||
// Basic usage example:
|
||||
// rdt.SetLogger(logrus.New())
|
||||
//
|
||||
// if err := rdt.Initialize(""); err != nil {
|
||||
// return fmt.Errorf("RDT not supported: %v", err)
|
||||
// }
|
||||
//
|
||||
// if err := rdt.SetConfigFromFile("/path/to/rdt.conf.yaml", false); err != nil {
|
||||
// return fmt.Errorf("RDT configuration failed: %v", err)
|
||||
// }
|
||||
//
|
||||
// if cls, ok := rdt.GetClass("my-class"); ok {
|
||||
// // Set PIDs 12345 and 12346 to class "my-class"
|
||||
// if err := cls.AddPids("12345", "12346"); err != nil {
|
||||
// return fmt.Errorf("failed to add PIDs to RDT class: %v", err)
|
||||
// }
|
||||
// }
|
||||
package rdt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
stdlog "log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
grclog "github.com/intel/goresctrl/pkg/log"
|
||||
"github.com/intel/goresctrl/pkg/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
// RootClassName is the name we use in our config for the special class
|
||||
// that configures the "root" resctrl group of the system
|
||||
RootClassName = "system/default"
|
||||
// RootClassAlias is an alternative name for the root class
|
||||
RootClassAlias = ""
|
||||
)
|
||||
|
||||
type control struct {
|
||||
grclog.Logger
|
||||
|
||||
resctrlGroupPrefix string
|
||||
conf config
|
||||
rawConf Config
|
||||
classes map[string]*ctrlGroup
|
||||
}
|
||||
|
||||
var log grclog.Logger = grclog.NewLoggerWrapper(stdlog.New(os.Stderr, "[ rdt ] ", 0))
|
||||
|
||||
var info *resctrlInfo
|
||||
|
||||
var rdt *control
|
||||
|
||||
// Function for removing resctrl groups from the filesystem. This is
|
||||
// configurable because of unit tests.
|
||||
var groupRemoveFunc func(string) error = os.Remove
|
||||
|
||||
// CtrlGroup defines the interface of one goresctrl managed RDT class. It maps
|
||||
// to one CTRL group directory in the goresctrl pseudo-filesystem.
|
||||
type CtrlGroup interface {
|
||||
ResctrlGroup
|
||||
|
||||
// CreateMonGroup creates a new monitoring group under this CtrlGroup.
|
||||
CreateMonGroup(name string, annotations map[string]string) (MonGroup, error)
|
||||
|
||||
// DeleteMonGroup deletes a monitoring group from this CtrlGroup.
|
||||
DeleteMonGroup(name string) error
|
||||
|
||||
// DeleteMonGroups deletes all monitoring groups from this CtrlGroup.
|
||||
DeleteMonGroups() error
|
||||
|
||||
// GetMonGroup returns a specific monitoring group under this CtrlGroup.
|
||||
GetMonGroup(name string) (MonGroup, bool)
|
||||
|
||||
// GetMonGroups returns all monitoring groups under this CtrlGroup.
|
||||
GetMonGroups() []MonGroup
|
||||
}
|
||||
|
||||
// ResctrlGroup is the generic interface for resctrl CTRL and MON groups. It
|
||||
// maps to one CTRL or MON group directory in the goresctrl pseudo-filesystem.
|
||||
type ResctrlGroup interface {
|
||||
// Name returns the name of the group.
|
||||
Name() string
|
||||
|
||||
// GetPids returns the process ids assigned to the group.
|
||||
GetPids() ([]string, error)
|
||||
|
||||
// AddPids assigns the given process ids to the group.
|
||||
AddPids(pids ...string) error
|
||||
|
||||
// GetMonData retrieves the monitoring data of the group.
|
||||
GetMonData() MonData
|
||||
}
|
||||
|
||||
// MonGroup represents the interface to a RDT monitoring group. It maps to one
|
||||
// MON group in the goresctrl filesystem.
|
||||
type MonGroup interface {
|
||||
ResctrlGroup
|
||||
|
||||
// Parent returns the CtrlGroup under which the monitoring group exists.
|
||||
Parent() CtrlGroup
|
||||
|
||||
// GetAnnotations returns the annotations stored to the monitoring group.
|
||||
GetAnnotations() map[string]string
|
||||
}
|
||||
|
||||
// MonData contains monitoring stats of one monitoring group.
|
||||
type MonData struct {
|
||||
L3 MonL3Data
|
||||
}
|
||||
|
||||
// MonL3Data contains L3 monitoring stats of one monitoring group.
|
||||
type MonL3Data map[uint64]MonLeafData
|
||||
|
||||
// MonLeafData represents the raw numerical stats from one RDT monitor data leaf.
|
||||
type MonLeafData map[string]uint64
|
||||
|
||||
// MonResource is the type of RDT monitoring resource.
|
||||
type MonResource string
|
||||
|
||||
const (
|
||||
// MonResourceL3 is the RDT L3 cache monitor resource.
|
||||
MonResourceL3 MonResource = "l3"
|
||||
)
|
||||
|
||||
type ctrlGroup struct {
|
||||
resctrlGroup
|
||||
|
||||
monPrefix string
|
||||
monGroups map[string]*monGroup
|
||||
}
|
||||
|
||||
type monGroup struct {
|
||||
resctrlGroup
|
||||
|
||||
annotations map[string]string
|
||||
}
|
||||
|
||||
type resctrlGroup struct {
|
||||
prefix string
|
||||
name string
|
||||
parent *ctrlGroup // parent for MON groups
|
||||
}
|
||||
|
||||
// SetLogger sets the logger instance to be used by the package. This function
|
||||
// may be called even before Initialize().
|
||||
func SetLogger(l grclog.Logger) {
|
||||
log = l
|
||||
if rdt != nil {
|
||||
rdt.setLogger(l)
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize detects RDT from the system and initializes control interface of
|
||||
// the package.
|
||||
func Initialize(resctrlGroupPrefix string) error {
|
||||
var err error
|
||||
|
||||
info = nil
|
||||
rdt = nil
|
||||
|
||||
// Get info from the resctrl filesystem
|
||||
info, err = getRdtInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r := &control{Logger: log, resctrlGroupPrefix: resctrlGroupPrefix}
|
||||
|
||||
// NOTE: we lose monitoring group annotations (i.e. prometheus metrics
|
||||
// labels) on re-init
|
||||
if r.classes, err = r.classesFromResctrlFs(); err != nil {
|
||||
return fmt.Errorf("failed to initialize classes from resctrl fs: %v", err)
|
||||
}
|
||||
|
||||
if err := r.pruneMonGroups(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rdt = r
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiscoverClasses discovers existing classes from the resctrl filesystem.
|
||||
// Makes it possible to discover groups with a prefix other than the one set with
|
||||
// Initialize(). The original prefix is still used for monitoring groups.
|
||||
func DiscoverClasses(resctrlGroupPrefix string) error {
|
||||
if rdt != nil {
|
||||
return rdt.discoverFromResctrl(resctrlGroupPrefix)
|
||||
}
|
||||
return fmt.Errorf("rdt not initialized")
|
||||
}
|
||||
|
||||
// SetConfig (re-)configures the resctrl filesystem according to the specified
|
||||
// configuration.
|
||||
func SetConfig(c *Config, force bool) error {
|
||||
if rdt != nil {
|
||||
return rdt.setConfig(c, force)
|
||||
}
|
||||
return fmt.Errorf("rdt not initialized")
|
||||
}
|
||||
|
||||
// SetConfigFromData takes configuration as raw data, parses it and
|
||||
// reconfigures the resctrl filesystem.
|
||||
func SetConfigFromData(data []byte, force bool) error {
|
||||
cfg := &Config{}
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return fmt.Errorf("failed to parse configuration data: %v", err)
|
||||
}
|
||||
|
||||
return SetConfig(cfg, force)
|
||||
}
|
||||
|
||||
// SetConfigFromFile reads configuration from the filesystem and reconfigures
|
||||
// the resctrl filesystem.
|
||||
func SetConfigFromFile(path string, force bool) error {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read config file: %v", err)
|
||||
}
|
||||
|
||||
if err := SetConfigFromData(data, force); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infof("configuration successfully loaded from %q", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetClass returns one RDT class.
|
||||
func GetClass(name string) (CtrlGroup, bool) {
|
||||
if rdt != nil {
|
||||
return rdt.getClass(name)
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// GetClasses returns all available RDT classes.
|
||||
func GetClasses() []CtrlGroup {
|
||||
if rdt != nil {
|
||||
return rdt.getClasses()
|
||||
}
|
||||
return []CtrlGroup{}
|
||||
}
|
||||
|
||||
// MonSupported returns true if RDT monitoring features are available.
|
||||
func MonSupported() bool {
|
||||
if rdt != nil {
|
||||
return rdt.monSupported()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetMonFeatures returns the available monitoring stats of each available
|
||||
// monitoring technology.
|
||||
func GetMonFeatures() map[MonResource][]string {
|
||||
if rdt != nil {
|
||||
return rdt.getMonFeatures()
|
||||
}
|
||||
return map[MonResource][]string{}
|
||||
}
|
||||
|
||||
// IsQualifiedClassName returns true if the given string qualifies as a class name
|
||||
func IsQualifiedClassName(name string) bool {
|
||||
// Must be qualified as a file name
|
||||
return name == RootClassName || (len(name) < 4096 && name != "." && name != ".." && !strings.ContainsAny(name, "/\n"))
|
||||
}
|
||||
|
||||
func (c *control) getClass(name string) (CtrlGroup, bool) {
|
||||
cls, ok := c.classes[unaliasClassName(name)]
|
||||
return cls, ok
|
||||
}
|
||||
|
||||
func (c *control) getClasses() []CtrlGroup {
|
||||
ret := make([]CtrlGroup, 0, len(c.classes))
|
||||
|
||||
for _, v := range c.classes {
|
||||
ret = append(ret, v)
|
||||
}
|
||||
sort.Slice(ret, func(i, j int) bool { return ret[i].Name() < ret[j].Name() })
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *control) monSupported() bool {
|
||||
return info.l3mon.Supported()
|
||||
}
|
||||
|
||||
func (c *control) getMonFeatures() map[MonResource][]string {
|
||||
ret := make(map[MonResource][]string)
|
||||
if info.l3mon.Supported() {
|
||||
ret[MonResourceL3] = append([]string{}, info.l3mon.monFeatures...)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *control) setLogger(l grclog.Logger) {
|
||||
c.Logger = l
|
||||
}
|
||||
|
||||
func (c *control) setConfig(newConfig *Config, force bool) error {
|
||||
c.Infof("configuration update")
|
||||
|
||||
conf, err := (*newConfig).resolve()
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid configuration: %v", err)
|
||||
}
|
||||
|
||||
err = c.configureResctrl(conf, force)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resctrl configuration failed: %v", err)
|
||||
}
|
||||
|
||||
c.conf = conf
|
||||
// TODO: we'd better create a deep copy
|
||||
c.rawConf = *newConfig
|
||||
c.Infof("configuration finished")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *control) configureResctrl(conf config, force bool) error {
|
||||
grclog.DebugBlock(c, "applying resolved config:", " ", "%s", utils.DumpJSON(conf))
|
||||
|
||||
// Remove stale resctrl groups
|
||||
classesFromFs, err := c.classesFromResctrlFs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for name, cls := range classesFromFs {
|
||||
if _, ok := conf.Classes[cls.name]; !isRootClass(cls.name) && !ok {
|
||||
if !force {
|
||||
tasks, err := cls.GetPids()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get resctrl group tasks: %v", err)
|
||||
}
|
||||
if len(tasks) > 0 {
|
||||
return fmt.Errorf("refusing to remove non-empty resctrl group %q", cls.relPath(""))
|
||||
}
|
||||
}
|
||||
log.Debugf("removing existing resctrl group %q", cls.relPath(""))
|
||||
err = groupRemoveFunc(cls.path(""))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove resctrl group %q: %v", cls.relPath(""), err)
|
||||
}
|
||||
|
||||
delete(c.classes, name)
|
||||
}
|
||||
}
|
||||
|
||||
for name, cls := range c.classes {
|
||||
if _, ok := conf.Classes[cls.name]; !ok || cls.prefix != c.resctrlGroupPrefix {
|
||||
if !isRootClass(cls.name) {
|
||||
log.Debugf("dropping stale class %q (%q)", name, cls.path(""))
|
||||
delete(c.classes, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := c.classes[RootClassName]; !ok {
|
||||
log.Warnf("root class missing from runtime data, re-adding...")
|
||||
c.classes[RootClassName] = classesFromFs[RootClassName]
|
||||
}
|
||||
|
||||
// Try to apply given configuration
|
||||
for name, class := range conf.Classes {
|
||||
if _, ok := c.classes[name]; !ok {
|
||||
cg, err := newCtrlGroup(c.resctrlGroupPrefix, c.resctrlGroupPrefix, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.classes[name] = cg
|
||||
}
|
||||
partition := conf.Partitions[class.Partition]
|
||||
if err := c.classes[name].configure(name, class, partition, conf.Options); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.pruneMonGroups(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *control) discoverFromResctrl(prefix string) error {
|
||||
c.Debugf("running class discovery from resctrl filesystem using prefix %q", prefix)
|
||||
|
||||
classesFromFs, err := c.classesFromResctrlFsPrefix(prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Drop stale classes
|
||||
for name, cls := range c.classes {
|
||||
if _, ok := classesFromFs[cls.name]; !ok || cls.prefix != prefix {
|
||||
if !isRootClass(cls.name) {
|
||||
log.Debugf("dropping stale class %q (%q)", name, cls.path(""))
|
||||
delete(c.classes, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name, cls := range classesFromFs {
|
||||
if _, ok := c.classes[name]; !ok {
|
||||
c.classes[name] = cls
|
||||
log.Debugf("adding discovered class %q (%q)", name, cls.path(""))
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.pruneMonGroups(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *control) classesFromResctrlFs() (map[string]*ctrlGroup, error) {
|
||||
return c.classesFromResctrlFsPrefix(c.resctrlGroupPrefix)
|
||||
}
|
||||
|
||||
func (c *control) classesFromResctrlFsPrefix(prefix string) (map[string]*ctrlGroup, error) {
|
||||
names := []string{RootClassName}
|
||||
if g, err := resctrlGroupsFromFs(prefix, info.resctrlPath); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
for _, n := range g {
|
||||
if prefix != c.resctrlGroupPrefix &&
|
||||
strings.HasPrefix(n, c.resctrlGroupPrefix) &&
|
||||
strings.HasPrefix(c.resctrlGroupPrefix, prefix) {
|
||||
// Skip groups in the standard namespace
|
||||
continue
|
||||
}
|
||||
names = append(names, n[len(prefix):])
|
||||
}
|
||||
}
|
||||
|
||||
classes := make(map[string]*ctrlGroup, len(names)+1)
|
||||
for _, name := range names {
|
||||
g, err := newCtrlGroup(prefix, c.resctrlGroupPrefix, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
classes[name] = g
|
||||
}
|
||||
|
||||
return classes, nil
|
||||
}
|
||||
|
||||
func (c *control) pruneMonGroups() error {
|
||||
for name, cls := range c.classes {
|
||||
if err := cls.pruneMonGroups(); err != nil {
|
||||
return fmt.Errorf("failed to prune stale monitoring groups of %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *control) readRdtFile(rdtPath string) ([]byte, error) {
|
||||
return ioutil.ReadFile(filepath.Join(info.resctrlPath, rdtPath))
|
||||
}
|
||||
|
||||
func (c *control) writeRdtFile(rdtPath string, data []byte) error {
|
||||
if err := ioutil.WriteFile(filepath.Join(info.resctrlPath, rdtPath), data, 0644); err != nil {
|
||||
return c.cmdError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *control) cmdError(origErr error) error {
|
||||
errData, readErr := c.readRdtFile(filepath.Join("info", "last_cmd_status"))
|
||||
if readErr != nil {
|
||||
return origErr
|
||||
}
|
||||
cmdStatus := strings.TrimSpace(string(errData))
|
||||
if len(cmdStatus) > 0 && cmdStatus != "ok" {
|
||||
return fmt.Errorf("%s", cmdStatus)
|
||||
}
|
||||
return origErr
|
||||
}
|
||||
|
||||
func newCtrlGroup(prefix, monPrefix, name string) (*ctrlGroup, error) {
|
||||
cg := &ctrlGroup{
|
||||
resctrlGroup: resctrlGroup{prefix: prefix, name: name},
|
||||
monPrefix: monPrefix,
|
||||
}
|
||||
|
||||
if err := os.Mkdir(cg.path(""), 0755); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
cg.monGroups, err = cg.monGroupsFromResctrlFs()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error when retrieving existing monitor groups: %v", err)
|
||||
}
|
||||
|
||||
return cg, nil
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) CreateMonGroup(name string, annotations map[string]string) (MonGroup, error) {
|
||||
if mg, ok := c.monGroups[name]; ok {
|
||||
return mg, nil
|
||||
}
|
||||
|
||||
log.Debugf("creating monitoring group %s/%s", c.name, name)
|
||||
mg, err := newMonGroup(c.monPrefix, name, c, annotations)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new monitoring group %q: %v", name, err)
|
||||
}
|
||||
|
||||
c.monGroups[name] = mg
|
||||
|
||||
return mg, err
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) DeleteMonGroup(name string) error {
|
||||
mg, ok := c.monGroups[name]
|
||||
if !ok {
|
||||
log.Warnf("trying to delete non-existent mon group %s/%s", c.name, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debugf("deleting monitoring group %s/%s", c.name, name)
|
||||
if err := groupRemoveFunc(mg.path("")); err != nil {
|
||||
return fmt.Errorf("failed to remove monitoring group %q: %v", mg.relPath(""), err)
|
||||
}
|
||||
|
||||
delete(c.monGroups, name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) DeleteMonGroups() error {
|
||||
for name := range c.monGroups {
|
||||
if err := c.DeleteMonGroup(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) GetMonGroup(name string) (MonGroup, bool) {
|
||||
mg, ok := c.monGroups[name]
|
||||
return mg, ok
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) GetMonGroups() []MonGroup {
|
||||
ret := make([]MonGroup, 0, len(c.monGroups))
|
||||
|
||||
for _, v := range c.monGroups {
|
||||
ret = append(ret, v)
|
||||
}
|
||||
sort.Slice(ret, func(i, j int) bool { return ret[i].Name() < ret[j].Name() })
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) configure(name string, class *classConfig,
|
||||
partition *partitionConfig, options Options) error {
|
||||
schemata := ""
|
||||
|
||||
// Handle cache allocation
|
||||
for _, lvl := range []cacheLevel{L2, L3} {
|
||||
switch {
|
||||
case info.cat[lvl].unified.Supported():
|
||||
schema, err := class.CATSchema[lvl].toStr(catSchemaTypeUnified, partition.CAT[lvl])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
schemata += schema
|
||||
case info.cat[lvl].data.Supported() || info.cat[lvl].code.Supported():
|
||||
schema, err := class.CATSchema[lvl].toStr(catSchemaTypeCode, partition.CAT[lvl])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
schemata += schema
|
||||
|
||||
schema, err = class.CATSchema[lvl].toStr(catSchemaTypeData, partition.CAT[lvl])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
schemata += schema
|
||||
default:
|
||||
if class.CATSchema[lvl].Alloc != nil && !options.cat(lvl).Optional {
|
||||
return fmt.Errorf("%s cache allocation for %q specified in configuration but not supported by system", lvl, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle memory bandwidth allocation
|
||||
switch {
|
||||
case info.mb.Supported():
|
||||
schemata += class.MBSchema.toStr(partition.MB)
|
||||
default:
|
||||
if class.MBSchema != nil && !options.MB.Optional {
|
||||
return fmt.Errorf("memory bandwidth allocation for %q specified in configuration but not supported by system", name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(schemata) > 0 {
|
||||
log.Debugf("writing schemata %q to %q", schemata, c.relPath(""))
|
||||
if err := rdt.writeRdtFile(c.relPath("schemata"), []byte(schemata)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Debugf("empty schemata")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ctrlGroup) monGroupsFromResctrlFs() (map[string]*monGroup, error) {
|
||||
names, err := resctrlGroupsFromFs(c.monPrefix, c.path("mon_groups"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
grps := make(map[string]*monGroup, len(names))
|
||||
for _, name := range names {
|
||||
name = name[len(c.monPrefix):]
|
||||
mg, err := newMonGroup(c.monPrefix, name, c, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
grps[name] = mg
|
||||
}
|
||||
return grps, nil
|
||||
}
|
||||
|
||||
// Remove empty monitoring groups
|
||||
func (c *ctrlGroup) pruneMonGroups() error {
|
||||
for name, mg := range c.monGroups {
|
||||
pids, err := mg.GetPids()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get pids for monitoring group %q: %v", mg.relPath(""), err)
|
||||
}
|
||||
if len(pids) == 0 {
|
||||
if err := c.DeleteMonGroup(name); err != nil {
|
||||
return fmt.Errorf("failed to remove monitoring group %q: %v", mg.relPath(""), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) Name() string {
|
||||
return r.name
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) GetPids() ([]string, error) {
|
||||
data, err := rdt.readRdtFile(r.relPath("tasks"))
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
split := strings.Split(strings.TrimSpace(string(data)), "\n")
|
||||
if len(split[0]) > 0 {
|
||||
return split, nil
|
||||
}
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) AddPids(pids ...string) error {
|
||||
f, err := os.OpenFile(r.path("tasks"), os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for _, pid := range pids {
|
||||
if _, err := f.WriteString(pid + "\n"); err != nil {
|
||||
if errors.Is(err, syscall.ESRCH) {
|
||||
log.Debugf("no task %s", pid)
|
||||
} else {
|
||||
return fmt.Errorf("failed to assign processes %v to class %q: %v", pids, r.name, rdt.cmdError(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) GetMonData() MonData {
|
||||
m := MonData{}
|
||||
|
||||
if info.l3mon.Supported() {
|
||||
l3, err := r.getMonL3Data()
|
||||
if err != nil {
|
||||
log.Warnf("failed to retrieve L3 monitoring data: %v", err)
|
||||
} else {
|
||||
m.L3 = l3
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) getMonL3Data() (MonL3Data, error) {
|
||||
files, err := ioutil.ReadDir(r.path("mon_data"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := MonL3Data{}
|
||||
for _, file := range files {
|
||||
name := file.Name()
|
||||
if strings.HasPrefix(name, "mon_L3_") {
|
||||
// Parse cache id from the dirname
|
||||
id, err := strconv.ParseUint(strings.TrimPrefix(name, "mon_L3_"), 10, 32)
|
||||
if err != nil {
|
||||
// Just print a warning, we try to retrieve as much info as possible
|
||||
log.Warnf("error parsing L3 monitor data directory name %q: %v", name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
data, err := r.getMonLeafData(filepath.Join("mon_data", name))
|
||||
if err != nil {
|
||||
log.Warnf("failed to read monitor data: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
m[id] = data
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) getMonLeafData(path string) (MonLeafData, error) {
|
||||
files, err := ioutil.ReadDir(r.path(path))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := make(MonLeafData, len(files))
|
||||
|
||||
for _, file := range files {
|
||||
name := file.Name()
|
||||
|
||||
// We expect that all the files in the dir are regular files
|
||||
val, err := readFileUint64(r.path(path, name))
|
||||
if err != nil {
|
||||
// Just print a warning, we want to retrieve as much info as possible
|
||||
log.Warnf("error reading data file: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
m[name] = val
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) relPath(elem ...string) string {
|
||||
if r.parent == nil {
|
||||
if r.name == RootClassName {
|
||||
return filepath.Join(elem...)
|
||||
}
|
||||
return filepath.Join(append([]string{r.prefix + r.name}, elem...)...)
|
||||
}
|
||||
// Parent is only intended for MON groups - non-root CTRL groups are considered
|
||||
// as peers to the root CTRL group (as they are in HW) and do not have a parent
|
||||
return r.parent.relPath(append([]string{"mon_groups", r.prefix + r.name}, elem...)...)
|
||||
}
|
||||
|
||||
func (r *resctrlGroup) path(elem ...string) string {
|
||||
return filepath.Join(info.resctrlPath, r.relPath(elem...))
|
||||
}
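Resulting directory layout, sketched with a hypothetical group prefix "goresctrl.":

//   CTRL group "gold":      <resctrl>/goresctrl.gold
//   MON group "pod0" in it: <resctrl>/goresctrl.gold/mon_groups/goresctrl.pod0
//   the root class:         <resctrl>/ itself (empty relative path)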
|
||||
|
||||
func newMonGroup(prefix string, name string, parent *ctrlGroup, annotations map[string]string) (*monGroup, error) {
|
||||
mg := &monGroup{
|
||||
resctrlGroup: resctrlGroup{prefix: prefix, name: name, parent: parent},
|
||||
annotations: make(map[string]string, len(annotations))}
|
||||
|
||||
if err := os.Mkdir(mg.path(""), 0755); err != nil && !os.IsExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
for k, v := range annotations {
|
||||
mg.annotations[k] = v
|
||||
}
|
||||
|
||||
return mg, nil
|
||||
}
|
||||
|
||||
func (m *monGroup) Parent() CtrlGroup {
|
||||
return m.parent
|
||||
}
|
||||
|
||||
func (m *monGroup) GetAnnotations() map[string]string {
|
||||
a := make(map[string]string, len(m.annotations))
|
||||
for k, v := range m.annotations {
|
||||
a[k] = v
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func resctrlGroupsFromFs(prefix string, path string) ([]string, error) {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
grps := make([]string, 0, len(files))
|
||||
for _, file := range files {
|
||||
filename := file.Name()
|
||||
if strings.HasPrefix(filename, prefix) {
|
||||
if s, err := os.Stat(filepath.Join(path, filename, "tasks")); err == nil && !s.IsDir() {
|
||||
grps = append(grps, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
return grps, nil
|
||||
}
|
||||
|
||||
func isRootClass(name string) bool {
|
||||
return name == RootClassName || name == RootClassAlias
|
||||
}
|
||||
|
||||
func unaliasClassName(name string) string {
|
||||
if isRootClass(name) {
|
||||
return RootClassName
|
||||
}
|
||||
return name
|
||||
}
|
||||
180
vendor/github.com/intel/goresctrl/pkg/utils/idset.go
generated
vendored
Normal file
180
vendor/github.com/intel/goresctrl/pkg/utils/idset.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
/*
|
||||
Copyright 2019-2021 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// Unknown represents an unknown id.
|
||||
Unknown ID = -1
|
||||
)
|
||||
|
||||
// ID is an integer id, used to identify packages, CPUs, nodes, etc.
|
||||
type ID = int
|
||||
|
||||
// IDSet is an unordered set of integer ids.
|
||||
type IDSet map[ID]struct{}
|
||||
|
||||
// NewIDSet creates a new unordered set of (integer) ids.
|
||||
func NewIDSet(ids ...ID) IDSet {
|
||||
s := make(map[ID]struct{})
|
||||
|
||||
for _, id := range ids {
|
||||
s[id] = struct{}{}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// NewIDSetFromIntSlice creates a new unordered set from an integer slice.
|
||||
func NewIDSetFromIntSlice(ids ...int) IDSet {
|
||||
s := make(map[ID]struct{})
|
||||
|
||||
for _, id := range ids {
|
||||
s[ID(id)] = struct{}{}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Clone returns a copy of this IDSet.
|
||||
func (s IDSet) Clone() IDSet {
|
||||
return NewIDSet(s.Members()...)
|
||||
}
|
||||
|
||||
// Add adds the given ids into the set.
|
||||
func (s IDSet) Add(ids ...ID) {
|
||||
for _, id := range ids {
|
||||
s[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Del deletes the given ids from the set.
|
||||
func (s IDSet) Del(ids ...ID) {
|
||||
if s != nil {
|
||||
for _, id := range ids {
|
||||
delete(s, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the number of ids in the set.
|
||||
func (s IDSet) Size() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
// Has tests if all the ids are present in the set.
|
||||
func (s IDSet) Has(ids ...ID) bool {
|
||||
if s == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
_, ok := s[id]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Members returns all ids in the set as a randomly ordered slice.
|
||||
func (s IDSet) Members() []ID {
|
||||
if s == nil {
|
||||
return []ID{}
|
||||
}
|
||||
ids := make([]ID, len(s))
|
||||
idx := 0
|
||||
for id := range s {
|
||||
ids[idx] = id
|
||||
idx++
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// SortedMembers returns all ids in the set as a sorted slice.
|
||||
func (s IDSet) SortedMembers() []ID {
|
||||
ids := s.Members()
|
||||
sort.Slice(ids, func(i, j int) bool {
|
||||
return ids[i] < ids[j]
|
||||
})
|
||||
return ids
|
||||
}
|
||||
|
||||
// String returns the set as a string.
|
||||
func (s IDSet) String() string {
|
||||
return s.StringWithSeparator(",")
|
||||
}
|
||||
|
||||
// StringWithSeparator returns the set as a string, separated with the given separator.
|
||||
func (s IDSet) StringWithSeparator(args ...string) string {
|
||||
if s == nil || len(s) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
var sep string
|
||||
|
||||
if len(args) == 1 {
|
||||
sep = args[0]
|
||||
} else {
|
||||
sep = ","
|
||||
}
|
||||
|
||||
str := ""
|
||||
t := ""
|
||||
for _, id := range s.SortedMembers() {
|
||||
str = str + t + strconv.Itoa(int(id))
|
||||
t = sep
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
||||
|
||||
// MarshalJSON is the JSON marshaller for IDSet.
|
||||
func (s IDSet) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(s.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON is the JSON unmarshaller for IDSet.
|
||||
func (s *IDSet) UnmarshalJSON(data []byte) error {
|
||||
str := ""
|
||||
if err := json.Unmarshal(data, &str); err != nil {
|
||||
return fmt.Errorf("invalid IDSet entry '%s': %v", string(data), err)
|
||||
}
|
||||
|
||||
*s = NewIDSet()
|
||||
if str == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, idstr := range strings.Split(str, ",") {
|
||||
id, err := strconv.ParseUint(idstr, 10, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid IDSet entry '%s': %v", idstr, err)
|
||||
}
|
||||
s.Add(ID(id))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
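A minimal usage sketch of the IDSet helpers above (expected output shown in comments):

package main

import (
	"fmt"

	"github.com/intel/goresctrl/pkg/utils"
)

func main() {
	s := utils.NewIDSet(3, 1)
	s.Add(2)
	s.Del(3)
	fmt.Println(s.Has(1, 2))       // true
	fmt.Println(s.SortedMembers()) // [1 2]
	fmt.Println(s.String())        // "1,2"
}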
|
||||
32
vendor/github.com/intel/goresctrl/pkg/utils/json.go
generated
vendored
Normal file
32
vendor/github.com/intel/goresctrl/pkg/utils/json.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
Copyright 2019 Intel Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// DumpJSON dumps a json-compatible struct in human-readable form
|
||||
func DumpJSON(r interface{}) string {
|
||||
out, err := yaml.Marshal(r)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("!!!!!\nUnable to stringify %T: %v\n!!!!!", r, err)
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
36
vendor/github.com/intel/goresctrl/pkg/utils/sort.go
generated
vendored
Normal file
36
vendor/github.com/intel/goresctrl/pkg/utils/sort.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2020 Intel Corporation. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortUint64s sorts a slice of uint64 in increasing order.
|
||||
func SortUint64s(a []uint64) {
|
||||
sort.Sort(Uint64Slice(a))
|
||||
}
|
||||
|
||||
// Uint64Slice implements sort.Interface for a slice of uint64.
|
||||
type Uint64Slice []uint64
|
||||
|
||||
// Len returns the length of a Uint64Slice
|
||||
func (s Uint64Slice) Len() int { return len(s) }
|
||||
|
||||
// Less returns true if element at 'i' is less than the element at 'j'
|
||||
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
// Swap swaps the values of two elements
|
||||
func (s Uint64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
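Usage is a one-liner; the adapter just lets sort.Sort handle a []uint64:

//   a := []uint64{3, 1, 2}
//   utils.SortUint64s(a) // a is now [1 2 3]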
|
||||
2
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
2
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
@@ -18,7 +18,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
|
||||
|
||||
|
||||
2
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
2
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
@@ -24,7 +24,7 @@ import (
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
|
||||
2
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
2
vendor/github.com/prometheus/procfs/Makefile
generated
vendored
@@ -18,6 +18,8 @@ include Makefile.common
|
||||
./ttar -C $(dir $*) -x -f $*.ttar
|
||||
touch $@
|
||||
|
||||
fixtures: fixtures/.unpacked
|
||||
|
||||
update_fixtures:
|
||||
rm -vf fixtures/.unpacked
|
||||
./ttar -c -f fixtures.ttar fixtures/
|
||||
|
||||
15
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
15
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
@@ -78,12 +78,12 @@ ifneq ($(shell which gotestsum),)
|
||||
endif
|
||||
endif
|
||||
|
||||
PROMU_VERSION ?= 0.7.0
|
||||
PROMU_VERSION ?= 0.12.0
|
||||
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
||||
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.18.0
|
||||
GOLANGCI_LINT_VERSION ?= v1.39.0
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
@@ -118,7 +118,7 @@ endif
|
||||
%: common-% ;
|
||||
|
||||
.PHONY: common-all
|
||||
common-all: precheck style check_license lint unused build test
|
||||
common-all: precheck style check_license lint yamllint unused build test
|
||||
|
||||
.PHONY: common-style
|
||||
common-style:
|
||||
@@ -198,6 +198,15 @@ else
|
||||
endif
|
||||
endif
|
||||
|
||||
.PHONY: common-yamllint
|
||||
common-yamllint:
|
||||
@echo ">> running yamllint on all YAML files in the repository"
|
||||
ifeq (, $(shell which yamllint))
|
||||
@echo "yamllint not installed so skipping"
|
||||
else
|
||||
yamllint .
|
||||
endif
|
||||
|
||||
# For backward-compatibility.
|
||||
.PHONY: common-staticcheck
|
||||
common-staticcheck: lint
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/README.md
generated
vendored
4
vendor/github.com/prometheus/procfs/README.md
generated
vendored
@@ -6,8 +6,8 @@ metrics from the pseudo-filesystems /proc and /sys.
|
||||
*WARNING*: This package is a work in progress. Its API may still break in
|
||||
backwards-incompatible ways without warnings. Use it at your own risk.
|
||||
|
||||
[](https://godoc.org/github.com/prometheus/procfs)
|
||||
[](https://travis-ci.org/prometheus/procfs)
|
||||
[](https://pkg.go.dev/github.com/prometheus/procfs)
|
||||
[](https://circleci.com/gh/prometheus/procfs/tree/master)
|
||||
[](https://goreportcard.com/report/github.com/prometheus/procfs)
|
||||
|
||||
## Usage
|
||||
|
||||
30
vendor/github.com/prometheus/procfs/cmdline.go
generated
vendored
Normal file
30
vendor/github.com/prometheus/procfs/cmdline.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
"strings"

"github.com/prometheus/procfs/internal/util"
)

// CmdLine returns the command line of the kernel.
func (fs FS) CmdLine() ([]string, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
if err != nil {
return nil, err
}

return strings.Fields(string(data)), nil
}
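A minimal sketch of how the new FS.CmdLine helper added above might be called by a consumer of this vendored package; the "/proc" mount point and the printed output are assumptions, not part of the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS points the library at a proc mount; "/proc" is the usual default.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// CmdLine (added in cmdline.go above) reads /proc/cmdline and splits it
	// into whitespace-separated kernel arguments.
	args, err := fs.CmdLine()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(args)
}
```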
2
vendor/github.com/prometheus/procfs/doc.go
generated
vendored
@@ -31,7 +31,7 @@
// log.Fatalf("could not get process: %s", err)
// }
//
// stat, err := p.NewStat()
// stat, err := p.Stat()
// if err != nil {
// log.Fatalf("could not get process stat: %s", err)
// }
1178
vendor/github.com/prometheus/procfs/fixtures.ttar
generated
vendored
File diff suppressed because it is too large
105
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
@@ -22,9 +22,12 @@ import (
)

var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)

// MDStat holds info parsed from /proc/mdstat.
@@ -39,12 +42,20 @@ type MDStat struct {
DisksTotal int64
// Number of failed disks.
DisksFailed int64
// Number of "down" disks. (the _ indicator in the status line)
DisksDown int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
BlocksSyncedFinishTime float64
// current sync speed (in Kilobytes/sec)
BlocksSyncedSpeed float64
// Name of md component devices
Devices []string
}
@@ -91,7 +102,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])

if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %w", err)
@@ -105,6 +116,9 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
@@ -124,7 +138,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
@@ -132,69 +146,104 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
}

mdStats = append(mdStats, MDStat{
Name: mdName,
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
Devices: evalComponentDevices(deviceFields),
Name: mdName,
ActivityState: state,
DisksActive: active,
DisksFailed: fail,
DisksDown: down,
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
Devices: evalComponentDevices(deviceFields),
})
}

return mdStats, nil
}

func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {

sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}

if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device deviceLine, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
return total, total, size, nil
return total, total, 0, size, nil
}

if strings.Contains(deviceLine, "inactive") {
return 0, 0, size, nil
return 0, 0, 0, size, nil
}

matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
}

total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}

active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
down = int64(strings.Count(matches[4], "_"))

return active, total, size, nil
return active, total, down, size, nil
}

func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
}

syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}

return syncedBlocks, nil
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}

// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}

// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
}

return syncedBlocks, pct, finish, speed, nil
}

func evalComponentDevices(deviceFields []string) []string {
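For reference, a hedged sketch of how the extended MDStat fields look to a caller; FS.MDStat is the package's existing public accessor, and the output formatting below is illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // reads from /proc by default
	if err != nil {
		log.Fatal(err)
	}
	mdstats, err := fs.MDStat() // parses /proc/mdstat via parseMDStat above
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdstats {
		// DisksDown, BlocksSyncedPct, BlocksSyncedFinishTime and
		// BlocksSyncedSpeed are the fields added by this update.
		fmt.Printf("%s [%s]: %d down, %.1f%% synced, ~%.1f min left at %.0f K/s\n",
			md.Name, md.ActivityState, md.DisksDown,
			md.BlocksSyncedPct, md.BlocksSyncedFinishTime, md.BlocksSyncedSpeed)
	}
}
```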
10
vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
@@ -65,6 +65,7 @@ type (
TxQueue uint64
RxQueue uint64
UID uint64
Inode uint64
}
)
@@ -150,9 +151,9 @@ func parseIP(hexIP string) (net.IP, error) {
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 8 {
if len(fields) < 10 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 8 columns %q",
"cannot parse net socket line as it has less then 10 columns %q",
strings.Join(fields, " "),
)
}
@@ -216,5 +217,10 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}

// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
}

return line, nil
}
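The new Inode field surfaces through the package's socket accessors; the sketch below assumes the public FS.NetTCP wrapper and its field names, which are not shown in this diff, so treat it as illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// NetTCP parses /proc/net/tcp; each entry is built by
	// parseNetIPSocketLine, which now also fills in Inode.
	sockets, err := fs.NetTCP()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range sockets {
		fmt.Printf("%v:%d uid=%d inode=%d\n", s.LocalAddr, s.LocalPort, s.UID, s.Inode)
	}
}
```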
68
vendor/github.com/prometheus/procfs/netstat.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
"bufio"
"os"
"path/filepath"
"strconv"
"strings"
)

// NetStat contains statistics for all the counters from one file
type NetStat struct {
Filename string
Stats map[string][]uint64
}

// NetStat retrieves stats from /proc/net/stat/
func (fs FS) NetStat() ([]NetStat, error) {
statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
if err != nil {
return nil, err
}

var netStatsTotal []NetStat

for _, filePath := range statFiles {
file, err := os.Open(filePath)
if err != nil {
return nil, err
}

netStatFile := NetStat{
Filename: filepath.Base(filePath),
Stats: make(map[string][]uint64),
}
scanner := bufio.NewScanner(file)
scanner.Scan()
// First string is always a header for stats
var headers []string
headers = append(headers, strings.Fields(scanner.Text())...)

// Other strings represent per-CPU counters
for scanner.Scan() {
for num, counter := range strings.Fields(scanner.Text()) {
value, err := strconv.ParseUint(counter, 16, 32)
if err != nil {
return nil, err
}
netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
}
}
netStatsTotal = append(netStatsTotal, netStatFile)
}
return netStatsTotal, nil
}
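A short sketch of calling the new FS.NetStat accessor added above; the use of NewDefaultFS and the example file names are assumptions, not part of the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// One NetStat per file under /proc/net/stat/, e.g. arp_cache or ndisc_cache.
	stats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		for counter, perCPU := range s.Stats {
			fmt.Printf("%s %s: %v\n", s.Filename, counter, perCPU)
		}
	}
}
```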
2
vendor/github.com/prometheus/procfs/proc_cgroup.go
generated
vendored
@@ -90,7 +90,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
// so the len of the returned struct is equal to the number of active hierarchies on this system
func (p Proc) Cgroups() ([]Cgroup, error) {
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
data, err := util.ReadFileNoStat(p.path("cgroup"))
if err != nil {
return nil, err
}
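The switch from a hard-coded /proc path to p.path("cgroup") means Cgroups now respects a non-default proc mount. A hedged sketch follows; the /host/proc mount point is purely hypothetical, and the Cgroup field names are as exposed by the procfs package.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// With p.path("cgroup"), an alternate mount such as this hypothetical
	// /host/proc is honored instead of always reading /proc/<pid>/cgroup.
	fs, err := procfs.NewFS("/host/proc")
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(1)
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		fmt.Printf("hierarchy %d %v: %s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}
```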
32
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@@ -100,6 +100,15 @@ type ProcStat struct {
VSize uint
// Resident set size in pages.
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
// Scheduling policy.
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64

proc fs.FS
}
@@ -119,7 +128,8 @@ func (p Proc) Stat() (ProcStat, error) {
}

var (
ignore int
ignoreInt64 int64
ignoreUint64 uint64

s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("("))
@@ -151,10 +161,28 @@ func (p Proc) Stat() (ProcStat, error) {
&s.Priority,
&s.Nice,
&s.NumThreads,
&ignore,
&ignoreInt64,
&s.Starttime,
&s.VSize,
&s.RSS,
&s.RSSLimit,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
&ignoreInt64,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
)
if err != nil {
return ProcStat{}, err
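A minimal sketch of reading the newly parsed ProcStat fields; procfs.Self() is the package's existing helper for the current process, and the format string below is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// RSSLimit, RTPriority, Policy and DelayAcctBlkIOTicks are the fields
	// newly scanned from /proc/<pid>/stat in this update.
	fmt.Printf("rss limit=%d B rt prio=%d policy=%d blkio delay=%d ticks\n",
		stat.RSSLimit, stat.RTPriority, stat.Policy, stat.DelayAcctBlkIOTicks)
}
```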
1
vendor/github.com/prometheus/procfs/zoneinfo.go
generated
vendored
@@ -99,7 +99,6 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
continue
}
if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
zoneinfoElement.Zone = ""
continue
}
parts := strings.Fields(strings.TrimSpace(line))