vendor: bump prometheus/procfs v0.0.8
full diff: cb4147076a...v0.0.8

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

parent ed6ae81861
commit 99911ea668
@@ -36,7 +36,7 @@ github.com/pkg/errors ba968bfe8b2f7e042a574c888954
 github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823
 github.com/prometheus/client_model d1d2010b5beead3fa1c5f271a5cf626e40b3ad6e # v0.1.0
 github.com/prometheus/common 287d3e634a1e550c9e463dd7e5a75a422c614505 # v0.7.0
-github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
+github.com/prometheus/procfs 6d489fc7f1d9cd890a250f3ea3431b1744b9623f # v0.0.8
 github.com/russross/blackfriday 05f3235734ad95d0016f6a23902f06461fcf567a # v1.5.2
 github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1
 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
vendor/github.com/prometheus/procfs/README.md (generated, vendored, 54 lines changed)
@@ -1,7 +1,7 @@
 # procfs
 
-This procfs package provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
+This package provides functions to retrieve system, kernel, and process
+metrics from the pseudo-filesystems /proc and /sys.
 
 *WARNING*: This package is a work in progress. Its API may still break in
 backwards-incompatible ways without warnings. Use it at your own risk.
@@ -9,3 +9,53 @@ backwards-incompatible ways without warnings. Use it at your own risk.
 [](https://godoc.org/github.com/prometheus/procfs)
 [](https://travis-ci.org/prometheus/procfs)
 [](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
+/sys, or both. For example, cpu statistics are gathered from
+`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
+
+Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+fs, err := blockdevice.NewFS("/proc", "/sys")
+stats, err := fs.ProcDiskstats()
+```
+
+## Package Organization
+
+The packages in this project are organized according to (1) whether the data comes from the `/proc` or
+`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
+can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
+is available in the `blockdevices` sub-package.
+
+## Building and Testing
+
+The procfs library is intended to be built as part of another application, so there are no distributable binaries.
+However, most of the API includes unit tests which can be run with `make test`.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing. To add/update the test fixtures, first
+ensure the `fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `fixtures` directory. When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory. And finally, verify the changes using
+`git diff fixtures.ttar`.
vendor/github.com/prometheus/procfs/arp.go (new file, generated, vendored, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"fmt"
	"io/ioutil"
	"net"
	"strings"
)

// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
	// IP address
	IPAddr net.IP
	// MAC address
	HWAddr net.HardwareAddr
	// Name of the device
	Device string
}

// GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
// and then return a slice of ARPEntry's.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
	if err != nil {
		return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
	}

	return parseARPEntries(data)
}

func parseARPEntries(data []byte) ([]ARPEntry, error) {
	lines := strings.Split(string(data), "\n")
	entries := make([]ARPEntry, 0)
	var err error
	const (
		expectedDataWidth   = 6
		expectedHeaderWidth = 9
	)
	for _, line := range lines {
		columns := strings.Fields(line)
		width := len(columns)

		if width == expectedHeaderWidth || width == 0 {
			continue
		} else if width == expectedDataWidth {
			entry, err := parseARPEntry(columns)
			if err != nil {
				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
			}
			entries = append(entries, entry)
		} else {
			return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
		}

	}

	return entries, err
}

func parseARPEntry(columns []string) (ARPEntry, error) {
	ip := net.ParseIP(columns[0])
	mac := net.HardwareAddr(columns[3])

	entry := ARPEntry{
		IPAddr: ip,
		HWAddr: mac,
		Device: columns[5],
	}

	return entry, nil
}
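The GatherARPEntries doc comment above describes the intended call pattern; the following is a minimal, hedged sketch of how a consumer of this vendored copy might use it (the mount point and the printing logic are illustrative assumptions, not part of the diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// GatherARPEntries parses /proc/net/arp into a slice of ARPEntry.
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s -> %s on %s\n", e.IPAddr, e.HWAddr, e.Device)
	}
}
```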
vendor/github.com/prometheus/procfs/buddyinfo.go (generated, vendored, 16 lines changed)
@@ -31,19 +31,9 @@ type BuddyInfo struct {
 	Sizes []float64
 }
 
-// NewBuddyInfo reads the buddyinfo statistics.
-func NewBuddyInfo() ([]BuddyInfo, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewBuddyInfo()
-}
-
-// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
-func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
-	file, err := os.Open(fs.Path("buddyinfo"))
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.proc.Path("buddyinfo"))
 	if err != nil {
 		return nil, err
 	}
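The hunk above drops the package-level NewBuddyInfo helper in favour of a method on FS. A hedged sketch of what the call-site migration looks like for code consuming this vendored copy (error handling and output are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// v0.0.8 removes the package-level NewBuddyInfo(); construct an FS first.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	buddy, err := fs.BuddyInfo() // previously fs.NewBuddyInfo() / procfs.NewBuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("buddyinfo entries:", len(buddy))
}
```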
vendor/github.com/prometheus/procfs/cpuinfo.go (new file, generated, vendored, 167 lines)
@@ -0,0 +1,167 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"bytes"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
type CPUInfo struct {
	Processor       uint
	VendorID        string
	CPUFamily       string
	Model           string
	ModelName       string
	Stepping        string
	Microcode       string
	CPUMHz          float64
	CacheSize       string
	PhysicalID      string
	Siblings        uint
	CoreID          string
	CPUCores        uint
	APICID          string
	InitialAPICID   string
	FPU             string
	FPUException    string
	CPUIDLevel      uint
	WP              string
	Flags           []string
	Bugs            []string
	BogoMips        float64
	CLFlushSize     uint
	CacheAlignment  uint
	AddressSizes    string
	PowerManagement string
}

// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
	data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
	if err != nil {
		return nil, err
	}
	return parseCPUInfo(data)
}

// parseCPUInfo parses data from /proc/cpuinfo
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
	cpuinfo := []CPUInfo{}
	i := -1
	scanner := bufio.NewScanner(bytes.NewReader(info))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.TrimSpace(line) == "" {
			continue
		}
		field := strings.SplitN(line, ": ", 2)
		switch strings.TrimSpace(field[0]) {
		case "processor":
			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
			i++
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].Processor = uint(v)
		case "vendor_id":
			cpuinfo[i].VendorID = field[1]
		case "cpu family":
			cpuinfo[i].CPUFamily = field[1]
		case "model":
			cpuinfo[i].Model = field[1]
		case "model name":
			cpuinfo[i].ModelName = field[1]
		case "stepping":
			cpuinfo[i].Stepping = field[1]
		case "microcode":
			cpuinfo[i].Microcode = field[1]
		case "cpu MHz":
			v, err := strconv.ParseFloat(field[1], 64)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].CPUMHz = v
		case "cache size":
			cpuinfo[i].CacheSize = field[1]
		case "physical id":
			cpuinfo[i].PhysicalID = field[1]
		case "siblings":
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].Siblings = uint(v)
		case "core id":
			cpuinfo[i].CoreID = field[1]
		case "cpu cores":
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].CPUCores = uint(v)
		case "apicid":
			cpuinfo[i].APICID = field[1]
		case "initial apicid":
			cpuinfo[i].InitialAPICID = field[1]
		case "fpu":
			cpuinfo[i].FPU = field[1]
		case "fpu_exception":
			cpuinfo[i].FPUException = field[1]
		case "cpuid level":
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].CPUIDLevel = uint(v)
		case "wp":
			cpuinfo[i].WP = field[1]
		case "flags":
			cpuinfo[i].Flags = strings.Fields(field[1])
		case "bugs":
			cpuinfo[i].Bugs = strings.Fields(field[1])
		case "bogomips":
			v, err := strconv.ParseFloat(field[1], 64)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].BogoMips = v
		case "clflush size":
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].CLFlushSize = uint(v)
		case "cache_alignment":
			v, err := strconv.ParseUint(field[1], 0, 32)
			if err != nil {
				return nil, err
			}
			cpuinfo[i].CacheAlignment = uint(v)
		case "address sizes":
			cpuinfo[i].AddressSizes = field[1]
		case "power management":
			cpuinfo[i].PowerManagement = field[1]
		}
	}
	return cpuinfo, nil

}
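As with the other new accessors in this bump, CPUInfo is a method on FS. A brief sketch of how it might be consumed (the printed fields are taken from the struct above; the surrounding program is an illustrative assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // reads from the default /proc mount
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo() // one CPUInfo per "processor" stanza in /proc/cpuinfo
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: %s @ %.0f MHz\n", c.Processor, c.ModelName, c.CPUMHz)
	}
}
```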
vendor/github.com/prometheus/procfs/crypto.go (new file, generated, vendored, 131 lines)
@@ -0,0 +1,131 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// Crypto holds info parsed from /proc/crypto.
type Crypto struct {
	Alignmask   *uint64
	Async       bool
	Blocksize   *uint64
	Chunksize   *uint64
	Ctxsize     *uint64
	Digestsize  *uint64
	Driver      string
	Geniv       string
	Internal    string
	Ivsize      *uint64
	Maxauthsize *uint64
	MaxKeysize  *uint64
	MinKeysize  *uint64
	Module      string
	Name        string
	Priority    *int64
	Refcnt      *int64
	Seedsize    *uint64
	Selftest    string
	Type        string
	Walksize    *uint64
}

// Crypto parses an crypto-file (/proc/crypto) and returns a slice of
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
	if err != nil {
		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
	}
	crypto, err := parseCrypto(data)
	if err != nil {
		return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
	}
	return crypto, nil
}

func parseCrypto(cryptoData []byte) ([]Crypto, error) {
	crypto := []Crypto{}

	cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))

	for _, block := range cryptoBlocks {
		var newCryptoElem Crypto

		lines := strings.Split(string(block), "\n")
		for _, line := range lines {
			if strings.TrimSpace(line) == "" || line[0] == ' ' {
				continue
			}
			fields := strings.Split(line, ":")
			key := strings.TrimSpace(fields[0])
			value := strings.TrimSpace(fields[1])
			vp := util.NewValueParser(value)

			switch strings.TrimSpace(key) {
			case "async":
				b, err := strconv.ParseBool(value)
				if err == nil {
					newCryptoElem.Async = b
				}
			case "blocksize":
				newCryptoElem.Blocksize = vp.PUInt64()
			case "chunksize":
				newCryptoElem.Chunksize = vp.PUInt64()
			case "digestsize":
				newCryptoElem.Digestsize = vp.PUInt64()
			case "driver":
				newCryptoElem.Driver = value
			case "geniv":
				newCryptoElem.Geniv = value
			case "internal":
				newCryptoElem.Internal = value
			case "ivsize":
				newCryptoElem.Ivsize = vp.PUInt64()
			case "maxauthsize":
				newCryptoElem.Maxauthsize = vp.PUInt64()
			case "max keysize":
				newCryptoElem.MaxKeysize = vp.PUInt64()
			case "min keysize":
				newCryptoElem.MinKeysize = vp.PUInt64()
			case "module":
				newCryptoElem.Module = value
			case "name":
				newCryptoElem.Name = value
			case "priority":
				newCryptoElem.Priority = vp.PInt64()
			case "refcnt":
				newCryptoElem.Refcnt = vp.PInt64()
			case "seedsize":
				newCryptoElem.Seedsize = vp.PUInt64()
			case "selftest":
				newCryptoElem.Selftest = value
			case "type":
				newCryptoElem.Type = value
			case "walksize":
				newCryptoElem.Walksize = vp.PUInt64()
			}
		}
		crypto = append(crypto, newCryptoElem)
	}
	return crypto, nil
}
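Note that the numeric fields of Crypto are pointers, so they stay nil when a /proc/crypto block omits them. A short, hedged sketch of consuming the new accessor (the nil check and output are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	algs, err := fs.Crypto() // one entry per block in /proc/crypto
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range algs {
		// Pointer fields such as Priority are nil when the kernel did not report them.
		if a.Priority != nil {
			fmt.Printf("%s (%s) priority=%d\n", a.Name, a.Driver, *a.Priority)
		}
	}
}
```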
vendor/github.com/prometheus/procfs/fs.go (generated, vendored, 86 lines changed)
@@ -1,69 +1,43 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
-	"fmt"
-	"os"
-	"path"
-
-	"github.com/prometheus/procfs/nfs"
-	"github.com/prometheus/procfs/xfs"
+	"github.com/prometheus/procfs/internal/fs"
 )
 
-// FS represents the pseudo-filesystem proc, which provides an interface to
+// FS represents the pseudo-filesystem sys, which provides an interface to
 // kernel data structures.
-type FS string
+type FS struct {
+	proc fs.FS
+}
 
 // DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = "/proc"
+const DefaultMountPoint = fs.DefaultProcMountPoint
 
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+	return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
 func NewFS(mountPoint string) (FS, error) {
-	info, err := os.Stat(mountPoint)
+	fs, err := fs.NewFS(mountPoint)
 	if err != nil {
-		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+		return FS{}, err
 	}
-	if !info.IsDir() {
-		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
-	}
-
-	return FS(mountPoint), nil
-}
-
-// Path returns the path of the given subsystem relative to the procfs root.
-func (fs FS) Path(p ...string) string {
-	return path.Join(append([]string{string(fs)}, p...)...)
-}
-
-// XFSStats retrieves XFS filesystem runtime statistics.
-func (fs FS) XFSStats() (*xfs.Stats, error) {
-	f, err := os.Open(fs.Path("fs/xfs/stat"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return xfs.ParseStats(f)
-}
-
-// NFSdClientRPCStats retrieves NFS daemon RPC statistics.
-func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) {
-	f, err := os.Open(fs.Path("net/rpc/nfs"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return nfs.ParseClientRPCStats(f)
-}
-
-// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
-func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
-	f, err := os.Open(fs.Path("net/rpc/nfsd"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return nfs.ParseServerRPCStats(f)
+	return FS{fs}, nil
 }
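The FS type changes from a string path to a struct wrapping internal/fs, and NewDefaultFS is introduced as a convenience constructor. A minimal sketch of the updated entry point, combined with the Stat call the vendored README demonstrates (the output is an illustrative assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS is new in v0.0.8; it wraps NewFS(DefaultMountPoint)
	// and returns the struct-based FS shown in the diff above.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.Stat() // same call the vendored README demonstrates
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", stat)
}
```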
vendor/github.com/prometheus/procfs/go.mod (new file, generated, vendored, 8 lines)
@@ -0,0 +1,8 @@
module github.com/prometheus/procfs

go 1.12

require (
	github.com/google/go-cmp v0.3.1
	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
)
vendor/github.com/prometheus/procfs/internal/fs/fs.go (new file, generated, vendored, 55 lines)
@@ -0,0 +1,55 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fs

import (
	"fmt"
	"os"
	"path/filepath"
)

const (
	// DefaultProcMountPoint is the common mount point of the proc filesystem.
	DefaultProcMountPoint = "/proc"

	// DefaultSysMountPoint is the common mount point of the sys filesystem.
	DefaultSysMountPoint = "/sys"

	// DefaultConfigfsMountPoint is the common mount point of the configfs
	DefaultConfigfsMountPoint = "/sys/kernel/config"
)

// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
// interface to kernel data structures.
type FS string

// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
	info, err := os.Stat(mountPoint)
	if err != nil {
		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
	}
	if !info.IsDir() {
		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
	}

	return FS(mountPoint), nil
}

// Path appends the given path elements to the filesystem path, adding separators
// as necessary.
func (fs FS) Path(p ...string) string {
	return filepath.Join(append([]string{string(fs)}, p...)...)
}
vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored, 44 lines changed)
@@ -13,7 +13,11 @@
 
 package util
 
-import "strconv"
+import (
+	"io/ioutil"
+	"strconv"
+	"strings"
+)
 
 // ParseUint32s parses a slice of strings into a slice of uint32s.
 func ParseUint32s(ss []string) ([]uint32, error) {
@@ -44,3 +48,41 @@ func ParseUint64s(ss []string) ([]uint64, error) {
 
 	return us, nil
 }
+
+// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
+func ParsePInt64s(ss []string) ([]*int64, error) {
+	us := make([]*int64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ParseBool parses a string into a boolean pointer.
+func ParseBool(b string) *bool {
+	var truth bool
+	switch b {
+	case "enabled":
+		truth = true
+	case "disabled":
+		truth = false
+	default:
+		return nil
+	}
+	return &truth
+}
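ParseBool returns a tri-state pointer rather than a plain bool, which is easiest to see in a tiny sketch. Note that internal/util is an internal package, so this example is framed as code inside the procfs module itself; the helper name is hypothetical:

```go
package procfs // sketch only: internal/util is not importable from other modules

import "github.com/prometheus/procfs/internal/util"

// describeToggle illustrates ParseBool's tri-state result: pointer-to-true for
// "enabled", pointer-to-false for "disabled", and nil for anything else.
func describeToggle(raw string) string {
	b := util.ParseBool(raw)
	switch {
	case b == nil:
		return "unknown"
	case *b:
		return "enabled"
	default:
		return "disabled"
	}
}
```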
vendor/github.com/prometheus/procfs/internal/util/readfile.go (new file, generated, vendored, 38 lines)
@@ -0,0 +1,38 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"io"
	"io/ioutil"
	"os"
)

// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
// This is similar to ioutil.ReadFile but without the call to os.Stat, because
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
// Reads a max file size of 512kB. For files larger than this, a scanner
// should be used.
func ReadFileNoStat(filename string) ([]byte, error) {
	const maxBufferSize = 1024 * 512

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	reader := io.LimitReader(f, maxBufferSize)
	return ioutil.ReadAll(reader)
}
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (new file, generated, vendored, 48 lines)
@@ -0,0 +1,48 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux,!appengine

package util

import (
	"bytes"
	"os"
	"syscall"
)

// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
func SysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
	// Go's ioutil.ReadFile implementation to poll forever.
	//
	// Since we either want to read data or bail immediately, do the simplest
	// possible read using syscall directly.
	const sysFileBufferSize = 128
	b := make([]byte, sysFileBufferSize)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(b[:n])), nil
}
vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go (new file, generated, vendored, 26 lines)
@@ -0,0 +1,26 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux,appengine !linux

package util

import (
	"fmt"
)

// SysReadFile is here implemented as a noop for builds that do not support
// the read syscall. For example Windows, or Linux on Google App Engine.
func SysReadFile(file string) (string, error) {
	return "", fmt.Errorf("not supported on this platform")
}
vendor/github.com/prometheus/procfs/internal/util/valueparser.go (new file, generated, vendored, 91 lines)
@@ -0,0 +1,91 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"strconv"
)

// TODO(mdlayher): util packages are an anti-pattern and this should be moved
// somewhere else that is more focused in the future.

// A ValueParser enables parsing a single string into a variety of data types
// in a concise and safe way. The Err method must be invoked after invoking
// any other methods to ensure a value was successfully parsed.
type ValueParser struct {
	v   string
	err error
}

// NewValueParser creates a ValueParser using the input string.
func NewValueParser(v string) *ValueParser {
	return &ValueParser{v: v}
}

// Int interprets the underlying value as an int and returns that value.
func (vp *ValueParser) Int() int { return int(vp.int64()) }

// PInt64 interprets the underlying value as an int64 and returns a pointer to
// that value.
func (vp *ValueParser) PInt64() *int64 {
	if vp.err != nil {
		return nil
	}

	v := vp.int64()
	return &v
}

// int64 interprets the underlying value as an int64 and returns that value.
// TODO: export if/when necessary.
func (vp *ValueParser) int64() int64 {
	if vp.err != nil {
		return 0
	}

	// A base value of zero makes ParseInt infer the correct base using the
	// string's prefix, if any.
	const base = 0
	v, err := strconv.ParseInt(vp.v, base, 64)
	if err != nil {
		vp.err = err
		return 0
	}

	return v
}

// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
// that value.
func (vp *ValueParser) PUInt64() *uint64 {
	if vp.err != nil {
		return nil
	}

	// A base value of zero makes ParseInt infer the correct base using the
	// string's prefix, if any.
	const base = 0
	v, err := strconv.ParseUint(vp.v, base, 64)
	if err != nil {
		vp.err = err
		return nil
	}

	return &v
}

// Err returns the last error, if any, encountered by the ValueParser.
func (vp *ValueParser) Err() error {
	return vp.err
}
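The ValueParser doc comment states a contract: call Err after the typed accessors to confirm the parse succeeded. A small sketch of that contract, framed as code inside the procfs module since internal/util cannot be imported from elsewhere; the function name is hypothetical:

```go
package procfs // sketch only: internal/util is importable only within this module

import (
	"fmt"

	"github.com/prometheus/procfs/internal/util"
)

// parseSizes parses decimal or hex strings into uint64s using ValueParser.
func parseSizes(raw []string) ([]uint64, error) {
	out := make([]uint64, 0, len(raw))
	for _, s := range raw {
		vp := util.NewValueParser(s)
		v := vp.PUInt64()
		// Err must be checked after the accessor; once a parse fails the
		// accessors return zero values or nil pointers.
		if err := vp.Err(); err != nil {
			return nil, fmt.Errorf("bad value %q: %v", s, err)
		}
		out = append(out, *v)
	}
	return out, nil
}
```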
vendor/github.com/prometheus/procfs/ipvs.go (generated, vendored, 55 lines changed)
@@ -1,7 +1,21 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
 	"bufio"
+	"bytes"
 	"encoding/hex"
 	"errors"
 	"fmt"
@@ -11,6 +25,8 @@ import (
 	"os"
 	"strconv"
 	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
 )
 
 // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
@@ -49,29 +65,18 @@ type IPVSBackendStatus struct {
 	Weight uint64
 }
 
-// NewIPVSStats reads the IPVS statistics.
-func NewIPVSStats() (IPVSStats, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return IPVSStats{}, err
-	}
-
-	return fs.NewIPVSStats()
-}
-
-// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) NewIPVSStats() (IPVSStats, error) {
-	file, err := os.Open(fs.Path("net/ip_vs_stats"))
-	if err != nil {
-		return IPVSStats{}, err
-	}
-	defer file.Close()
-
-	return parseIPVSStats(file)
-}
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return parseIPVSStats(bytes.NewReader(data))
+}
 
 // parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+func parseIPVSStats(r io.Reader) (IPVSStats, error) {
 	var (
 		statContent []byte
 		statLines   []string
@@ -79,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
 		stats       IPVSStats
 	)
 
-	statContent, err := ioutil.ReadAll(file)
+	statContent, err := ioutil.ReadAll(r)
 	if err != nil {
 		return IPVSStats{}, err
 	}
@@ -118,19 +123,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
 	return stats, nil
 }
 
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return []IPVSBackendStatus{}, err
-	}
-
-	return fs.NewIPVSBackendStatus()
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
-	file, err := os.Open(fs.Path("net/ip_vs"))
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	file, err := os.Open(fs.proc.Path("net/ip_vs"))
 	if err != nil {
 		return nil, err
 	}
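Both IPVS accessors follow the same rename pattern as the rest of this bump (NewIPVSStats becomes fs.IPVSStats, NewIPVSBackendStatus becomes fs.IPVSBackendStatus). A hedged consumer sketch, with the output format as an illustrative assumption:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.IPVSStats() // was procfs.NewIPVSStats() before this bump
	if err != nil {
		log.Fatal(err)
	}
	backends, err := fs.IPVSBackendStatus() // was procfs.NewIPVSBackendStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stats=%+v backends=%d\n", stats, len(backends))
}
```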
vendor/github.com/prometheus/procfs/mdstat.go (generated, vendored, 156 lines changed)
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -9,8 +22,8 @@ import (
 )
 
 var (
-	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
-	buildlineRE  = regexp.MustCompile(`\((\d+)/\d+\)`)
+	statusLineRE   = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+	recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
 )
 
 // MDStat holds info parsed from /proc/mdstat.
@@ -21,117 +34,160 @@ type MDStat struct {
 	ActivityState string
 	// Number of active disks.
 	DisksActive int64
-	// Total number of disks the device consists of.
+	// Total number of disks the device requires.
 	DisksTotal int64
+	// Number of failed disks.
+	DisksFailed int64
+	// Spare disks in the device.
+	DisksSpare int64
 	// Number of blocks the device holds.
 	BlocksTotal int64
 	// Number of blocks on the device that are in sync.
 	BlocksSynced int64
 }
 
-// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
-func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
-	mdStatusFilePath := fs.Path("mdstat")
-	content, err := ioutil.ReadFile(mdStatusFilePath)
+// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info. More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
 	if err != nil {
-		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+	}
+	mdstat, err := parseMDStat(data)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
 	}
+	return mdstat, nil
+}
 
-	mdStates := []MDStat{}
-	lines := strings.Split(string(content), "\n")
-	for i, l := range lines {
-		if l == "" {
-			continue
-		}
-		if l[0] == ' ' {
-			continue
-		}
-		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdStatData []byte) ([]MDStat, error) {
+	mdStats := []MDStat{}
+	lines := strings.Split(string(mdStatData), "\n")
+
+	for i, line := range lines {
+		if strings.TrimSpace(line) == "" || line[0] == ' ' ||
+			strings.HasPrefix(line, "Personalities") ||
+			strings.HasPrefix(line, "unused") {
 			continue
 		}
 
-		mainLine := strings.Split(l, " ")
-		if len(mainLine) < 3 {
-			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+		deviceFields := strings.Fields(line)
+		if len(deviceFields) < 3 {
+			return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
 		}
-		mdName := mainLine[0]
-		activityState := mainLine[2]
+		mdName := deviceFields[0] // mdx
+		state := deviceFields[2]  // active or inactive
 
 		if len(lines) <= i+3 {
-			return mdStates, fmt.Errorf(
-				"error parsing %s: too few lines for md device %s",
-				mdStatusFilePath,
+			return nil, fmt.Errorf(
+				"error parsing %s: too few lines for md device",
 				mdName,
 			)
 		}
 
-		active, total, size, err := evalStatusline(lines[i+1])
+		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
+		fail := int64(strings.Count(line, "(F)"))
+		spare := int64(strings.Count(line, "(S)"))
+		active, total, size, err := evalStatusLine(lines[i], lines[i+1])
+
 		if err != nil {
-			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+			return nil, fmt.Errorf("error parsing md device lines: %s", err)
 		}
 
-		// j is the line number of the syncing-line.
-		j := i + 2
+		syncLineIdx := i + 2
 		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
-			j = i + 3
+			syncLineIdx++
 		}
 
 		// If device is syncing at the moment, get the number of currently
 		// synced bytes, otherwise that number equals the size of the device.
 		syncedBlocks := size
-		if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
-			syncedBlocks, err = evalBuildline(lines[j])
-			if err != nil {
-				return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+		recovering := strings.Contains(lines[syncLineIdx], "recovery")
+		resyncing := strings.Contains(lines[syncLineIdx], "resync")
+
+		// Append recovery and resyncing state info.
+		if recovering || resyncing {
+			if recovering {
+				state = "recovering"
+			} else {
+				state = "resyncing"
+			}
+
+			// Handle case when resync=PENDING or resync=DELAYED.
+			if strings.Contains(lines[syncLineIdx], "PENDING") ||
+				strings.Contains(lines[syncLineIdx], "DELAYED") {
+				syncedBlocks = 0
+			} else {
+				syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+				if err != nil {
+					return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
+				}
 			}
 		}
 
-		mdStates = append(mdStates, MDStat{
+		mdStats = append(mdStats, MDStat{
 			Name:          mdName,
-			ActivityState: activityState,
+			ActivityState: state,
 			DisksActive:   active,
+			DisksFailed:   fail,
+			DisksSpare:    spare,
 			DisksTotal:    total,
 			BlocksTotal:   size,
 			BlocksSynced:  syncedBlocks,
 		})
 	}
 
-	return mdStates, nil
+	return mdStats, nil
 }
 
-func evalStatusline(statusline string) (active, total, size int64, err error) {
-	matches := statuslineRE.FindStringSubmatch(statusline)
-	if len(matches) != 4 {
-		return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
-	}
+func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
 
-	size, err = strconv.ParseInt(matches[1], 10, 64)
+	sizeStr := strings.Fields(statusLine)[0]
+	size, err = strconv.ParseInt(sizeStr, 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+	}
+
+	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
+		// In the device deviceLine, only disks have a number associated with them in [].
+		total = int64(strings.Count(deviceLine, "["))
+		return total, total, size, nil
+	}
+
+	if strings.Contains(deviceLine, "inactive") {
+		return 0, 0, size, nil
+	}
+
+	matches := statusLineRE.FindStringSubmatch(statusLine)
+	if len(matches) != 4 {
+		return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
 	}
 
 	total, err = strconv.ParseInt(matches[2], 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
 	}
 
 	active, err = strconv.ParseInt(matches[3], 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+		return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
 	}
 
 	return active, total, size, nil
 }
 
-func evalBuildline(buildline string) (syncedBlocks int64, err error) {
-	matches := buildlineRE.FindStringSubmatch(buildline)
+func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
+	matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
 	if len(matches) != 2 {
-		return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+		return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
 	}
 
 	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
 	if err != nil {
-		return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+		return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
 	}
 
 	return syncedBlocks, nil
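The mdstat change renames ParseMDStat to MDStat and adds failed/spare disk counts plus recovering/resyncing activity states. A minimal consumer sketch (the formatting is an illustrative assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	devices, err := fs.MDStat() // was fs.ParseMDStat() before this bump
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range devices {
		// DisksFailed and DisksSpare are new fields in this version.
		fmt.Printf("%s %s active=%d failed=%d spare=%d\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksFailed, md.DisksSpare)
	}
}
```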
277
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
Normal file
277
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
|||||||
|
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// Meminfo represents memory statistics.
type Meminfo struct {
    // Total usable ram (i.e. physical ram minus a few reserved
    // bits and the kernel binary code)
    MemTotal uint64
    // The sum of LowFree+HighFree
    MemFree uint64
    // An estimate of how much memory is available for starting
    // new applications, without swapping. Calculated from
    // MemFree, SReclaimable, the size of the file LRU lists, and
    // the low watermarks in each zone. The estimate takes into
    // account that the system needs some page cache to function
    // well, and that not all reclaimable slab will be
    // reclaimable, due to items being in use. The impact of those
    // factors will vary from system to system.
    MemAvailable uint64
    // Relatively temporary storage for raw disk blocks shouldn't
    // get tremendously large (20MB or so)
    Buffers uint64
    Cached uint64
    // Memory that once was swapped out, is swapped back in but
    // still also is in the swapfile (if memory is needed it
    // doesn't need to be swapped out AGAIN because it is already
    // in the swapfile. This saves I/O)
    SwapCached uint64
    // Memory that has been used more recently and usually not
    // reclaimed unless absolutely necessary.
    Active uint64
    // Memory which has been less recently used. It is more
    // eligible to be reclaimed for other purposes
    Inactive uint64
    ActiveAnon uint64
    InactiveAnon uint64
    ActiveFile uint64
    InactiveFile uint64
    Unevictable uint64
    Mlocked uint64
    // total amount of swap space available
    SwapTotal uint64
    // Memory which has been evicted from RAM, and is temporarily
    // on the disk
    SwapFree uint64
    // Memory which is waiting to get written back to the disk
    Dirty uint64
    // Memory which is actively being written back to the disk
    Writeback uint64
    // Non-file backed pages mapped into userspace page tables
    AnonPages uint64
    // files which have been mapped, such as libraries
    Mapped uint64
    Shmem uint64
    // in-kernel data structures cache
    Slab uint64
    // Part of Slab, that might be reclaimed, such as caches
    SReclaimable uint64
    // Part of Slab, that cannot be reclaimed on memory pressure
    SUnreclaim uint64
    KernelStack uint64
    // amount of memory dedicated to the lowest level of page
    // tables.
    PageTables uint64
    // NFS pages sent to the server, but not yet committed to
    // stable storage
    NFSUnstable uint64
    // Memory used for block device "bounce buffers"
    Bounce uint64
    // Memory used by FUSE for temporary writeback buffers
    WritebackTmp uint64
    // Based on the overcommit ratio ('vm.overcommit_ratio'),
    // this is the total amount of memory currently available to
    // be allocated on the system. This limit is only adhered to
    // if strict overcommit accounting is enabled (mode 2 in
    // 'vm.overcommit_memory').
    // The CommitLimit is calculated with the following formula:
    // CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
    // overcommit_ratio / 100 + [total swap pages]
    // For example, on a system with 1G of physical RAM and 7G
    // of swap with a `vm.overcommit_ratio` of 30 it would
    // yield a CommitLimit of 7.3G.
    // For more details, see the memory overcommit documentation
    // in vm/overcommit-accounting.
    CommitLimit uint64
    // The amount of memory presently allocated on the system.
    // The committed memory is a sum of all of the memory which
    // has been allocated by processes, even if it has not been
    // "used" by them as of yet. A process which malloc()'s 1G
    // of memory, but only touches 300M of it will show up as
    // using 1G. This 1G is memory which has been "committed" to
    // by the VM and can be used at any time by the allocating
    // application. With strict overcommit enabled on the system
    // (mode 2 in 'vm.overcommit_memory'),allocations which would
    // exceed the CommitLimit (detailed above) will not be permitted.
    // This is useful if one needs to guarantee that processes will
    // not fail due to lack of memory once that memory has been
    // successfully allocated.
    CommittedAS uint64
    // total size of vmalloc memory area
    VmallocTotal uint64
    // amount of vmalloc area which is used
    VmallocUsed uint64
    // largest contiguous block of vmalloc area which is free
    VmallocChunk uint64
    HardwareCorrupted uint64
    AnonHugePages uint64
    ShmemHugePages uint64
    ShmemPmdMapped uint64
    CmaTotal uint64
    CmaFree uint64
    HugePagesTotal uint64
    HugePagesFree uint64
    HugePagesRsvd uint64
    HugePagesSurp uint64
    Hugepagesize uint64
    DirectMap4k uint64
    DirectMap2M uint64
    DirectMap1G uint64
}

// Meminfo returns an information about current kernel/system memory statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Meminfo() (Meminfo, error) {
    b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
    if err != nil {
        return Meminfo{}, err
    }

    m, err := parseMemInfo(bytes.NewReader(b))
    if err != nil {
        return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
    }

    return *m, nil
}

func parseMemInfo(r io.Reader) (*Meminfo, error) {
    var m Meminfo
    s := bufio.NewScanner(r)
    for s.Scan() {
        // Each line has at least a name and value; we ignore the unit.
        fields := strings.Fields(s.Text())
        if len(fields) < 2 {
            return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
        }

        v, err := strconv.ParseUint(fields[1], 0, 64)
        if err != nil {
            return nil, err
        }

        switch fields[0] {
        case "MemTotal:":
            m.MemTotal = v
        case "MemFree:":
            m.MemFree = v
        case "MemAvailable:":
            m.MemAvailable = v
        case "Buffers:":
            m.Buffers = v
        case "Cached:":
            m.Cached = v
        case "SwapCached:":
            m.SwapCached = v
        case "Active:":
            m.Active = v
        case "Inactive:":
            m.Inactive = v
        case "Active(anon):":
            m.ActiveAnon = v
        case "Inactive(anon):":
            m.InactiveAnon = v
        case "Active(file):":
            m.ActiveFile = v
        case "Inactive(file):":
            m.InactiveFile = v
        case "Unevictable:":
            m.Unevictable = v
        case "Mlocked:":
            m.Mlocked = v
        case "SwapTotal:":
            m.SwapTotal = v
        case "SwapFree:":
            m.SwapFree = v
        case "Dirty:":
            m.Dirty = v
        case "Writeback:":
            m.Writeback = v
        case "AnonPages:":
            m.AnonPages = v
        case "Mapped:":
            m.Mapped = v
        case "Shmem:":
            m.Shmem = v
        case "Slab:":
            m.Slab = v
        case "SReclaimable:":
            m.SReclaimable = v
        case "SUnreclaim:":
            m.SUnreclaim = v
        case "KernelStack:":
            m.KernelStack = v
        case "PageTables:":
            m.PageTables = v
        case "NFS_Unstable:":
            m.NFSUnstable = v
        case "Bounce:":
            m.Bounce = v
        case "WritebackTmp:":
            m.WritebackTmp = v
        case "CommitLimit:":
            m.CommitLimit = v
        case "Committed_AS:":
            m.CommittedAS = v
        case "VmallocTotal:":
            m.VmallocTotal = v
        case "VmallocUsed:":
            m.VmallocUsed = v
        case "VmallocChunk:":
            m.VmallocChunk = v
        case "HardwareCorrupted:":
            m.HardwareCorrupted = v
        case "AnonHugePages:":
            m.AnonHugePages = v
        case "ShmemHugePages:":
            m.ShmemHugePages = v
        case "ShmemPmdMapped:":
            m.ShmemPmdMapped = v
        case "CmaTotal:":
            m.CmaTotal = v
        case "CmaFree:":
            m.CmaFree = v
        case "HugePages_Total:":
            m.HugePagesTotal = v
        case "HugePages_Free:":
            m.HugePagesFree = v
        case "HugePages_Rsvd:":
            m.HugePagesRsvd = v
        case "HugePages_Surp:":
            m.HugePagesSurp = v
        case "Hugepagesize:":
            m.Hugepagesize = v
        case "DirectMap4k:":
            m.DirectMap4k = v
        case "DirectMap2M:":
            m.DirectMap2M = v
        case "DirectMap1G:":
            m.DirectMap1G = v
        }
    }

    return &m, nil
}
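For orientation, here is a minimal usage sketch of the Meminfo API added above. It is not part of the vendored diff and assumes procfs is mounted at /proc; values carry the units reported by /proc/meminfo (kB on Linux).

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Meminfo reads and parses /proc/meminfo in one call.
	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("MemTotal=%d kB MemAvailable=%d kB\n", mi.MemTotal, mi.MemAvailable)
}
```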
180 vendor/github.com/prometheus/procfs/mountinfo.go generated vendored Normal file
@@ -0,0 +1,180 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "fmt"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// A MountInfo is a type that describes the details, options
// for each mount, parsed from /proc/self/mountinfo.
// The fields described in each entry of /proc/self/mountinfo
// is described in the following man page.
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
    // Unique Id for the mount
    MountId int
    // The Id of the parent mount
    ParentId int
    // The value of `st_dev` for the files on this FS
    MajorMinorVer string
    // The pathname of the directory in the FS that forms
    // the root for this mount
    Root string
    // The pathname of the mount point relative to the root
    MountPoint string
    // Mount options
    Options map[string]string
    // Zero or more optional fields
    OptionalFields map[string]string
    // The Filesystem type
    FSType string
    // FS specific information or "none"
    Source string
    // Superblock options
    SuperOptions map[string]string
}

// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
func parseMountInfo(info []byte) ([]*MountInfo, error) {
    mounts := []*MountInfo{}
    scanner := bufio.NewScanner(bytes.NewReader(info))
    for scanner.Scan() {
        mountString := scanner.Text()
        parsedMounts, err := parseMountInfoString(mountString)
        if err != nil {
            return nil, err
        }
        mounts = append(mounts, parsedMounts)
    }

    err := scanner.Err()
    return mounts, err
}

// Parses a mountinfo file line, and converts it to a MountInfo struct.
// An important check here is to see if the hyphen separator, as if it does not exist,
// it means that the line is malformed.
func parseMountInfoString(mountString string) (*MountInfo, error) {
    var err error

    mountInfo := strings.Split(mountString, " ")
    mountInfoLength := len(mountInfo)
    if mountInfoLength < 11 {
        return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
    }

    if mountInfo[mountInfoLength-4] != "-" {
        return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
    }

    mount := &MountInfo{
        MajorMinorVer:  mountInfo[2],
        Root:           mountInfo[3],
        MountPoint:     mountInfo[4],
        Options:        mountOptionsParser(mountInfo[5]),
        OptionalFields: nil,
        FSType:         mountInfo[mountInfoLength-3],
        Source:         mountInfo[mountInfoLength-2],
        SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
    }

    mount.MountId, err = strconv.Atoi(mountInfo[0])
    if err != nil {
        return nil, fmt.Errorf("failed to parse mount ID")
    }
    mount.ParentId, err = strconv.Atoi(mountInfo[1])
    if err != nil {
        return nil, fmt.Errorf("failed to parse parent ID")
    }
    // Has optional fields, which is a space separated list of values.
    // Example: shared:2 master:7
    if mountInfo[6] != "" {
        mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
        if err != nil {
            return nil, err
        }
    }
    return mount, nil
}

// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
func mountOptionsIsValidField(s string) bool {
    switch s {
    case
        "shared",
        "master",
        "propagate_from",
        "unbindable":
        return true
    }
    return false
}

// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
    optionalFields := make(map[string]string)
    for _, field := range o {
        optionSplit := strings.SplitN(field, ":", 2)
        value := ""
        if len(optionSplit) == 2 {
            value = optionSplit[1]
        }
        if mountOptionsIsValidField(optionSplit[0]) {
            optionalFields[optionSplit[0]] = value
        }
    }
    return optionalFields, nil
}

// Parses the mount options, superblock options.
func mountOptionsParser(mountOptions string) map[string]string {
    opts := make(map[string]string)
    options := strings.Split(mountOptions, ",")
    for _, opt := range options {
        splitOption := strings.Split(opt, "=")
        if len(splitOption) < 2 {
            key := splitOption[0]
            opts[key] = ""
        } else {
            key, value := splitOption[0], splitOption[1]
            opts[key] = value
        }
    }
    return opts
}

// Retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
    data, err := util.ReadFileNoStat("/proc/self/mountinfo")
    if err != nil {
        return nil, err
    }
    return parseMountInfo(data)
}

// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
    data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
    if err != nil {
        return nil, err
    }
    return parseMountInfo(data)
}
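A short sketch (my own illustration, not vendored code) of how the exported GetMounts helper above can be used to list the mounts of the current process:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// GetMounts parses /proc/self/mountinfo for the current process.
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Printf("%s on %s type %s\n", m.Source, m.MountPoint, m.FSType)
	}
}
```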
95 vendor/github.com/prometheus/procfs/mountstats.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 // While implementing parsing of /proc/[pid]/mountstats, this blog was used
@@ -26,8 +39,11 @@ const (
 	statVersion10 = "1.0"
 	statVersion11 = "1.1"
 
-	fieldTransport10Len = 10
-	fieldTransport11Len = 13
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
 )
 
 // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -53,6 +69,8 @@ type MountStats interface {
 type MountStatsNFS struct {
 	// The version of statistics provided.
 	StatVersion string
+	// The mount options of the NFS mount.
+	Opts map[string]string
 	// The age of the NFS mount.
 	Age time.Duration
 	// Statistics related to byte counters for various operations.
@@ -163,16 +181,18 @@ type NFSOperationStats struct {
 	// Number of bytes received for this operation, including RPC headers and payload.
 	BytesReceived uint64
 	// Duration all requests spent queued for transmission before they were sent.
-	CumulativeQueueTime time.Duration
+	CumulativeQueueMilliseconds uint64
 	// Duration it took to get a reply back after the request was transmitted.
-	CumulativeTotalResponseTime time.Duration
+	CumulativeTotalResponseMilliseconds uint64
 	// Duration from when a request was enqueued to when it was completely handled.
-	CumulativeTotalRequestTime time.Duration
+	CumulativeTotalRequestMilliseconds uint64
 }
 
 // A NFSTransportStats contains statistics for the NFS mount RPC requests and
 // responses.
 type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
 	// The local port used for the NFS mount.
 	Port uint64
 	// Number of times the client has had to establish a connection from scratch
@@ -184,7 +204,7 @@ type NFSTransportStats struct {
 	// spent waiting for connections to the server to be established.
 	ConnectIdleTime uint64
 	// Duration since the NFS mount last saw any RPC traffic.
-	IdleTime time.Duration
+	IdleTimeSeconds uint64
 	// Number of RPC requests for this mount sent to the NFS server.
 	Sends uint64
 	// Number of RPC responses for this mount received from the NFS server.
@@ -299,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) {
 func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 	// Field indicators for parsing specific types of data
 	const (
+		fieldOpts   = "opts:"
 		fieldAge    = "age:"
 		fieldBytes  = "bytes:"
 		fieldEvents = "events:"
@@ -320,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 		}
 
 		switch ss[0] {
+		case fieldOpts:
+			if stats.Opts == nil {
+				stats.Opts = map[string]string{}
+			}
+			for _, opt := range strings.Split(ss[1], ",") {
+				split := strings.Split(opt, "=")
+				if len(split) == 2 {
+					stats.Opts[split[0]] = split[1]
+				} else {
+					stats.Opts[opt] = ""
+				}
+			}
 		case fieldAge:
 			// Age integer is in seconds
 			d, err := time.ParseDuration(ss[1] + "s")
@@ -347,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
 			return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
 		}
 
-		tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+		tstats, err := parseNFSTransportStats(ss[1:], statVersion)
 		if err != nil {
 			return nil, err
 		}
@@ -497,9 +530,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 			MajorTimeouts: ns[2],
 			BytesSent:     ns[3],
 			BytesReceived: ns[4],
-			CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
-			CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
-			CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
+			CumulativeQueueMilliseconds:         ns[5],
+			CumulativeTotalResponseMilliseconds: ns[6],
+			CumulativeTotalRequestMilliseconds:  ns[7],
 		})
 	}
 
@@ -509,13 +542,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
 // parseNFSTransportStats parses a NFSTransportStats line using an input set of
 // integer fields matched to a specific stats version.
 func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
 	switch statVersion {
 	case statVersion10:
-		if len(ss) != fieldTransport10Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport10TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport10UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
 		}
 	case statVersion11:
-		if len(ss) != fieldTransport11Len {
+		var expectedLength int
+		if protocol == "tcp" {
+			expectedLength = fieldTransport11TCPLen
+		} else if protocol == "udp" {
+			expectedLength = fieldTransport11UDPLen
+		} else {
+			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+		}
+		if len(ss) != expectedLength {
 			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
 		}
 	default:
@@ -523,12 +576,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
 	}
 
 	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
-	// in a v1.0 response.
+	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+	// the TCP length here.
 	//
 	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
 	// only v1.0 stats are present.
 	// See: https://github.com/prometheus/node_exporter/issues/571.
-	ns := make([]uint64, fieldTransport11Len)
+	ns := make([]uint64, fieldTransport11TCPLen)
 	for i, s := range ss {
 		n, err := strconv.ParseUint(s, 10, 64)
 		if err != nil {
@@ -538,12 +592,23 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
 		ns[i] = n
 	}
 
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	if protocol == "udp" {
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	}
+
 	return &NFSTransportStats{
+		Protocol:          protocol,
 		Port:              ns[0],
 		Bind:              ns[1],
 		Connect:           ns[2],
 		ConnectIdleTime:   ns[3],
-		IdleTime:          time.Duration(ns[4]) * time.Second,
+		IdleTimeSeconds:   ns[4],
 		Sends:             ns[5],
 		Receives:          ns[6],
 		BadTransactionIDs: ns[7],
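Note that the NFS operation and transport fields above change from time.Duration values to raw counters (milliseconds and seconds). The following sketch is my own illustration, not vendored code; it shows how a caller might restore the old duration semantics from an NFSOperationStats value obtained elsewhere.

```go
package example

import (
	"time"

	"github.com/prometheus/procfs"
)

// durations converts the raw millisecond counters introduced by this update
// back into time.Duration values, mirroring what the removed fields carried.
func durations(op procfs.NFSOperationStats) (queue, response, request time.Duration) {
	queue = time.Duration(op.CumulativeQueueMilliseconds) * time.Millisecond
	response = time.Duration(op.CumulativeTotalResponseMilliseconds) * time.Millisecond
	request = time.Duration(op.CumulativeTotalRequestMilliseconds) * time.Millisecond
	return queue, response, request
}
```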
54 vendor/github.com/prometheus/procfs/net_dev.go generated vendored
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package procfs
 
 import (
@@ -34,23 +47,13 @@ type NetDevLine struct {
 // are interface names.
 type NetDev map[string]NetDevLine
 
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func NewNetDev() (NetDev, error) {
-	fs, err := NewFS(DefaultMountPoint)
-	if err != nil {
-		return nil, err
-	}
-
-	return fs.NewNetDev()
-}
-
-// NewNetDev returns kernel/system statistics read from /proc/net/dev.
-func (fs FS) NewNetDev() (NetDev, error) {
-	return newNetDev(fs.Path("net/dev"))
-}
-
-// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
-func (p Proc) NewNetDev() (NetDev, error) {
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+	return newNetDev(fs.proc.Path("net/dev"))
+}
+
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
 	return newNetDev(p.path("net/dev"))
 }
 
@@ -62,7 +65,7 @@ func newNetDev(file string) (NetDev, error) {
 	}
 	defer f.Close()
 
-	nd := NetDev{}
+	netDev := NetDev{}
 	s := bufio.NewScanner(f)
 	for n := 0; s.Scan(); n++ {
 		// Skip the 2 header lines.
@@ -70,20 +73,20 @@ func newNetDev(file string) (NetDev, error) {
 			continue
 		}
 
-		line, err := nd.parseLine(s.Text())
+		line, err := netDev.parseLine(s.Text())
 		if err != nil {
-			return nd, err
+			return netDev, err
 		}
 
-		nd[line.Name] = *line
+		netDev[line.Name] = *line
 	}
 
-	return nd, s.Err()
+	return netDev, s.Err()
 }
 
 // parseLine parses a single line from the /proc/net/dev file. Header lines
 // must be filtered prior to calling this method.
-func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
 	parts := strings.SplitN(rawLine, ":", 2)
 	if len(parts) != 2 {
 		return nil, errors.New("invalid net/dev line, missing colon")
@@ -171,16 +174,15 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
 }
 
 // Total aggregates the values across interfaces and returns a new NetDevLine.
-// The Name field will be a sorted comma seperated list of interface names.
-func (nd NetDev) Total() NetDevLine {
+// The Name field will be a sorted comma separated list of interface names.
+func (netDev NetDev) Total() NetDevLine {
 	total := NetDevLine{}
 
-	names := make([]string, 0, len(nd))
-	for _, ifc := range nd {
+	names := make([]string, 0, len(netDev))
+	for _, ifc := range netDev {
 		names = append(names, ifc.Name)
 		total.RxBytes += ifc.RxBytes
 		total.RxPackets += ifc.RxPackets
-		total.RxPackets += ifc.RxPackets
 		total.RxErrors += ifc.RxErrors
 		total.RxDropped += ifc.RxDropped
 		total.RxFIFO += ifc.RxFIFO
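A brief sketch (illustrative, not vendored code) of the renamed FS.NetDev accessor together with the Total aggregation shown above, assuming /proc is mounted:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// NetDev replaces the old NewNetDev constructor on FS.
	nd, err := fs.NetDev()
	if err != nil {
		log.Fatal(err)
	}
	total := nd.Total()
	fmt.Printf("interfaces=%s rx_bytes=%d\n", total.Name, total.RxBytes)
}
```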
163 vendor/github.com/prometheus/procfs/net_sockstat.go generated vendored Normal file
@@ -0,0 +1,163 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "errors"
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
// respectively.
type NetSockstat struct {
    // Used is non-nil for IPv4 sockstat results, but nil for IPv6.
    Used      *int
    Protocols []NetSockstatProtocol
}

// A NetSockstatProtocol contains statistics about a given socket protocol.
// Pointer fields indicate that the value may or may not be present on any
// given protocol.
type NetSockstatProtocol struct {
    Protocol string
    InUse    int
    Orphan   *int
    TW       *int
    Alloc    *int
    Mem      *int
    Memory   *int
}

// NetSockstat retrieves IPv4 socket statistics.
func (fs FS) NetSockstat() (*NetSockstat, error) {
    return readSockstat(fs.proc.Path("net", "sockstat"))
}

// NetSockstat6 retrieves IPv6 socket statistics.
//
// If IPv6 is disabled on this kernel, the returned error can be checked with
// os.IsNotExist.
func (fs FS) NetSockstat6() (*NetSockstat, error) {
    return readSockstat(fs.proc.Path("net", "sockstat6"))
}

// readSockstat opens and parses a NetSockstat from the input file.
func readSockstat(name string) (*NetSockstat, error) {
    // This file is small and can be read with one syscall.
    b, err := util.ReadFileNoStat(name)
    if err != nil {
        // Do not wrap this error so the caller can detect os.IsNotExist and
        // similar conditions.
        return nil, err
    }

    stat, err := parseSockstat(bytes.NewReader(b))
    if err != nil {
        return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
    }

    return stat, nil
}

// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
func parseSockstat(r io.Reader) (*NetSockstat, error) {
    var stat NetSockstat
    s := bufio.NewScanner(r)
    for s.Scan() {
        // Expect a minimum of a protocol and one key/value pair.
        fields := strings.Split(s.Text(), " ")
        if len(fields) < 3 {
            return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
        }

        // The remaining fields are key/value pairs.
        kvs, err := parseSockstatKVs(fields[1:])
        if err != nil {
            return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
        }

        // The first field is the protocol. We must trim its colon suffix.
        proto := strings.TrimSuffix(fields[0], ":")
        switch proto {
        case "sockets":
            // Special case: IPv4 has a sockets "used" key/value pair that we
            // embed at the top level of the structure.
            used := kvs["used"]
            stat.Used = &used
        default:
            // Parse all other lines as individual protocols.
            nsp := parseSockstatProtocol(kvs)
            nsp.Protocol = proto
            stat.Protocols = append(stat.Protocols, nsp)
        }
    }

    if err := s.Err(); err != nil {
        return nil, err
    }

    return &stat, nil
}

// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
    if len(kvs)%2 != 0 {
        return nil, errors.New("odd number of fields in key/value pairs")
    }

    // Iterate two values at a time to gather key/value pairs.
    out := make(map[string]int, len(kvs)/2)
    for i := 0; i < len(kvs); i += 2 {
        vp := util.NewValueParser(kvs[i+1])
        out[kvs[i]] = vp.Int()

        if err := vp.Err(); err != nil {
            return nil, err
        }
    }

    return out, nil
}

// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
    var nsp NetSockstatProtocol
    for k, v := range kvs {
        // Capture the range variable to ensure we get unique pointers for
        // each of the optional fields.
        v := v
        switch k {
        case "inuse":
            nsp.InUse = v
        case "orphan":
            nsp.Orphan = &v
        case "tw":
            nsp.TW = &v
        case "alloc":
            nsp.Alloc = &v
        case "mem":
            nsp.Mem = &v
        case "memory":
            nsp.Memory = &v
        }
    }

    return nsp
}
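A minimal sketch (my own, assuming procfs is mounted at /proc) of the new NetSockstat accessors, including the documented os.IsNotExist check for kernels without IPv6:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}
	if stat.Used != nil {
		fmt.Println("sockets used:", *stat.Used)
	}

	// IPv6 statistics may be absent; the error is intentionally not wrapped.
	if _, err := fs.NetSockstat6(); os.IsNotExist(err) {
		fmt.Println("IPv6 is disabled on this kernel")
	}
}
```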
91 vendor/github.com/prometheus/procfs/net_softnet.go generated vendored Normal file
@@ -0,0 +1,91 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "fmt"
    "io/ioutil"
    "strconv"
    "strings"
)

// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.

// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
    // Number of processed packets
    Processed uint
    // Number of dropped packets
    Dropped uint
    // Number of times processing packets ran out of quota
    TimeSqueezed uint
}

// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
// and then return a slice of SoftnetEntry's.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
    data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
    if err != nil {
        return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
    }

    return parseSoftnetEntries(data)
}

func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
    lines := strings.Split(string(data), "\n")
    entries := make([]SoftnetEntry, 0)
    var err error
    const (
        expectedColumns = 11
    )
    for _, line := range lines {
        columns := strings.Fields(line)
        width := len(columns)
        if width == 0 {
            continue
        }
        if width != expectedColumns {
            return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
        }
        var entry SoftnetEntry
        if entry, err = parseSoftnetEntry(columns); err != nil {
            return []SoftnetEntry{}, err
        }
        entries = append(entries, entry)
    }

    return entries, nil
}

func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
    var err error
    var processed, dropped, timeSqueezed uint64
    if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
        return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
    }
    if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
        return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
    }
    if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
        return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
    }
    return SoftnetEntry{
        Processed:    uint(processed),
        Dropped:      uint(dropped),
        TimeSqueezed: uint(timeSqueezed),
    }, nil
}
271 vendor/github.com/prometheus/procfs/net_unix.go generated vendored Normal file
@@ -0,0 +1,271 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "errors"
    "fmt"
    "io"
    "os"
    "strconv"
    "strings"
)

// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.

const (
    netUnixKernelPtrIdx = iota
    netUnixRefCountIdx
    _
    netUnixFlagsIdx
    netUnixTypeIdx
    netUnixStateIdx
    netUnixInodeIdx

    // Inode and Path are optional.
    netUnixStaticFieldsCnt = 6
)

const (
    netUnixTypeStream    = 1
    netUnixTypeDgram     = 2
    netUnixTypeSeqpacket = 5

    netUnixFlagListen = 1 << 16

    netUnixStateUnconnected  = 1
    netUnixStateConnecting   = 2
    netUnixStateConnected    = 3
    netUnixStateDisconnected = 4
)

var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")

// NetUnixType is the type of the type field.
type NetUnixType uint64

// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64

// NetUnixState is the type of the state field.
type NetUnixState uint64

// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
    KernelPtr string
    RefCount  uint64
    Protocol  uint64
    Flags     NetUnixFlags
    Type      NetUnixType
    State     NetUnixState
    Inode     uint64
    Path      string
}

// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
    Rows []*NetUnixLine
}

// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
    fs, err := NewFS(DefaultMountPoint)
    if err != nil {
        return nil, err
    }

    return fs.NewNetUnix()
}

// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
    return NewNetUnixByPath(fs.proc.Path("net/unix"))
}

// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    return NewNetUnixByReader(f)
}

// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
    nu := &NetUnix{
        Rows: make([]*NetUnixLine, 0, 32),
    }
    scanner := bufio.NewScanner(reader)
    // Omit the header line.
    scanner.Scan()
    header := scanner.Text()
    // From the man page of proc(5), it does not contain an Inode field,
    // but in actually it exists.
    // This code works for both cases.
    hasInode := strings.Contains(header, "Inode")

    minFieldsCnt := netUnixStaticFieldsCnt
    if hasInode {
        minFieldsCnt++
    }
    for scanner.Scan() {
        line := scanner.Text()
        item, err := nu.parseLine(line, hasInode, minFieldsCnt)
        if err != nil {
            return nu, err
        }
        nu.Rows = append(nu.Rows, item)
    }

    return nu, scanner.Err()
}

func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
    fields := strings.Fields(line)
    fieldsLen := len(fields)
    if fieldsLen < minFieldsCnt {
        return nil, fmt.Errorf(
            "Parse Unix domain failed: expect at least %d fields but got %d",
            minFieldsCnt, fieldsLen)
    }
    kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
    if err != nil {
        return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
    }
    users, err := u.parseUsers(fields[netUnixRefCountIdx])
    if err != nil {
        return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
    }
    flags, err := u.parseFlags(fields[netUnixFlagsIdx])
    if err != nil {
        return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
    }
    typ, err := u.parseType(fields[netUnixTypeIdx])
    if err != nil {
        return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
    }
    state, err := u.parseState(fields[netUnixStateIdx])
    if err != nil {
        return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
    }
    var inode uint64
    if hasInode {
        inodeStr := fields[netUnixInodeIdx]
        inode, err = u.parseInode(inodeStr)
        if err != nil {
            return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
        }
    }

    nuLine := &NetUnixLine{
        KernelPtr: kernelPtr,
        RefCount:  users,
        Type:      typ,
        Flags:     flags,
        State:     state,
        Inode:     inode,
    }

    // Path field is optional.
    if fieldsLen > minFieldsCnt {
        pathIdx := netUnixInodeIdx + 1
        if !hasInode {
            pathIdx--
        }
        nuLine.Path = fields[pathIdx]
    }

    return nuLine, nil
}

func (u NetUnix) parseKernelPtr(str string) (string, error) {
    if !strings.HasSuffix(str, ":") {
        return "", errInvalidKernelPtrFmt
    }
    return str[:len(str)-1], nil
}

func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
    return strconv.ParseUint(hexStr, 16, 32)
}

func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
    typ, err := strconv.ParseUint(hexStr, 16, 16)
    if err != nil {
        return 0, err
    }
    return NetUnixType(typ), nil
}

func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
    flags, err := strconv.ParseUint(hexStr, 16, 32)
    if err != nil {
        return 0, err
    }
    return NetUnixFlags(flags), nil
}

func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
    st, err := strconv.ParseInt(hexStr, 16, 8)
    if err != nil {
        return 0, err
    }
    return NetUnixState(st), nil
}

func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
    return strconv.ParseUint(inodeStr, 10, 64)
}

func (t NetUnixType) String() string {
    switch t {
    case netUnixTypeStream:
        return "stream"
    case netUnixTypeDgram:
        return "dgram"
    case netUnixTypeSeqpacket:
        return "seqpacket"
    }
    return "unknown"
}

func (f NetUnixFlags) String() string {
    switch f {
    case netUnixFlagListen:
        return "listen"
    default:
        return "default"
    }
}

func (s NetUnixState) String() string {
    switch s {
    case netUnixStateUnconnected:
        return "unconnected"
    case netUnixStateConnecting:
        return "connecting"
    case netUnixStateConnected:
        return "connected"
    case netUnixStateDisconnected:
        return "disconnected"
    }
    return "unknown"
}
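A small usage sketch (illustrative only; the package-level NewNetUnix constructor uses DefaultMountPoint as shown above):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nu, err := procfs.NewNetUnix()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Type and State render via the String methods defined above.
		fmt.Printf("%s %s inode=%d path=%q\n", row.Type, row.State, row.Inode, row.Path)
	}
}
```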
263 vendor/github.com/prometheus/procfs/nfs/nfs.go generated vendored
@@ -1,263 +0,0 @@
// Copyright 2018 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package nfsd implements parsing of /proc/net/rpc/nfsd.
|
|
||||||
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
|
|
||||||
package nfs
|
|
||||||
|
|
||||||
// ReplyCache models the "rc" line.
|
|
||||||
type ReplyCache struct {
|
|
||||||
Hits uint64
|
|
||||||
Misses uint64
|
|
||||||
NoCache uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileHandles models the "fh" line.
|
|
||||||
type FileHandles struct {
|
|
||||||
Stale uint64
|
|
||||||
    TotalLookups uint64
    AnonLookups  uint64
    DirNoCache   uint64
    NoDirNoCache uint64
}

// InputOutput models the "io" line.
type InputOutput struct {
    Read  uint64
    Write uint64
}

// Threads models the "th" line.
type Threads struct {
    Threads uint64
    FullCnt uint64
}

// ReadAheadCache models the "ra" line.
type ReadAheadCache struct {
    CacheSize      uint64
    CacheHistogram []uint64
    NotFound       uint64
}

// Network models the "net" line.
type Network struct {
    NetCount   uint64
    UDPCount   uint64
    TCPCount   uint64
    TCPConnect uint64
}

// ClientRPC models the nfs "rpc" line.
type ClientRPC struct {
    RPCCount        uint64
    Retransmissions uint64
    AuthRefreshes   uint64
}

// ServerRPC models the nfsd "rpc" line.
type ServerRPC struct {
    RPCCount uint64
    BadCnt   uint64
    BadFmt   uint64
    BadAuth  uint64
    BadcInt  uint64
}

// V2Stats models the "proc2" line.
type V2Stats struct {
    Null     uint64
    GetAttr  uint64
    SetAttr  uint64
    Root     uint64
    Lookup   uint64
    ReadLink uint64
    Read     uint64
    WrCache  uint64
    Write    uint64
    Create   uint64
    Remove   uint64
    Rename   uint64
    Link     uint64
    SymLink  uint64
    MkDir    uint64
    RmDir    uint64
    ReadDir  uint64
    FsStat   uint64
}

// V3Stats models the "proc3" line.
type V3Stats struct {
    Null        uint64
    GetAttr     uint64
    SetAttr     uint64
    Lookup      uint64
    Access      uint64
    ReadLink    uint64
    Read        uint64
    Write       uint64
    Create      uint64
    MkDir       uint64
    SymLink     uint64
    MkNod       uint64
    Remove      uint64
    RmDir       uint64
    Rename      uint64
    Link        uint64
    ReadDir     uint64
    ReadDirPlus uint64
    FsStat      uint64
    FsInfo      uint64
    PathConf    uint64
    Commit      uint64
}

// ClientV4Stats models the nfs "proc4" line.
type ClientV4Stats struct {
    Null               uint64
    Read               uint64
    Write              uint64
    Commit             uint64
    Open               uint64
    OpenConfirm        uint64
    OpenNoattr         uint64
    OpenDowngrade      uint64
    Close              uint64
    Setattr            uint64
    FsInfo             uint64
    Renew              uint64
    SetClientId        uint64
    SetClientIdConfirm uint64
    Lock               uint64
    Lockt              uint64
    Locku              uint64
    Access             uint64
    Getattr            uint64
    Lookup             uint64
    LookupRoot         uint64
    Remove             uint64
    Rename             uint64
    Link               uint64
    Symlink            uint64
    Create             uint64
    Pathconf           uint64
    StatFs             uint64
    ReadLink           uint64
    ReadDir            uint64
    ServerCaps         uint64
    DelegReturn        uint64
    GetAcl             uint64
    SetAcl             uint64
    FsLocations        uint64
    ReleaseLockowner   uint64
    Secinfo            uint64
    FsidPresent        uint64
    ExchangeId         uint64
    CreateSession      uint64
    DestroySession     uint64
    Sequence           uint64
    GetLeaseTime       uint64
    ReclaimComplete    uint64
    LayoutGet          uint64
    GetDeviceInfo      uint64
    LayoutCommit       uint64
    LayoutReturn       uint64
    SecinfoNoName      uint64
    TestStateId        uint64
    FreeStateId        uint64
    GetDeviceList      uint64
    BindConnToSession  uint64
    DestroyClientId    uint64
    Seek               uint64
    Allocate           uint64
    DeAllocate         uint64
    LayoutStats        uint64
    Clone              uint64
}

// ServerV4Stats models the nfsd "proc4" line.
type ServerV4Stats struct {
    Null     uint64
    Compound uint64
}

// V4Ops models the "proc4ops" line: NFSv4 operations
// Variable list, see:
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
type V4Ops struct {
    //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
    Op0Unused    uint64
    Op1Unused    uint64
    Op2Future    uint64
    Access       uint64
    Close        uint64
    Commit       uint64
    Create       uint64
    DelegPurge   uint64
    DelegReturn  uint64
    GetAttr      uint64
    GetFH        uint64
    Link         uint64
    Lock         uint64
    Lockt        uint64
    Locku        uint64
    Lookup       uint64
    LookupRoot   uint64
    Nverify      uint64
    Open         uint64
    OpenAttr     uint64
    OpenConfirm  uint64
    OpenDgrd     uint64
    PutFH        uint64
    PutPubFH     uint64
    PutRootFH    uint64
    Read         uint64
    ReadDir      uint64
    ReadLink     uint64
    Remove       uint64
    Rename       uint64
    Renew        uint64
    RestoreFH    uint64
    SaveFH       uint64
    SecInfo      uint64
    SetAttr      uint64
    Verify       uint64
    Write        uint64
    RelLockOwner uint64
}

// RPCStats models all stats from /proc/net/rpc/nfs.
type ClientRPCStats struct {
    Network       Network
    ClientRPC     ClientRPC
    V2Stats       V2Stats
    V3Stats       V3Stats
    ClientV4Stats ClientV4Stats
}

// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
type ServerRPCStats struct {
    ReplyCache     ReplyCache
    FileHandles    FileHandles
    InputOutput    InputOutput
    Threads        Threads
    ReadAheadCache ReadAheadCache
    Network        Network
    ServerRPC      ServerRPC
    V2Stats        V2Stats
    V3Stats        V3Stats
    ServerV4Stats  ServerV4Stats
    V4Ops          V4Ops
}
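For orientation, these structs mirror the label-per-line layout of /proc/net/rpc/nfs and /proc/net/rpc/nfsd: each label ("net", "rpc", "proc2", ...) is parsed into one of the types above. Below is a minimal sketch of driving the client-side parser (ParseClientRPCStats, shown further down in this diff) from the nfs package as it was vendored before this bump; the sample counter values are illustrative, not real output.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	// Illustrative excerpt in the format of /proc/net/rpc/nfs; the numbers are made up.
	sample := "net 18628 0 18628 6\nrpc 4329785 0 4338291\n"

	stats, err := nfs.ParseClientRPCStats(strings.NewReader(sample))
	if err != nil {
		fmt.Println("parse:", err)
		return
	}

	// The "net" line fills Network, the "rpc" line fills ClientRPC.
	fmt.Println("rpc calls:", stats.ClientRPC.RPCCount)
	fmt.Println("retransmissions:", stats.ClientRPC.Retransmissions)
}
```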
308
vendor/github.com/prometheus/procfs/nfs/parse.go
generated
vendored
@ -1,308 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
    "fmt"
)

func parseReplyCache(v []uint64) (ReplyCache, error) {
    if len(v) != 3 {
        return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
    }

    return ReplyCache{
        Hits:    v[0],
        Misses:  v[1],
        NoCache: v[2],
    }, nil
}

func parseFileHandles(v []uint64) (FileHandles, error) {
    if len(v) != 5 {
        return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
    }

    return FileHandles{
        Stale:        v[0],
        TotalLookups: v[1],
        AnonLookups:  v[2],
        DirNoCache:   v[3],
        NoDirNoCache: v[4],
    }, nil
}

func parseInputOutput(v []uint64) (InputOutput, error) {
    if len(v) != 2 {
        return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
    }

    return InputOutput{
        Read:  v[0],
        Write: v[1],
    }, nil
}

func parseThreads(v []uint64) (Threads, error) {
    if len(v) != 2 {
        return Threads{}, fmt.Errorf("invalid Threads line %q", v)
    }

    return Threads{
        Threads: v[0],
        FullCnt: v[1],
    }, nil
}

func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
    if len(v) != 12 {
        return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
    }

    return ReadAheadCache{
        CacheSize:      v[0],
        CacheHistogram: v[1:11],
        NotFound:       v[11],
    }, nil
}

func parseNetwork(v []uint64) (Network, error) {
    if len(v) != 4 {
        return Network{}, fmt.Errorf("invalid Network line %q", v)
    }

    return Network{
        NetCount:   v[0],
        UDPCount:   v[1],
        TCPCount:   v[2],
        TCPConnect: v[3],
    }, nil
}

func parseServerRPC(v []uint64) (ServerRPC, error) {
    if len(v) != 5 {
        return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
    }

    return ServerRPC{
        RPCCount: v[0],
        BadCnt:   v[1],
        BadFmt:   v[2],
        BadAuth:  v[3],
        BadcInt:  v[4],
    }, nil
}

func parseClientRPC(v []uint64) (ClientRPC, error) {
    if len(v) != 3 {
        return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
    }

    return ClientRPC{
        RPCCount:        v[0],
        Retransmissions: v[1],
        AuthRefreshes:   v[2],
    }, nil
}

func parseV2Stats(v []uint64) (V2Stats, error) {
    values := int(v[0])
    if len(v[1:]) != values || values != 18 {
        return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
    }

    return V2Stats{
        Null:     v[1],
        GetAttr:  v[2],
        SetAttr:  v[3],
        Root:     v[4],
        Lookup:   v[5],
        ReadLink: v[6],
        Read:     v[7],
        WrCache:  v[8],
        Write:    v[9],
        Create:   v[10],
        Remove:   v[11],
        Rename:   v[12],
        Link:     v[13],
        SymLink:  v[14],
        MkDir:    v[15],
        RmDir:    v[16],
        ReadDir:  v[17],
        FsStat:   v[18],
    }, nil
}

func parseV3Stats(v []uint64) (V3Stats, error) {
    values := int(v[0])
    if len(v[1:]) != values || values != 22 {
        return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
    }

    return V3Stats{
        Null:        v[1],
        GetAttr:     v[2],
        SetAttr:     v[3],
        Lookup:      v[4],
        Access:      v[5],
        ReadLink:    v[6],
        Read:        v[7],
        Write:       v[8],
        Create:      v[9],
        MkDir:       v[10],
        SymLink:     v[11],
        MkNod:       v[12],
        Remove:      v[13],
        RmDir:       v[14],
        Rename:      v[15],
        Link:        v[16],
        ReadDir:     v[17],
        ReadDirPlus: v[18],
        FsStat:      v[19],
        FsInfo:      v[20],
        PathConf:    v[21],
        Commit:      v[22],
    }, nil
}

func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
    values := int(v[0])
    if len(v[1:]) != values || values < 59 {
        return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
    }

    return ClientV4Stats{
        Null:               v[1],
        Read:               v[2],
        Write:              v[3],
        Commit:             v[4],
        Open:               v[5],
        OpenConfirm:        v[6],
        OpenNoattr:         v[7],
        OpenDowngrade:      v[8],
        Close:              v[9],
        Setattr:            v[10],
        FsInfo:             v[11],
        Renew:              v[12],
        SetClientId:        v[13],
        SetClientIdConfirm: v[14],
        Lock:               v[15],
        Lockt:              v[16],
        Locku:              v[17],
        Access:             v[18],
        Getattr:            v[19],
        Lookup:             v[20],
        LookupRoot:         v[21],
        Remove:             v[22],
        Rename:             v[23],
        Link:               v[24],
        Symlink:            v[25],
        Create:             v[26],
        Pathconf:           v[27],
        StatFs:             v[28],
        ReadLink:           v[29],
        ReadDir:            v[30],
        ServerCaps:         v[31],
        DelegReturn:        v[32],
        GetAcl:             v[33],
        SetAcl:             v[34],
        FsLocations:        v[35],
        ReleaseLockowner:   v[36],
        Secinfo:            v[37],
        FsidPresent:        v[38],
        ExchangeId:         v[39],
        CreateSession:      v[40],
        DestroySession:     v[41],
        Sequence:           v[42],
        GetLeaseTime:       v[43],
        ReclaimComplete:    v[44],
        LayoutGet:          v[45],
        GetDeviceInfo:      v[46],
        LayoutCommit:       v[47],
        LayoutReturn:       v[48],
        SecinfoNoName:      v[49],
        TestStateId:        v[50],
        FreeStateId:        v[51],
        GetDeviceList:      v[52],
        BindConnToSession:  v[53],
        DestroyClientId:    v[54],
        Seek:               v[55],
        Allocate:           v[56],
        DeAllocate:         v[57],
        LayoutStats:        v[58],
        Clone:              v[59],
    }, nil
}

func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
    values := int(v[0])
    if len(v[1:]) != values || values != 2 {
        return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
    }

    return ServerV4Stats{
        Null:     v[1],
        Compound: v[2],
    }, nil
}

func parseV4Ops(v []uint64) (V4Ops, error) {
    values := int(v[0])
    if len(v[1:]) != values || values < 39 {
        return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
    }

    stats := V4Ops{
        Op0Unused:    v[1],
        Op1Unused:    v[2],
        Op2Future:    v[3],
        Access:       v[4],
        Close:        v[5],
        Commit:       v[6],
        Create:       v[7],
        DelegPurge:   v[8],
        DelegReturn:  v[9],
        GetAttr:      v[10],
        GetFH:        v[11],
        Link:         v[12],
        Lock:         v[13],
        Lockt:        v[14],
        Locku:        v[15],
        Lookup:       v[16],
        LookupRoot:   v[17],
        Nverify:      v[18],
        Open:         v[19],
        OpenAttr:     v[20],
        OpenConfirm:  v[21],
        OpenDgrd:     v[22],
        PutFH:        v[23],
        PutPubFH:     v[24],
        PutRootFH:    v[25],
        Read:         v[26],
        ReadDir:      v[27],
        ReadLink:     v[28],
        Remove:       v[29],
        Rename:       v[30],
        Renew:        v[31],
        RestoreFH:    v[32],
        SaveFH:       v[33],
        SecInfo:      v[34],
        SetAttr:      v[35],
        Verify:       v[36],
        Write:        v[37],
        RelLockOwner: v[38],
    }

    return stats, nil
}
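Note the convention the proc2/proc3/proc4 parsers above rely on: the first number on the line is a count of the values that follow, so each parser checks len(v[1:]) against that count before indexing into the slice. A small stand-alone sketch of that validation, using a hypothetical countPrefixed helper that is not part of the package:

```go
package main

import "fmt"

// countPrefixed checks the "count followed by that many values" layout used by
// the proc2/proc3/proc4 lines and returns only the values.
func countPrefixed(v []uint64, min int) ([]uint64, error) {
	if len(v) == 0 {
		return nil, fmt.Errorf("empty line")
	}
	n := int(v[0])
	if len(v[1:]) != n || n < min {
		return nil, fmt.Errorf("invalid line %v", v)
	}
	return v[1:], nil
}

func main() {
	// A "proc3 22 ..." style line: 22 values follow the leading count.
	line := make([]uint64, 23)
	line[0] = 22
	vals, err := countPrefixed(line, 22)
	fmt.Println(len(vals), err) // 22 <nil>
}
```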
67
vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
generated
vendored
@ -1,67 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
    "bufio"
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
    stats := &ClientRPCStats{}

    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        line := scanner.Text()
        parts := strings.Fields(scanner.Text())
        // require at least <key> <value>
        if len(parts) < 2 {
            return nil, fmt.Errorf("invalid NFSd metric line %q", line)
        }

        values, err := util.ParseUint64s(parts[1:])
        if err != nil {
            return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
        }

        switch metricLine := parts[0]; metricLine {
        case "net":
            stats.Network, err = parseNetwork(values)
        case "rpc":
            stats.ClientRPC, err = parseClientRPC(values)
        case "proc2":
            stats.V2Stats, err = parseV2Stats(values)
        case "proc3":
            stats.V3Stats, err = parseV3Stats(values)
        case "proc4":
            stats.ClientV4Stats, err = parseClientV4Stats(values)
        default:
            return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
        }
        if err != nil {
            return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
        }
    }

    if err := scanner.Err(); err != nil {
        return nil, fmt.Errorf("error scanning NFSd file: %s", err)
    }

    return stats, nil
}
89
vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
generated
vendored
@ -1,89 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nfs

import (
    "bufio"
    "fmt"
    "io"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
    stats := &ServerRPCStats{}

    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        line := scanner.Text()
        parts := strings.Fields(scanner.Text())
        // require at least <key> <value>
        if len(parts) < 2 {
            return nil, fmt.Errorf("invalid NFSd metric line %q", line)
        }
        label := parts[0]

        var values []uint64
        var err error
        if label == "th" {
            if len(parts) < 3 {
                return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
            }
            values, err = util.ParseUint64s(parts[1:3])
        } else {
            values, err = util.ParseUint64s(parts[1:])
        }
        if err != nil {
            return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
        }

        switch metricLine := parts[0]; metricLine {
        case "rc":
            stats.ReplyCache, err = parseReplyCache(values)
        case "fh":
            stats.FileHandles, err = parseFileHandles(values)
        case "io":
            stats.InputOutput, err = parseInputOutput(values)
        case "th":
            stats.Threads, err = parseThreads(values)
        case "ra":
            stats.ReadAheadCache, err = parseReadAheadCache(values)
        case "net":
            stats.Network, err = parseNetwork(values)
        case "rpc":
            stats.ServerRPC, err = parseServerRPC(values)
        case "proc2":
            stats.V2Stats, err = parseV2Stats(values)
        case "proc3":
            stats.V3Stats, err = parseV3Stats(values)
        case "proc4":
            stats.ServerV4Stats, err = parseServerV4Stats(values)
        case "proc4ops":
            stats.V4Ops, err = parseV4Ops(values)
        default:
            return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
        }
        if err != nil {
            return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
        }
    }

    if err := scanner.Err(); err != nil {
        return nil, fmt.Errorf("error scanning NFSd file: %s", err)
    }

    return stats, nil
}
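The server-side entry point mirrors the client-side one: ParseServerRPCStats dispatches on the first token of each /proc/net/rpc/nfsd line and fills the matching field of ServerRPCStats. A minimal usage sketch against the pre-bump vendored package; opening the file directly and the printed fields are my choice of illustration, not something this diff prescribes.

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfsd")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	stats, err := nfs.ParseServerRPCStats(f)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}

	// Threads models the "th" line, ServerRPC the "rpc" line.
	fmt.Println("nfsd threads:", stats.Threads.Threads)
	fmt.Println("rpc calls:", stats.ServerRPC.RPCCount)
}
```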
122
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -1,11 +1,28 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/fs"
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Proc provides information about a running process.
|
// Proc provides information about a running process.
|
||||||
@ -13,7 +30,7 @@ type Proc struct {
|
|||||||
// The process ID.
|
// The process ID.
|
||||||
PID int
|
PID int
|
||||||
|
|
||||||
fs FS
|
fs fs.FS
|
||||||
}
|
}
|
||||||
|
|
||||||
// Procs represents a list of Proc structs.
|
// Procs represents a list of Proc structs.
|
||||||
@ -38,7 +55,7 @@ func NewProc(pid int) (Proc, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
return fs.NewProc(pid)
|
return fs.Proc(pid)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllProcs returns a list of all currently available processes under /proc.
|
// AllProcs returns a list of all currently available processes under /proc.
|
||||||
@ -52,28 +69,35 @@ func AllProcs() (Procs, error) {
|
|||||||
|
|
||||||
// Self returns a process for the current process.
|
// Self returns a process for the current process.
|
||||||
func (fs FS) Self() (Proc, error) {
|
func (fs FS) Self() (Proc, error) {
|
||||||
p, err := os.Readlink(fs.Path("self"))
|
p, err := os.Readlink(fs.proc.Path("self"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
|
pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
return fs.NewProc(pid)
|
return fs.Proc(pid)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewProc returns a process for the given pid.
|
// NewProc returns a process for the given pid.
|
||||||
|
//
|
||||||
|
// Deprecated: use fs.Proc() instead
|
||||||
func (fs FS) NewProc(pid int) (Proc, error) {
|
func (fs FS) NewProc(pid int) (Proc, error) {
|
||||||
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
|
return fs.Proc(pid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Proc returns a process for the given pid.
|
||||||
|
func (fs FS) Proc(pid int) (Proc, error) {
|
||||||
|
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
return Proc{PID: pid, fs: fs}, nil
|
return Proc{PID: pid, fs: fs.proc}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllProcs returns a list of all currently available processes.
|
// AllProcs returns a list of all currently available processes.
|
||||||
func (fs FS) AllProcs() (Procs, error) {
|
func (fs FS) AllProcs() (Procs, error) {
|
||||||
d, err := os.Open(fs.Path())
|
d, err := os.Open(fs.proc.Path())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Procs{}, err
|
return Procs{}, err
|
||||||
}
|
}
|
||||||
@ -90,7 +114,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
p = append(p, Proc{PID: int(pid), fs: fs})
|
p = append(p, Proc{PID: int(pid), fs: fs.proc})
|
||||||
}
|
}
|
||||||
|
|
||||||
return p, nil
|
return p, nil
|
||||||
@ -98,13 +122,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
|||||||
|
|
||||||
// CmdLine returns the command line of a process.
|
// CmdLine returns the command line of a process.
|
||||||
func (p Proc) CmdLine() ([]string, error) {
|
func (p Proc) CmdLine() ([]string, error) {
|
||||||
f, err := os.Open(p.path("cmdline"))
|
data, err := util.ReadFileNoStat(p.path("cmdline"))
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -113,18 +131,12 @@ func (p Proc) CmdLine() ([]string, error) {
|
|||||||
return []string{}, nil
|
return []string{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
|
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Comm returns the command name of a process.
|
// Comm returns the command name of a process.
|
||||||
func (p Proc) Comm() (string, error) {
|
func (p Proc) Comm() (string, error) {
|
||||||
f, err := os.Open(p.path("comm"))
|
data, err := util.ReadFileNoStat(p.path("comm"))
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -142,6 +154,26 @@ func (p Proc) Executable() (string, error) {
|
|||||||
return exe, err
|
return exe, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Cwd returns the absolute path to the current working directory of the process.
|
||||||
|
func (p Proc) Cwd() (string, error) {
|
||||||
|
wd, err := os.Readlink(p.path("cwd"))
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return wd, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RootDir returns the absolute path to the process's root directory (as set by chroot)
|
||||||
|
func (p Proc) RootDir() (string, error) {
|
||||||
|
rdir, err := os.Readlink(p.path("root"))
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return rdir, err
|
||||||
|
}
|
||||||
|
|
||||||
// FileDescriptors returns the currently open file descriptors of a process.
|
// FileDescriptors returns the currently open file descriptors of a process.
|
||||||
func (p Proc) FileDescriptors() ([]uintptr, error) {
|
func (p Proc) FileDescriptors() ([]uintptr, error) {
|
||||||
names, err := p.fileDescriptors()
|
names, err := p.fileDescriptors()
|
||||||
@ -204,6 +236,18 @@ func (p Proc) MountStats() ([]*Mount, error) {
|
|||||||
return parseMountStats(f)
|
return parseMountStats(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MountInfo retrieves mount information for mount points in a
|
||||||
|
// process's namespace.
|
||||||
|
// It supplies information missing in `/proc/self/mounts` and
|
||||||
|
// fixes various other problems with that file too.
|
||||||
|
func (p Proc) MountInfo() ([]*MountInfo, error) {
|
||||||
|
data, err := util.ReadFileNoStat(p.path("mountinfo"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return parseMountInfo(data)
|
||||||
|
}
|
||||||
|
|
||||||
func (p Proc) fileDescriptors() ([]string, error) {
|
func (p Proc) fileDescriptors() ([]string, error) {
|
||||||
d, err := os.Open(p.path("fd"))
|
d, err := os.Open(p.path("fd"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -222,3 +266,33 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
|||||||
func (p Proc) path(pa ...string) string {
|
func (p Proc) path(pa ...string) string {
|
||||||
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FileDescriptorsInfo retrieves information about all file descriptors of
|
||||||
|
// the process.
|
||||||
|
func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
|
||||||
|
names, err := p.fileDescriptors()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var fdinfos ProcFDInfos
|
||||||
|
|
||||||
|
for _, n := range names {
|
||||||
|
fdinfo, err := p.FDInfo(n)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fdinfos = append(fdinfos, *fdinfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fdinfos, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Schedstat returns task scheduling information for the process.
|
||||||
|
func (p Proc) Schedstat() (ProcSchedstat, error) {
|
||||||
|
contents, err := ioutil.ReadFile(p.path("schedstat"))
|
||||||
|
if err != nil {
|
||||||
|
return ProcSchedstat{}, err
|
||||||
|
}
|
||||||
|
return parseProcSchedstat(string(contents))
|
||||||
|
}
|
||||||
|
37
vendor/github.com/prometheus/procfs/proc_environ.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Environ reads process environments from /proc/<pid>/environ
|
||||||
|
func (p Proc) Environ() ([]string, error) {
|
||||||
|
environments := make([]string, 0)
|
||||||
|
|
||||||
|
data, err := util.ReadFileNoStat(p.path("environ"))
|
||||||
|
if err != nil {
|
||||||
|
return environments, err
|
||||||
|
}
|
||||||
|
|
||||||
|
environments = strings.Split(string(data), "\000")
|
||||||
|
if len(environments) > 0 {
|
||||||
|
environments = environments[:len(environments)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return environments, nil
|
||||||
|
}
|
125
vendor/github.com/prometheus/procfs/proc_fdinfo.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Regexp variables
|
||||||
|
var (
|
||||||
|
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
|
||||||
|
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
|
||||||
|
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
|
||||||
|
rInotify = regexp.MustCompile(`^inotify`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProcFDInfo contains represents file descriptor information.
|
||||||
|
type ProcFDInfo struct {
|
||||||
|
// File descriptor
|
||||||
|
FD string
|
||||||
|
// File offset
|
||||||
|
Pos string
|
||||||
|
// File access mode and status flags
|
||||||
|
Flags string
|
||||||
|
// Mount point ID
|
||||||
|
MntID string
|
||||||
|
// List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only)
|
||||||
|
InotifyInfos []InotifyInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
|
||||||
|
func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
|
||||||
|
data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var text, pos, flags, mntid string
|
||||||
|
var inotify []InotifyInfo
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bytes.NewReader(data))
|
||||||
|
for scanner.Scan() {
|
||||||
|
text = scanner.Text()
|
||||||
|
if rPos.MatchString(text) {
|
||||||
|
pos = rPos.FindStringSubmatch(text)[1]
|
||||||
|
} else if rFlags.MatchString(text) {
|
||||||
|
flags = rFlags.FindStringSubmatch(text)[1]
|
||||||
|
} else if rMntID.MatchString(text) {
|
||||||
|
mntid = rMntID.FindStringSubmatch(text)[1]
|
||||||
|
} else if rInotify.MatchString(text) {
|
||||||
|
newInotify, err := parseInotifyInfo(text)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
inotify = append(inotify, *newInotify)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
i := &ProcFDInfo{
|
||||||
|
FD: fd,
|
||||||
|
Pos: pos,
|
||||||
|
Flags: flags,
|
||||||
|
MntID: mntid,
|
||||||
|
InotifyInfos: inotify,
|
||||||
|
}
|
||||||
|
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InotifyInfo represents a single inotify line in the fdinfo file.
|
||||||
|
type InotifyInfo struct {
|
||||||
|
// Watch descriptor number
|
||||||
|
WD string
|
||||||
|
// Inode number
|
||||||
|
Ino string
|
||||||
|
// Device ID
|
||||||
|
Sdev string
|
||||||
|
// Mask of events being monitored
|
||||||
|
Mask string
|
||||||
|
}
|
||||||
|
|
||||||
|
// InotifyInfo constructor. Only available on kernel 3.8+.
|
||||||
|
func parseInotifyInfo(line string) (*InotifyInfo, error) {
|
||||||
|
r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
|
||||||
|
m := r.FindStringSubmatch(line)
|
||||||
|
i := &InotifyInfo{
|
||||||
|
WD: m[1],
|
||||||
|
Ino: m[2],
|
||||||
|
Sdev: m[3],
|
||||||
|
Mask: m[4],
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcFDInfos represents a list of ProcFDInfo structs.
|
||||||
|
type ProcFDInfos []ProcFDInfo
|
||||||
|
|
||||||
|
func (p ProcFDInfos) Len() int { return len(p) }
|
||||||
|
func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
|
||||||
|
|
||||||
|
// InotifyWatchLen returns the total number of inotify watches
|
||||||
|
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
|
||||||
|
length := 0
|
||||||
|
for _, f := range p {
|
||||||
|
length += len(f.InotifyInfos)
|
||||||
|
}
|
||||||
|
|
||||||
|
return length, nil
|
||||||
|
}
|
29
vendor/github.com/prometheus/procfs/proc_io.go
generated
vendored
@ -1,9 +1,22 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"github.com/prometheus/procfs/internal/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProcIO models the content of /proc/<pid>/io.
|
// ProcIO models the content of /proc/<pid>/io.
|
||||||
@ -26,17 +39,11 @@ type ProcIO struct {
|
|||||||
CancelledWriteBytes int64
|
CancelledWriteBytes int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewIO creates a new ProcIO instance from a given Proc instance.
|
// IO creates a new ProcIO instance from a given Proc instance.
|
||||||
func (p Proc) NewIO() (ProcIO, error) {
|
func (p Proc) IO() (ProcIO, error) {
|
||||||
pio := ProcIO{}
|
pio := ProcIO{}
|
||||||
|
|
||||||
f, err := os.Open(p.path("io"))
|
data, err := util.ReadFileNoStat(p.path("io"))
|
||||||
if err != nil {
|
|
||||||
return pio, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return pio, err
|
return pio, err
|
||||||
}
|
}
|
||||||
|
20
vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
@ -1,3 +1,16 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -65,7 +78,14 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// NewLimits returns the current soft limits of the process.
|
// NewLimits returns the current soft limits of the process.
|
||||||
|
//
|
||||||
|
// Deprecated: use p.Limits() instead
|
||||||
func (p Proc) NewLimits() (ProcLimits, error) {
|
func (p Proc) NewLimits() (ProcLimits, error) {
|
||||||
|
return p.Limits()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limits returns the current soft limits of the process.
|
||||||
|
func (p Proc) Limits() (ProcLimits, error) {
|
||||||
f, err := os.Open(p.path("limits"))
|
f, err := os.Open(p.path("limits"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcLimits{}, err
|
return ProcLimits{}, err
|
||||||
|
17
vendor/github.com/prometheus/procfs/proc_ns.go
generated
vendored
@ -1,3 +1,16 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -16,9 +29,9 @@ type Namespace struct {
|
|||||||
// Namespaces contains all of the namespaces that the process is contained in.
|
// Namespaces contains all of the namespaces that the process is contained in.
|
||||||
type Namespaces map[string]Namespace
|
type Namespaces map[string]Namespace
|
||||||
|
|
||||||
// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
|
// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
|
||||||
// process is a member.
|
// process is a member.
|
||||||
func (p Proc) NewNamespaces() (Namespaces, error) {
|
func (p Proc) Namespaces() (Namespaces, error) {
|
||||||
d, err := os.Open(p.path("ns"))
|
d, err := os.Open(p.path("ns"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
100
vendor/github.com/prometheus/procfs/proc_psi.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
// The PSI / pressure interface is described at
|
||||||
|
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
|
||||||
|
// Each resource (cpu, io, memory, ...) is exposed as a single file.
|
||||||
|
// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
|
||||||
|
// Each line contains several averages (over n seconds) and a total in µs.
|
||||||
|
//
|
||||||
|
// Example io pressure file:
|
||||||
|
// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
|
||||||
|
// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
|
||||||
|
|
||||||
|
// PSILine is a single line of values as returned by /proc/pressure/*
|
||||||
|
// The Avg entries are averages over n seconds, as a percentage
|
||||||
|
// The Total line is in microseconds
|
||||||
|
type PSILine struct {
|
||||||
|
Avg10 float64
|
||||||
|
Avg60 float64
|
||||||
|
Avg300 float64
|
||||||
|
Total uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// PSIStats represent pressure stall information from /proc/pressure/*
|
||||||
|
// Some indicates the share of time in which at least some tasks are stalled
|
||||||
|
// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
|
||||||
|
type PSIStats struct {
|
||||||
|
Some *PSILine
|
||||||
|
Full *PSILine
|
||||||
|
}
|
||||||
|
|
||||||
|
// PSIStatsForResource reads pressure stall information for the specified
|
||||||
|
// resource from /proc/pressure/<resource>. At time of writing this can be
|
||||||
|
// either "cpu", "memory" or "io".
|
||||||
|
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
|
||||||
|
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
|
||||||
|
if err != nil {
|
||||||
|
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
|
||||||
|
}
|
||||||
|
|
||||||
|
return parsePSIStats(resource, bytes.NewReader(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsePSIStats parses the specified file for pressure stall information
|
||||||
|
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
|
||||||
|
psiStats := PSIStats{}
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(r)
|
||||||
|
for scanner.Scan() {
|
||||||
|
l := scanner.Text()
|
||||||
|
prefix := strings.Split(l, " ")[0]
|
||||||
|
switch prefix {
|
||||||
|
case "some":
|
||||||
|
psi := PSILine{}
|
||||||
|
_, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
|
||||||
|
if err != nil {
|
||||||
|
return PSIStats{}, err
|
||||||
|
}
|
||||||
|
psiStats.Some = &psi
|
||||||
|
case "full":
|
||||||
|
psi := PSILine{}
|
||||||
|
_, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
|
||||||
|
if err != nil {
|
||||||
|
return PSIStats{}, err
|
||||||
|
}
|
||||||
|
psiStats.Full = &psi
|
||||||
|
default:
|
||||||
|
// If we encounter a line with an unknown prefix, ignore it and move on
|
||||||
|
// Should new measurement types be added in the future we'll simply ignore them instead
|
||||||
|
// of erroring on retrieval
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return psiStats, nil
|
||||||
|
}
|
39
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -1,10 +1,25 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/fs"
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
|
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
|
||||||
@ -82,22 +97,23 @@ type ProcStat struct {
|
|||||||
// in clock ticks.
|
// in clock ticks.
|
||||||
Starttime uint64
|
Starttime uint64
|
||||||
// Virtual memory size in bytes.
|
// Virtual memory size in bytes.
|
||||||
VSize int
|
VSize uint
|
||||||
// Resident set size in pages.
|
// Resident set size in pages.
|
||||||
RSS int
|
RSS int
|
||||||
|
|
||||||
fs FS
|
proc fs.FS
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStat returns the current status information of the process.
|
// NewStat returns the current status information of the process.
|
||||||
|
//
|
||||||
|
// Deprecated: use p.Stat() instead
|
||||||
func (p Proc) NewStat() (ProcStat, error) {
|
func (p Proc) NewStat() (ProcStat, error) {
|
||||||
f, err := os.Open(p.path("stat"))
|
return p.Stat()
|
||||||
if err != nil {
|
|
||||||
return ProcStat{}, err
|
|
||||||
}
|
}
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
// Stat returns the current status information of the process.
|
||||||
|
func (p Proc) Stat() (ProcStat, error) {
|
||||||
|
data, err := util.ReadFileNoStat(p.path("stat"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcStat{}, err
|
return ProcStat{}, err
|
||||||
}
|
}
|
||||||
@ -105,7 +121,7 @@ func (p Proc) NewStat() (ProcStat, error) {
|
|||||||
var (
|
var (
|
||||||
ignore int
|
ignore int
|
||||||
|
|
||||||
s = ProcStat{PID: p.PID, fs: p.fs}
|
s = ProcStat{PID: p.PID, proc: p.fs}
|
||||||
l = bytes.Index(data, []byte("("))
|
l = bytes.Index(data, []byte("("))
|
||||||
r = bytes.LastIndex(data, []byte(")"))
|
r = bytes.LastIndex(data, []byte(")"))
|
||||||
)
|
)
|
||||||
@ -151,7 +167,7 @@ func (p Proc) NewStat() (ProcStat, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// VirtualMemory returns the virtual memory size in bytes.
|
// VirtualMemory returns the virtual memory size in bytes.
|
||||||
func (s ProcStat) VirtualMemory() int {
|
func (s ProcStat) VirtualMemory() uint {
|
||||||
return s.VSize
|
return s.VSize
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -162,7 +178,8 @@ func (s ProcStat) ResidentMemory() int {
|
|||||||
|
|
||||||
// StartTime returns the unix timestamp of the process in seconds.
|
// StartTime returns the unix timestamp of the process in seconds.
|
||||||
func (s ProcStat) StartTime() (float64, error) {
|
func (s ProcStat) StartTime() (float64, error) {
|
||||||
stat, err := s.fs.NewStat()
|
fs := FS{proc: s.proc}
|
||||||
|
stat, err := fs.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
161
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
// Copyright 2018 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProcStatus provides status information about the process,
|
||||||
|
// read from /proc/[pid]/stat.
|
||||||
|
type ProcStatus struct {
|
||||||
|
// The process ID.
|
||||||
|
PID int
|
||||||
|
// The process name.
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// Thread group ID.
|
||||||
|
TGID int
|
||||||
|
|
||||||
|
// Peak virtual memory size.
|
||||||
|
VmPeak uint64
|
||||||
|
// Virtual memory size.
|
||||||
|
VmSize uint64
|
||||||
|
// Locked memory size.
|
||||||
|
VmLck uint64
|
||||||
|
// Pinned memory size.
|
||||||
|
VmPin uint64
|
||||||
|
// Peak resident set size.
|
||||||
|
VmHWM uint64
|
||||||
|
// Resident set size (sum of RssAnnon RssFile and RssShmem).
|
||||||
|
VmRSS uint64
|
||||||
|
// Size of resident anonymous memory.
|
||||||
|
RssAnon uint64
|
||||||
|
// Size of resident file mappings.
|
||||||
|
RssFile uint64
|
||||||
|
// Size of resident shared memory.
|
||||||
|
RssShmem uint64
|
||||||
|
// Size of data segments.
|
||||||
|
VmData uint64
|
||||||
|
// Size of stack segments.
|
||||||
|
VmStk uint64
|
||||||
|
// Size of text segments.
|
||||||
|
VmExe uint64
|
||||||
|
// Shared library code size.
|
||||||
|
VmLib uint64
|
||||||
|
// Page table entries size.
|
||||||
|
VmPTE uint64
|
||||||
|
// Size of second-level page tables.
|
||||||
|
VmPMD uint64
|
||||||
|
// Swapped-out virtual memory size by anonymous private.
|
||||||
|
VmSwap uint64
|
||||||
|
// Size of hugetlb memory portions
|
||||||
|
HugetlbPages uint64
|
||||||
|
|
||||||
|
// Number of voluntary context switches.
|
||||||
|
VoluntaryCtxtSwitches uint64
|
||||||
|
// Number of involuntary context switches.
|
||||||
|
NonVoluntaryCtxtSwitches uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStatus returns the current status information of the process.
|
||||||
|
func (p Proc) NewStatus() (ProcStatus, error) {
|
||||||
|
data, err := util.ReadFileNoStat(p.path("status"))
|
||||||
|
if err != nil {
|
||||||
|
return ProcStatus{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
s := ProcStatus{PID: p.PID}
|
||||||
|
|
||||||
|
lines := strings.Split(string(data), "\n")
|
||||||
|
for _, line := range lines {
|
||||||
|
if !bytes.Contains([]byte(line), []byte(":")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
kv := strings.SplitN(line, ":", 2)
|
||||||
|
|
||||||
|
// removes spaces
|
||||||
|
k := string(strings.TrimSpace(kv[0]))
|
||||||
|
v := string(strings.TrimSpace(kv[1]))
|
||||||
|
// removes "kB"
|
||||||
|
v = string(bytes.Trim([]byte(v), " kB"))
|
||||||
|
|
||||||
|
// value to int when possible
|
||||||
|
// we can skip error check here, 'cause vKBytes is not used when value is a string
|
||||||
|
vKBytes, _ := strconv.ParseUint(v, 10, 64)
|
||||||
|
// convert kB to B
|
||||||
|
vBytes := vKBytes * 1024
|
||||||
|
|
||||||
|
s.fillStatus(k, v, vKBytes, vBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
|
||||||
|
switch k {
|
||||||
|
case "Tgid":
|
||||||
|
s.TGID = int(vUint)
|
||||||
|
case "Name":
|
||||||
|
s.Name = vString
|
||||||
|
case "VmPeak":
|
||||||
|
s.VmPeak = vUintBytes
|
||||||
|
case "VmSize":
|
||||||
|
s.VmSize = vUintBytes
|
||||||
|
case "VmLck":
|
||||||
|
s.VmLck = vUintBytes
|
||||||
|
case "VmPin":
|
||||||
|
s.VmPin = vUintBytes
|
||||||
|
case "VmHWM":
|
||||||
|
s.VmHWM = vUintBytes
|
||||||
|
case "VmRSS":
|
||||||
|
s.VmRSS = vUintBytes
|
||||||
|
case "RssAnon":
|
||||||
|
s.RssAnon = vUintBytes
|
||||||
|
case "RssFile":
|
||||||
|
s.RssFile = vUintBytes
|
||||||
|
case "RssShmem":
|
||||||
|
s.RssShmem = vUintBytes
|
||||||
|
case "VmData":
|
||||||
|
s.VmData = vUintBytes
|
||||||
|
case "VmStk":
|
||||||
|
s.VmStk = vUintBytes
|
||||||
|
case "VmExe":
|
||||||
|
s.VmExe = vUintBytes
|
||||||
|
case "VmLib":
|
||||||
|
s.VmLib = vUintBytes
|
||||||
|
case "VmPTE":
|
||||||
|
s.VmPTE = vUintBytes
|
||||||
|
case "VmPMD":
|
||||||
|
s.VmPMD = vUintBytes
|
||||||
|
case "VmSwap":
|
||||||
|
s.VmSwap = vUintBytes
|
||||||
|
case "HugetlbPages":
|
||||||
|
s.HugetlbPages = vUintBytes
|
||||||
|
case "voluntary_ctxt_switches":
|
||||||
|
s.VoluntaryCtxtSwitches = vUint
|
||||||
|
case "nonvoluntary_ctxt_switches":
|
||||||
|
s.NonVoluntaryCtxtSwitches = vUint
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalCtxtSwitches returns the total context switch.
|
||||||
|
func (s ProcStatus) TotalCtxtSwitches() uint64 {
|
||||||
|
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
|
||||||
|
}
|
118
vendor/github.com/prometheus/procfs/schedstat.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
// Copyright 2019 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
|
||||||
|
procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schedstat contains scheduler statistics from /proc/schedstat
|
||||||
|
//
|
||||||
|
// See
|
||||||
|
// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
|
||||||
|
// for a detailed description of what these numbers mean.
|
||||||
|
//
|
||||||
|
// Note the current kernel documentation claims some of the time units are in
|
||||||
|
// jiffies when they are actually in nanoseconds since 2.6.23 with the
|
||||||
|
// introduction of CFS. A fix to the documentation is pending. See
|
||||||
|
// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
|
||||||
|
type Schedstat struct {
|
||||||
|
CPUs []*SchedstatCPU
|
||||||
|
}
|
||||||
|
|
||||||
|
// SchedstatCPU contains the values from one "cpu<N>" line
|
||||||
|
type SchedstatCPU struct {
|
||||||
|
CPUNum string
|
||||||
|
|
||||||
|
RunningNanoseconds uint64
|
||||||
|
WaitingNanoseconds uint64
|
||||||
|
RunTimeslices uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcSchedstat contains the values from /proc/<pid>/schedstat
|
||||||
|
type ProcSchedstat struct {
|
||||||
|
RunningNanoseconds uint64
|
||||||
|
WaitingNanoseconds uint64
|
||||||
|
RunTimeslices uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Schedstat reads data from /proc/schedstat
|
||||||
|
func (fs FS) Schedstat() (*Schedstat, error) {
|
||||||
|
file, err := os.Open(fs.proc.Path("schedstat"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
stats := &Schedstat{}
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
match := cpuLineRE.FindStringSubmatch(scanner.Text())
|
||||||
|
if match != nil {
|
||||||
|
cpu := &SchedstatCPU{}
|
||||||
|
cpu.CPUNum = match[1]
|
||||||
|
|
||||||
|
cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.CPUs = append(stats.CPUs, cpu)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return stats, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
|
||||||
|
match := procLineRE.FindStringSubmatch(contents)
|
||||||
|
|
||||||
|
if match != nil {
|
||||||
|
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = errors.New("could not parse schedstat")
|
||||||
|
return
|
||||||
|
}
|
61
vendor/github.com/prometheus/procfs/stat.go
generated
vendored
@ -1,12 +1,28 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs

import (
"bufio"
+"bytes"
"fmt"
"io"
-"os"
"strconv"
"strings"
+
+"github.com/prometheus/procfs/internal/fs"
+"github.com/prometheus/procfs/internal/util"
)

// CPUStat shows how much time the cpu spend in various stages.
@ -65,16 +81,6 @@ type Stat struct {
SoftIRQ SoftIRQStat
}

-// NewStat returns kernel/system statistics read from /proc/stat.
-func NewStat() (Stat, error) {
-fs, err := NewFS(DefaultMountPoint)
-if err != nil {
-return Stat{}, err
-}
-
-return fs.NewStat()
-}
-
// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
func parseCPUStat(line string) (CPUStat, int64, error) {
cpuStat := CPUStat{}
@ -136,19 +142,38 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
return softIRQStat, total, nil
}

-// NewStat returns an information about current kernel/system statistics.
+// NewStat returns information about current cpu/process statistics.
-func (fs FS) NewStat() (Stat, error) {
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
-f, err := os.Open(fs.Path("stat"))
+// Deprecated: use fs.Stat() instead
+func NewStat() (Stat, error) {
+fs, err := NewFS(fs.DefaultProcMountPoint)
+if err != nil {
+return Stat{}, err
+}
+return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: use fs.Stat() instead
+func (fs FS) NewStat() (Stat, error) {
+return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+fileName := fs.proc.Path("stat")
+data, err := util.ReadFileNoStat(fileName)
if err != nil {
return Stat{}, err
}
-defer f.Close()

stat := Stat{}

-scanner := bufio.NewScanner(f)
+scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@ -212,7 +237,7 @@ func (fs FS) NewStat() (Stat, error) {
}

if err := scanner.Err(); err != nil {
-return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err)
}

return stat, nil
210
vendor/github.com/prometheus/procfs/vm.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"

"github.com/prometheus/procfs/internal/util"
)

// The VM interface is described at
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string
type VM struct {
AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
BlockDump *int64 // /proc/sys/vm/block_dump
CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed
DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes
DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio
DirtyBytes *int64 // /proc/sys/vm/dirty_bytes
DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs
DirtyRatio *int64 // /proc/sys/vm/dirty_ratio
DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds
DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs
DropCaches *int64 // /proc/sys/vm/drop_caches
ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold
HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group
LaptopMode *int64 // /proc/sys/vm/laptop_mode
LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout
LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio
MaxMapCount *int64 // /proc/sys/vm/max_map_count
MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill
MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery
MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes
MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio
MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio
MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr
NrHugepages *int64 // /proc/sys/vm/nr_hugepages
NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy
NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages
NumaStat *int64 // /proc/sys/vm/numa_stat
NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order
OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks
OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task
OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes
OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory
OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio
PageCluster *int64 // /proc/sys/vm/page-cluster
PanicOnOom *int64 // /proc/sys/vm/panic_on_oom
PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction
StatInterval *int64 // /proc/sys/vm/stat_interval
Swappiness *int64 // /proc/sys/vm/swappiness
UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes
VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure
WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor
WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor
ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode
}

// VM reads the VM statistics from the specified `proc` filesystem.
func (fs FS) VM() (*VM, error) {
path := fs.proc.Path("sys/vm")
file, err := os.Stat(path)
if err != nil {
return nil, err
}
if !file.Mode().IsDir() {
return nil, fmt.Errorf("%s is not a directory", path)
}

files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}

var vm VM
for _, f := range files {
if f.IsDir() {
continue
}

name := filepath.Join(path, f.Name())
// ignore errors on read, as there are some write only
// in /proc/sys/vm
value, err := util.SysReadFile(name)
if err != nil {
continue
}
vp := util.NewValueParser(value)

switch f.Name() {
case "admin_reserve_kbytes":
vm.AdminReserveKbytes = vp.PInt64()
case "block_dump":
vm.BlockDump = vp.PInt64()
case "compact_unevictable_allowed":
vm.CompactUnevictableAllowed = vp.PInt64()
case "dirty_background_bytes":
vm.DirtyBackgroundBytes = vp.PInt64()
case "dirty_background_ratio":
vm.DirtyBackgroundRatio = vp.PInt64()
case "dirty_bytes":
vm.DirtyBytes = vp.PInt64()
case "dirty_expire_centisecs":
vm.DirtyExpireCentisecs = vp.PInt64()
case "dirty_ratio":
vm.DirtyRatio = vp.PInt64()
case "dirtytime_expire_seconds":
vm.DirtytimeExpireSeconds = vp.PInt64()
case "dirty_writeback_centisecs":
vm.DirtyWritebackCentisecs = vp.PInt64()
case "drop_caches":
vm.DropCaches = vp.PInt64()
case "extfrag_threshold":
vm.ExtfragThreshold = vp.PInt64()
case "hugetlb_shm_group":
vm.HugetlbShmGroup = vp.PInt64()
case "laptop_mode":
vm.LaptopMode = vp.PInt64()
case "legacy_va_layout":
vm.LegacyVaLayout = vp.PInt64()
case "lowmem_reserve_ratio":
stringSlice := strings.Fields(value)
pint64Slice := make([]*int64, 0, len(stringSlice))
for _, value := range stringSlice {
vp := util.NewValueParser(value)
pint64Slice = append(pint64Slice, vp.PInt64())
}
vm.LowmemReserveRatio = pint64Slice
case "max_map_count":
vm.MaxMapCount = vp.PInt64()
case "memory_failure_early_kill":
vm.MemoryFailureEarlyKill = vp.PInt64()
case "memory_failure_recovery":
vm.MemoryFailureRecovery = vp.PInt64()
case "min_free_kbytes":
vm.MinFreeKbytes = vp.PInt64()
case "min_slab_ratio":
vm.MinSlabRatio = vp.PInt64()
case "min_unmapped_ratio":
vm.MinUnmappedRatio = vp.PInt64()
case "mmap_min_addr":
vm.MmapMinAddr = vp.PInt64()
case "nr_hugepages":
vm.NrHugepages = vp.PInt64()
case "nr_hugepages_mempolicy":
vm.NrHugepagesMempolicy = vp.PInt64()
case "nr_overcommit_hugepages":
vm.NrOvercommitHugepages = vp.PInt64()
case "numa_stat":
vm.NumaStat = vp.PInt64()
case "numa_zonelist_order":
vm.NumaZonelistOrder = value
case "oom_dump_tasks":
vm.OomDumpTasks = vp.PInt64()
case "oom_kill_allocating_task":
vm.OomKillAllocatingTask = vp.PInt64()
case "overcommit_kbytes":
vm.OvercommitKbytes = vp.PInt64()
case "overcommit_memory":
vm.OvercommitMemory = vp.PInt64()
case "overcommit_ratio":
vm.OvercommitRatio = vp.PInt64()
case "page-cluster":
vm.PageCluster = vp.PInt64()
case "panic_on_oom":
vm.PanicOnOom = vp.PInt64()
case "percpu_pagelist_fraction":
vm.PercpuPagelistFraction = vp.PInt64()
case "stat_interval":
vm.StatInterval = vp.PInt64()
case "swappiness":
vm.Swappiness = vp.PInt64()
case "user_reserve_kbytes":
vm.UserReserveKbytes = vp.PInt64()
case "vfs_cache_pressure":
vm.VfsCachePressure = vp.PInt64()
case "watermark_boost_factor":
vm.WatermarkBoostFactor = vp.PInt64()
case "watermark_scale_factor":
vm.WatermarkScaleFactor = vp.PInt64()
case "zone_reclaim_mode":
vm.ZoneReclaimMode = vp.PInt64()
}
if err := vp.Err(); err != nil {
return nil, err
}
}

return &vm, nil
}
4
vendor/github.com/prometheus/procfs/xfrm.go
generated
vendored
@ -97,7 +97,7 @@ func NewXfrmStat() (XfrmStat, error) {

// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
func (fs FS) NewXfrmStat() (XfrmStat, error) {
-file, err := os.Open(fs.Path("net/xfrm_stat"))
+file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
if err != nil {
return XfrmStat{}, err
}
@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {

if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf(
-"couldnt parse %s line %s", file.Name(), s.Text())
+"couldn't parse %s line %s", file.Name(), s.Text())
}

name := fields[0]
330
vendor/github.com/prometheus/procfs/xfs/parse.go
generated
vendored
@ -1,330 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package xfs

import (
"bufio"
"fmt"
"io"
"strings"

"github.com/prometheus/procfs/internal/util"
)

// ParseStats parses a Stats from an input io.Reader, using the format
// found in /proc/fs/xfs/stat.
func ParseStats(r io.Reader) (*Stats, error) {
const (
// Fields parsed into stats structures.
fieldExtentAlloc = "extent_alloc"
fieldAbt = "abt"
fieldBlkMap = "blk_map"
fieldBmbt = "bmbt"
fieldDir = "dir"
fieldTrans = "trans"
fieldIg = "ig"
fieldLog = "log"
fieldRw = "rw"
fieldAttr = "attr"
fieldIcluster = "icluster"
fieldVnodes = "vnodes"
fieldBuf = "buf"
fieldXpc = "xpc"

// Unimplemented at this time due to lack of documentation.
fieldPushAil = "push_ail"
fieldXstrat = "xstrat"
fieldAbtb2 = "abtb2"
fieldAbtc2 = "abtc2"
fieldBmbt2 = "bmbt2"
fieldIbt2 = "ibt2"
fieldFibt2 = "fibt2"
fieldQm = "qm"
fieldDebug = "debug"
)

var xfss Stats

s := bufio.NewScanner(r)
for s.Scan() {
// Expect at least a string label and a single integer value, ex:
// - abt 0
// - rw 1 2
ss := strings.Fields(string(s.Bytes()))
if len(ss) < 2 {
continue
}
label := ss[0]

// Extended precision counters are uint64 values.
if label == fieldXpc {
us, err := util.ParseUint64s(ss[1:])
if err != nil {
return nil, err
}

xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
if err != nil {
return nil, err
}

continue
}

// All other counters are uint32 values.
us, err := util.ParseUint32s(ss[1:])
if err != nil {
return nil, err
}

switch label {
case fieldExtentAlloc:
xfss.ExtentAllocation, err = extentAllocationStats(us)
case fieldAbt:
xfss.AllocationBTree, err = btreeStats(us)
case fieldBlkMap:
xfss.BlockMapping, err = blockMappingStats(us)
case fieldBmbt:
xfss.BlockMapBTree, err = btreeStats(us)
case fieldDir:
xfss.DirectoryOperation, err = directoryOperationStats(us)
case fieldTrans:
xfss.Transaction, err = transactionStats(us)
case fieldIg:
xfss.InodeOperation, err = inodeOperationStats(us)
case fieldLog:
xfss.LogOperation, err = logOperationStats(us)
case fieldRw:
xfss.ReadWrite, err = readWriteStats(us)
case fieldAttr:
xfss.AttributeOperation, err = attributeOperationStats(us)
case fieldIcluster:
xfss.InodeClustering, err = inodeClusteringStats(us)
case fieldVnodes:
xfss.Vnode, err = vnodeStats(us)
case fieldBuf:
xfss.Buffer, err = bufferStats(us)
}
if err != nil {
return nil, err
}
}

return &xfss, s.Err()
}

// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
if l := len(us); l != 4 {
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
}

return ExtentAllocationStats{
ExtentsAllocated: us[0],
BlocksAllocated: us[1],
ExtentsFreed: us[2],
BlocksFreed: us[3],
}, nil
}

// btreeStats builds a BTreeStats from a slice of uint32s.
func btreeStats(us []uint32) (BTreeStats, error) {
if l := len(us); l != 4 {
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
}

return BTreeStats{
Lookups: us[0],
Compares: us[1],
RecordsInserted: us[2],
RecordsDeleted: us[3],
}, nil
}

// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
if l := len(us); l != 7 {
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
}

return BlockMappingStats{
Reads: us[0],
Writes: us[1],
Unmaps: us[2],
ExtentListInsertions: us[3],
ExtentListDeletions: us[4],
ExtentListLookups: us[5],
ExtentListCompares: us[6],
}, nil
}

// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
if l := len(us); l != 4 {
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
}

return DirectoryOperationStats{
Lookups: us[0],
Creates: us[1],
Removes: us[2],
Getdents: us[3],
}, nil
}

// TransactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
if l := len(us); l != 3 {
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
}

return TransactionStats{
Sync: us[0],
Async: us[1],
Empty: us[2],
}, nil
}

// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
if l := len(us); l != 7 {
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
}

return InodeOperationStats{
Attempts: us[0],
Found: us[1],
Recycle: us[2],
Missed: us[3],
Duplicate: us[4],
Reclaims: us[5],
AttributeChange: us[6],
}, nil
}

// LogOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
if l := len(us); l != 5 {
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
}

return LogOperationStats{
Writes: us[0],
Blocks: us[1],
NoInternalBuffers: us[2],
Force: us[3],
ForceSleep: us[4],
}, nil
}

// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
if l := len(us); l != 2 {
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
}

return ReadWriteStats{
Read: us[0],
Write: us[1],
}, nil
}

// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
if l := len(us); l != 4 {
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
}

return AttributeOperationStats{
Get: us[0],
Set: us[1],
Remove: us[2],
List: us[3],
}, nil
}

// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
if l := len(us); l != 3 {
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
}

return InodeClusteringStats{
Iflush: us[0],
Flush: us[1],
FlushInode: us[2],
}, nil
}

// VnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
// The attribute "Free" appears to not be available on older XFS
// stats versions. Therefore, 7 or 8 elements may appear in
// this slice.
l := len(us)
if l != 7 && l != 8 {
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
}

s := VnodeStats{
Active: us[0],
Allocate: us[1],
Get: us[2],
Hold: us[3],
Release: us[4],
Reclaim: us[5],
Remove: us[6],
}

// Skip adding free, unless it is present. The zero value will
// be used in place of an actual count.
if l == 7 {
return s, nil
}

s.Free = us[7]
return s, nil
}

// BufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
if l := len(us); l != 9 {
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
}

return BufferStats{
Get: us[0],
Create: us[1],
GetLocked: us[2],
GetLockedWaited: us[3],
BusyLocked: us[4],
MissLocked: us[5],
PageRetries: us[6],
PageFound: us[7],
GetRead: us[8],
}, nil
}

// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
if l := len(us); l != 3 {
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
}

return ExtendedPrecisionStats{
FlushBytes: us[0],
WriteBytes: us[1],
ReadBytes: us[2],
}, nil
}
163
vendor/github.com/prometheus/procfs/xfs/xfs.go
generated
vendored
@ -1,163 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs

// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
// The name of the filesystem used to source these statistics.
// If empty, this indicates aggregated statistics for all XFS
// filesystems on the host.
Name string

ExtentAllocation ExtentAllocationStats
AllocationBTree BTreeStats
BlockMapping BlockMappingStats
BlockMapBTree BTreeStats
DirectoryOperation DirectoryOperationStats
Transaction TransactionStats
InodeOperation InodeOperationStats
LogOperation LogOperationStats
ReadWrite ReadWriteStats
AttributeOperation AttributeOperationStats
InodeClustering InodeClusteringStats
Vnode VnodeStats
Buffer BufferStats
ExtendedPrecision ExtendedPrecisionStats
}

// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
ExtentsAllocated uint32
BlocksAllocated uint32
ExtentsFreed uint32
BlocksFreed uint32
}

// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
Lookups uint32
Compares uint32
RecordsInserted uint32
RecordsDeleted uint32
}

// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
Reads uint32
Writes uint32
Unmaps uint32
ExtentListInsertions uint32
ExtentListDeletions uint32
ExtentListLookups uint32
ExtentListCompares uint32
}

// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
Lookups uint32
Creates uint32
Removes uint32
Getdents uint32
}

// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
Sync uint32
Async uint32
Empty uint32
}

// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
Attempts uint32
Found uint32
Recycle uint32
Missed uint32
Duplicate uint32
Reclaims uint32
AttributeChange uint32
}

// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
Writes uint32
Blocks uint32
NoInternalBuffers uint32
Force uint32
ForceSleep uint32
}

// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
Read uint32
Write uint32
}

// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
Get uint32
Set uint32
Remove uint32
List uint32
}

// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
Iflush uint32
Flush uint32
FlushInode uint32
}

// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
Active uint32
Allocate uint32
Get uint32
Hold uint32
Release uint32
Reclaim uint32
Remove uint32
Free uint32
}

// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
Get uint32
Create uint32
GetLocked uint32
GetLockedWaited uint32
BusyLocked uint32
MissLocked uint32
PageRetries uint32
PageFound uint32
GetRead uint32
}

// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
FlushBytes uint64
WriteBytes uint64
ReadBytes uint64
}
196
vendor/github.com/prometheus/procfs/zoneinfo.go
generated
vendored
Normal file
@ -0,0 +1,196 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package procfs

import (
"bytes"
"fmt"
"io/ioutil"
"regexp"
"strings"

"github.com/prometheus/procfs/internal/util"
)

// Zoneinfo holds info parsed from /proc/zoneinfo.
type Zoneinfo struct {
Node string
Zone string
NrFreePages *int64
Min *int64
Low *int64
High *int64
Scanned *int64
Spanned *int64
Present *int64
Managed *int64
NrActiveAnon *int64
NrInactiveAnon *int64
NrIsolatedAnon *int64
NrAnonPages *int64
NrAnonTransparentHugepages *int64
NrActiveFile *int64
NrInactiveFile *int64
NrIsolatedFile *int64
NrFilePages *int64
NrSlabReclaimable *int64
NrSlabUnreclaimable *int64
NrMlockStack *int64
NrKernelStack *int64
NrMapped *int64
NrDirty *int64
NrWriteback *int64
NrUnevictable *int64
NrShmem *int64
NrDirtied *int64
NrWritten *int64
NumaHit *int64
NumaMiss *int64
NumaForeign *int64
NumaInterleave *int64
NumaLocal *int64
NumaOther *int64
Protection []*int64
}

var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)

// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}

func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {

zoneinfo := []Zoneinfo{}

zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
for _, block := range zoneinfoBlocks {
var zoneinfoElement Zoneinfo
lines := strings.Split(string(block), "\n")
for _, line := range lines {

if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
zoneinfoElement.Node = nodeZone[1]
zoneinfoElement.Zone = nodeZone[2]
continue
}
if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
zoneinfoElement.Zone = ""
continue
}
parts := strings.Fields(strings.TrimSpace(line))
if len(parts) < 2 {
continue
}
vp := util.NewValueParser(parts[1])
switch parts[0] {
case "nr_free_pages":
zoneinfoElement.NrFreePages = vp.PInt64()
case "min":
zoneinfoElement.Min = vp.PInt64()
case "low":
zoneinfoElement.Low = vp.PInt64()
case "high":
zoneinfoElement.High = vp.PInt64()
case "scanned":
zoneinfoElement.Scanned = vp.PInt64()
case "spanned":
zoneinfoElement.Spanned = vp.PInt64()
case "present":
zoneinfoElement.Present = vp.PInt64()
case "managed":
zoneinfoElement.Managed = vp.PInt64()
case "nr_active_anon":
zoneinfoElement.NrActiveAnon = vp.PInt64()
case "nr_inactive_anon":
zoneinfoElement.NrInactiveAnon = vp.PInt64()
case "nr_isolated_anon":
zoneinfoElement.NrIsolatedAnon = vp.PInt64()
case "nr_anon_pages":
zoneinfoElement.NrAnonPages = vp.PInt64()
case "nr_anon_transparent_hugepages":
zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
case "nr_active_file":
zoneinfoElement.NrActiveFile = vp.PInt64()
case "nr_inactive_file":
zoneinfoElement.NrInactiveFile = vp.PInt64()
case "nr_isolated_file":
zoneinfoElement.NrIsolatedFile = vp.PInt64()
case "nr_file_pages":
zoneinfoElement.NrFilePages = vp.PInt64()
case "nr_slab_reclaimable":
zoneinfoElement.NrSlabReclaimable = vp.PInt64()
case "nr_slab_unreclaimable":
zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
case "nr_mlock_stack":
zoneinfoElement.NrMlockStack = vp.PInt64()
case "nr_kernel_stack":
zoneinfoElement.NrKernelStack = vp.PInt64()
case "nr_mapped":
zoneinfoElement.NrMapped = vp.PInt64()
case "nr_dirty":
zoneinfoElement.NrDirty = vp.PInt64()
case "nr_writeback":
zoneinfoElement.NrWriteback = vp.PInt64()
case "nr_unevictable":
zoneinfoElement.NrUnevictable = vp.PInt64()
case "nr_shmem":
zoneinfoElement.NrShmem = vp.PInt64()
case "nr_dirtied":
zoneinfoElement.NrDirtied = vp.PInt64()
case "nr_written":
zoneinfoElement.NrWritten = vp.PInt64()
case "numa_hit":
zoneinfoElement.NumaHit = vp.PInt64()
case "numa_miss":
zoneinfoElement.NumaMiss = vp.PInt64()
case "numa_foreign":
zoneinfoElement.NumaForeign = vp.PInt64()
case "numa_interleave":
zoneinfoElement.NumaInterleave = vp.PInt64()
case "numa_local":
zoneinfoElement.NumaLocal = vp.PInt64()
case "numa_other":
zoneinfoElement.NumaOther = vp.PInt64()
case "protection:":
protectionParts := strings.Split(line, ":")
protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
protectionValues = strings.Replace(protectionValues, ")", "", 1)
protectionValues = strings.TrimSpace(protectionValues)
protectionStringMap := strings.Split(protectionValues, ", ")
val, err := util.ParsePInt64s(protectionStringMap)
if err == nil {
zoneinfoElement.Protection = val
}
}
}

}

zoneinfo = append(zoneinfo, zoneinfoElement)
}
return zoneinfo, nil
}