Updating dependency github.com/google/cadvisor to version 6a8d614
Signed-off-by: Davanum Srinivas <davanum@gmail.com>
vendor/github.com/google/cadvisor/utils/cloudinfo/BUILD | 2 (generated, vendored)

@@ -8,7 +8,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
-        "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/utils/cloudinfo/aws/aws.go | 6 (generated, vendored)

@@ -57,7 +57,11 @@ func fileContainsAmazonIdentifier(filename string) bool {
 }
 
 func getAwsMetadata(name string) string {
-	client := ec2metadata.New(session.New(&aws.Config{}))
+	sess, err := session.NewSession(&aws.Config{})
+	if err != nil {
+		return info.UnknownInstance
+	}
+	client := ec2metadata.New(sess)
 	data, err := client.GetMetadata(name)
 	if err != nil {
 		return info.UnknownInstance
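The hunk above follows the AWS SDK's recommended move from session.New, which can only report configuration errors by panicking or deferring them to the first request, to session.NewSession, which returns the error explicitly. A minimal self-contained sketch of the same pattern, assuming the aws-sdk-go v1 API that this vendored code uses (the "unknown" fallback string is illustrative; the real code returns info.UnknownInstance):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

// instanceID fetches a single EC2 metadata key, falling back to "unknown"
// on any error. As in the hunk above, the session error is checked up
// front instead of being deferred to the first API call.
func instanceID() string {
	sess, err := session.NewSession(&aws.Config{})
	if err != nil {
		return "unknown"
	}
	client := ec2metadata.New(sess)
	data, err := client.GetMetadata("instance-id")
	if err != nil {
		return "unknown"
	}
	return data
}

func main() {
	fmt.Println(instanceID())
}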
vendor/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go | 14 (generated, vendored)

@@ -18,7 +18,7 @@ package cloudinfo
 
 import (
 	info "github.com/google/cadvisor/info/v1"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 type CloudInfo interface {
@@ -75,14 +75,14 @@ func NewRealCloudInfo() CloudInfo {
 	}
 }
 
-func (self *realCloudInfo) GetCloudProvider() info.CloudProvider {
-	return self.cloudProvider
+func (i *realCloudInfo) GetCloudProvider() info.CloudProvider {
+	return i.cloudProvider
 }
 
-func (self *realCloudInfo) GetInstanceType() info.InstanceType {
-	return self.instanceType
+func (i *realCloudInfo) GetInstanceType() info.InstanceType {
+	return i.instanceType
 }
 
-func (self *realCloudInfo) GetInstanceID() info.InstanceID {
-	return self.instanceID
+func (i *realCloudInfo) GetInstanceID() info.InstanceID {
+	return i.instanceID
 }
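The klog to klog/v2 switch above repeats across the remaining files in this commit. It is mechanical because klog/v2 keeps the v1 call surface; only the import path changes. A minimal sketch, assuming the klog v2 API as vendored here (the verbosity value and messages are illustrative):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// klog/v2 keeps the v1 surface: InitFlags, leveled V(...).Infof and
	// Warningf are unchanged, so callers only swap the import path.
	klog.InitFlags(flag.CommandLine)
	_ = flag.Set("v", "4")
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("Family id for taskstats: %d", 22)
	klog.Warningf("Nodes topology is not available, providing CPU topology")
}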
vendor/github.com/google/cadvisor/utils/cloudinfo/gce/BUILD | 2 (generated, vendored)

@@ -10,7 +10,7 @@ go_library(
         "//vendor/cloud.google.com/go/compute/metadata:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/utils/cloudinfo:go_default_library",
-        "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/utils/cloudinfo/gce/gce.go | 2 (generated, vendored)

@@ -22,7 +22,7 @@ import (
 	"github.com/google/cadvisor/utils/cloudinfo"
 
 	"cloud.google.com/go/compute/metadata"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 const (
vendor/github.com/google/cadvisor/utils/cpuload/BUILD | 2 (generated, vendored)

@@ -9,7 +9,7 @@ go_library(
     deps = [
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/utils/cpuload/netlink:go_default_library",
-        "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go | 2 (generated, vendored)

@@ -20,7 +20,7 @@ import (
 	info "github.com/google/cadvisor/info/v1"
 
 	"github.com/google/cadvisor/utils/cpuload/netlink"
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 type CpuLoadReader interface {
vendor/github.com/google/cadvisor/utils/cpuload/netlink/BUILD | 2 (generated, vendored)

@@ -13,7 +13,7 @@ go_library(
     deps = [
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/golang.org/x/sys/unix:go_default_library",
-        "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/utils/cpuload/netlink/conn.go | 35 (generated, vendored)

@@ -55,41 +55,44 @@ func newConnection() (*Connection, error) {
 	return conn, err
 }
 
-func (self *Connection) Read(b []byte) (n int, err error) {
-	n, _, err = syscall.Recvfrom(self.fd, b, 0)
+func (c *Connection) Read(b []byte) (n int, err error) {
+	n, _, err = syscall.Recvfrom(c.fd, b, 0)
 	return n, err
 }
 
-func (self *Connection) Write(b []byte) (n int, err error) {
-	err = syscall.Sendto(self.fd, b, 0, &self.addr)
+func (c *Connection) Write(b []byte) (n int, err error) {
+	err = syscall.Sendto(c.fd, b, 0, &c.addr)
 	return len(b), err
 }
 
-func (self *Connection) Close() error {
-	return syscall.Close(self.fd)
+func (c *Connection) Close() error {
+	return syscall.Close(c.fd)
 }
 
-func (self *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
+func (c *Connection) WriteMessage(msg syscall.NetlinkMessage) error {
 	w := bytes.NewBuffer(nil)
 	msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data))
-	msg.Header.Seq = self.seq
-	self.seq++
-	msg.Header.Pid = self.pid
-	binary.Write(w, binary.LittleEndian, msg.Header)
-	_, err := w.Write(msg.Data)
+	msg.Header.Seq = c.seq
+	c.seq++
+	msg.Header.Pid = c.pid
+	err := binary.Write(w, binary.LittleEndian, msg.Header)
 	if err != nil {
 		return err
 	}
-	_, err = self.Write(w.Bytes())
+	_, err = w.Write(msg.Data)
+	if err != nil {
+		return err
+	}
+	_, err = c.Write(w.Bytes())
 	return err
 }
 
-func (self *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) {
-	err = binary.Read(self.rbuf, binary.LittleEndian, &msg.Header)
+func (c *Connection) ReadMessage() (msg syscall.NetlinkMessage, err error) {
+	err = binary.Read(c.rbuf, binary.LittleEndian, &msg.Header)
 	if err != nil {
 		return msg, err
 	}
 	msg.Data = make([]byte, msg.Header.Len-syscall.NLMSG_HDRLEN)
-	_, err = self.rbuf.Read(msg.Data)
+	_, err = c.rbuf.Read(msg.Data)
 	return msg, err
 }
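WriteMessage now serializes the header and payload in two checked steps instead of ignoring the binary.Write error. A self-contained sketch of the same framing logic (Linux-only, since syscall.NetlinkMessage and NLMSG_HDRLEN are Linux-specific; the frame helper is a hypothetical stand-in, not cAdvisor's API):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"syscall"
)

// frame serializes a netlink message the way WriteMessage does above:
// fix up the length header, write the header, then append the payload,
// checking each step.
func frame(msg syscall.NetlinkMessage) ([]byte, error) {
	w := bytes.NewBuffer(nil)
	msg.Header.Len = uint32(syscall.NLMSG_HDRLEN + len(msg.Data))
	if err := binary.Write(w, binary.LittleEndian, msg.Header); err != nil {
		return nil, err
	}
	if _, err := w.Write(msg.Data); err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}

func main() {
	b, err := frame(syscall.NetlinkMessage{Data: []byte{1, 2, 3}})
	fmt.Println(len(b), err) // 19 <nil> on linux (16-byte header + 3-byte payload)
}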
vendor/github.com/google/cadvisor/utils/cpuload/netlink/netlink.go | 23 (generated, vendored)

@@ -27,6 +27,7 @@ import (
 
 var (
 	// TODO(rjnagal): Verify and fix for other architectures.
+
 	Endian = binary.LittleEndian
 )
 
@@ -42,11 +43,11 @@ type netlinkMessage struct {
 	Data []byte
 }
 
-func (self netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
-	rawmsg.Header = self.Header
+func (m netlinkMessage) toRawMsg() (rawmsg syscall.NetlinkMessage) {
+	rawmsg.Header = m.Header
 	w := bytes.NewBuffer([]byte{})
-	binary.Write(w, Endian, self.GenHeader)
-	w.Write(self.Data)
+	binary.Write(w, Endian, m.GenHeader)
+	w.Write(m.Data)
 	rawmsg.Data = w.Bytes()
 	return rawmsg
 }
@@ -64,9 +65,12 @@ func padding(size int, alignment int) int {
 }
 
 // Get family id for taskstats subsystem.
-func getFamilyId(conn *Connection) (uint16, error) {
+func getFamilyID(conn *Connection) (uint16, error) {
 	msg := prepareFamilyMessage()
-	conn.WriteMessage(msg.toRawMsg())
+	err := conn.WriteMessage(msg.toRawMsg())
+	if err != nil {
+		return 0, err
+	}
 
 	resp, err := conn.ReadMessage()
 	if err != nil {
@@ -164,7 +168,7 @@ func parseFamilyResp(msg syscall.NetlinkMessage) (uint16, error) {
 			return 0, err
 		}
 	}
-	return 0, fmt.Errorf("family id not found in the response.")
+	return 0, fmt.Errorf("family id not found in the response")
 }
 
 // Extract task stats from response returned by kernel.
@@ -203,7 +207,10 @@ func verifyHeader(msg syscall.NetlinkMessage) error {
 	case syscall.NLMSG_ERROR:
 		buf := bytes.NewBuffer(msg.Data)
 		var errno int32
-		binary.Read(buf, Endian, errno)
+		err := binary.Read(buf, Endian, errno)
+		if err != nil {
+			return err
+		}
 		return fmt.Errorf("netlink request failed with error %s", syscall.Errno(-errno))
 	}
 	return nil
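A side note on the verifyHeader hunk: encoding/binary decodes into a pointer, so binary.Read(buf, Endian, errno) with a plain int32 returns an "invalid type" error, which the newly added check will surface. A minimal sketch of the pointer form (decodeErrno and the sample payload are illustrative):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// decodeErrno decodes a little-endian int32 errno from a netlink error
// payload. binary.Read needs a pointer destination; with a plain int32
// (as in the hunk) it returns an "invalid type" error instead.
func decodeErrno(payload []byte) (int32, error) {
	var errno int32
	if err := binary.Read(bytes.NewBuffer(payload), binary.LittleEndian, &errno); err != nil {
		return 0, err
	}
	return errno, nil
}

func main() {
	fmt.Println(decodeErrno([]byte{0xf2, 0xff, 0xff, 0xff})) // -14 <nil> (EFAULT)
}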
vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go | 22 (generated, vendored)

@@ -20,11 +20,11 @@ import (
 
 	info "github.com/google/cadvisor/info/v1"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 type NetlinkReader struct {
-	familyId uint16
+	familyID uint16
 	conn     *Connection
 }
 
@@ -34,24 +34,24 @@ func New() (*NetlinkReader, error) {
 		return nil, fmt.Errorf("failed to create a new connection: %s", err)
 	}
 
-	id, err := getFamilyId(conn)
+	id, err := getFamilyID(conn)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get netlink family id for task stats: %s", err)
 	}
 	klog.V(4).Infof("Family id for taskstats: %d", id)
 	return &NetlinkReader{
-		familyId: id,
+		familyID: id,
 		conn:     conn,
 	}, nil
 }
 
-func (self *NetlinkReader) Stop() {
-	if self.conn != nil {
-		self.conn.Close()
+func (r *NetlinkReader) Stop() {
+	if r.conn != nil {
+		r.conn.Close()
 	}
 }
 
-func (self *NetlinkReader) Start() error {
+func (r *NetlinkReader) Start() error {
 	// We do the start setup for netlink in New(). Nothing to do here.
 	return nil
 }
@@ -60,9 +60,9 @@ func (self *NetlinkReader) Start() error {
 // Caller can use historical data to calculate cpu load.
 // path is an absolute filesystem path for a container under the CPU cgroup hierarchy.
 // NOTE: non-hierarchical load is returned. It does not include load for subcontainers.
-func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
+func (r *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
 	if len(path) == 0 {
-		return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty!")
+		return info.LoadStats{}, fmt.Errorf("cgroup path can not be empty")
 	}
 
 	cfd, err := os.Open(path)
@@ -71,7 +71,7 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats, error) {
 	}
 	defer cfd.Close()
 
-	stats, err := getLoadStats(self.familyId, cfd, self.conn)
+	stats, err := getLoadStats(r.familyID, cfd, r.conn)
 	if err != nil {
 		return info.LoadStats{}, err
 	}
vendor/github.com/google/cadvisor/utils/oomparser/BUILD | 2 (generated, vendored)

@@ -8,7 +8,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//vendor/github.com/euank/go-kmsg-parser/kmsgparser:go_default_library",
-        "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go | 19 (generated, vendored)

@@ -22,7 +22,7 @@ import (
 
 	"github.com/euank/go-kmsg-parser/kmsgparser"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 var (
@@ -107,23 +107,20 @@ func getProcessNamePid(line string, currentOomInstance *OomInstance) (bool, error) {
 
 // uses regex to see if line is the start of a kernel oom log
 func checkIfStartOfOomMessages(line string) bool {
-	potential_oom_start := firstLineRegexp.MatchString(line)
-	if potential_oom_start {
-		return true
-	}
-	return false
+	potentialOomStart := firstLineRegexp.MatchString(line)
+	return potentialOomStart
 }
 
 // StreamOoms writes to a provided a stream of OomInstance objects representing
 // OOM events that are found in the logs.
 // It will block and should be called from a goroutine.
-func (self *OomParser) StreamOoms(outStream chan<- *OomInstance) {
-	kmsgEntries := self.parser.Parse()
-	defer self.parser.Close()
+func (p *OomParser) StreamOoms(outStream chan<- *OomInstance) {
+	kmsgEntries := p.parser.Parse()
+	defer p.parser.Close()
 
 	for msg := range kmsgEntries {
-		in_oom_kernel_log := checkIfStartOfOomMessages(msg.Message)
-		if in_oom_kernel_log {
+		isOomMessage := checkIfStartOfOomMessages(msg.Message)
+		if isOomMessage {
 			oomCurrentInstance := &OomInstance{
 				ContainerName:       "/",
 				VictimContainerName: "/",
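The checkIfStartOfOomMessages rewrite above collapses an if/return true/return false ladder into a direct boolean return; the temporary could be dropped as well. A minimal sketch (the pattern below is a stand-in, cAdvisor's real firstLineRegexp is defined elsewhere in oomparser.go):

package main

import (
	"fmt"
	"regexp"
)

// A stand-in for cAdvisor's firstLineRegexp; the pattern is illustrative only.
var firstLineRegexp = regexp.MustCompile(`invoked oom-killer:`)

// checkIfStartOfOomMessages matches the rewritten helper above; the boolean
// can also be returned directly, without the temporary.
func checkIfStartOfOomMessages(line string) bool {
	return firstLineRegexp.MatchString(line)
}

func main() {
	fmt.Println(checkIfStartOfOomMessages("memcached invoked oom-killer: gfp_mask=0x201da"))
}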
vendor/github.com/google/cadvisor/utils/sysfs/sysfs.go | 63 (generated, vendored)

@@ -32,9 +32,9 @@ const (
 	ppcDevTree   = "/proc/device-tree"
 	s390xDevTree = "/etc" // s390/s390x changes
 
 	hugePagesDirName = "hugepages"
-	coreIDFilePath   = "/topology/core_id"
-	meminfoFile      = "meminfo"
+	coreIDFilePath    = "/topology/core_id"
+	packageIDFilePath = "/topology/physical_package_id"
+	meminfoFile       = "meminfo"
 
 	cpuDirPattern  = "cpu*[0-9]"
 	nodeDirPattern = "node*[0-9]"
@@ -62,10 +62,12 @@ type CacheInfo struct {
 type SysFs interface {
 	// Get NUMA nodes paths
 	GetNodesPaths() ([]string, error)
-	// Get paths to CPU assigned for specified NUMA node
-	GetCPUsPaths(nodePath string) ([]string, error)
+	// Get paths to CPUs in provided directory e.g. /sys/devices/system/node/node0 or /sys/devices/system/cpu
+	GetCPUsPaths(cpusPath string) ([]string, error)
 	// Get physical core id for specified CPU
 	GetCoreID(coreIDFilePath string) (string, error)
+	// Get physical package id for specified CPU
+	GetCPUPhysicalPackageID(cpuPath string) (string, error)
 	// Get total memory for specified NUMA node
 	GetMemInfo(nodeDir string) (string, error)
 	// Get hugepages from specified directory
@@ -101,17 +103,17 @@ func NewRealSysFs() SysFs {
 	return &realSysFs{}
 }
 
-func (self *realSysFs) GetNodesPaths() ([]string, error) {
+func (fs *realSysFs) GetNodesPaths() ([]string, error) {
 	pathPattern := fmt.Sprintf("%s%s", nodeDir, nodeDirPattern)
 	return filepath.Glob(pathPattern)
 }
 
-func (self *realSysFs) GetCPUsPaths(nodePath string) ([]string, error) {
-	pathPattern := fmt.Sprintf("%s/%s", nodePath, cpuDirPattern)
+func (fs *realSysFs) GetCPUsPaths(cpusPath string) ([]string, error) {
+	pathPattern := fmt.Sprintf("%s/%s", cpusPath, cpuDirPattern)
 	return filepath.Glob(pathPattern)
 }
 
-func (self *realSysFs) GetCoreID(cpuPath string) (string, error) {
+func (fs *realSysFs) GetCoreID(cpuPath string) (string, error) {
 	coreIDFilePath := fmt.Sprintf("%s%s", cpuPath, coreIDFilePath)
 	coreID, err := ioutil.ReadFile(coreIDFilePath)
 	if err != nil {
@@ -120,7 +122,16 @@ func (self *realSysFs) GetCoreID(cpuPath string) (string, error) {
 	return strings.TrimSpace(string(coreID)), err
 }
 
-func (self *realSysFs) GetMemInfo(nodePath string) (string, error) {
+func (fs *realSysFs) GetCPUPhysicalPackageID(cpuPath string) (string, error) {
+	packageIDFilePath := fmt.Sprintf("%s%s", cpuPath, packageIDFilePath)
+	packageID, err := ioutil.ReadFile(packageIDFilePath)
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimSpace(string(packageID)), err
+}
+
+func (fs *realSysFs) GetMemInfo(nodePath string) (string, error) {
 	meminfoPath := fmt.Sprintf("%s/%s", nodePath, meminfoFile)
 	meminfo, err := ioutil.ReadFile(meminfoPath)
 	if err != nil {
@@ -129,11 +140,11 @@ func (self *realSysFs) GetMemInfo(nodePath string) (string, error) {
 	return strings.TrimSpace(string(meminfo)), err
 }
 
-func (self *realSysFs) GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error) {
+func (fs *realSysFs) GetHugePagesInfo(hugePagesDirectory string) ([]os.FileInfo, error) {
 	return ioutil.ReadDir(hugePagesDirectory)
 }
 
-func (self *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
+func (fs *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
 	hugePageFilePath := fmt.Sprintf("%s%s/%s", hugepagesDirectory, hugePageName, HugePagesNrFile)
 	hugePageFile, err := ioutil.ReadFile(hugePageFilePath)
 	if err != nil {
@@ -142,11 +153,11 @@ func (self *realSysFs) GetHugePagesNr(hugepagesDirectory string, hugePageName string) (string, error) {
 	return strings.TrimSpace(string(hugePageFile)), err
 }
 
-func (self *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
+func (fs *realSysFs) GetBlockDevices() ([]os.FileInfo, error) {
 	return ioutil.ReadDir(blockDir)
 }
 
-func (self *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
+func (fs *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
 	dev, err := ioutil.ReadFile(path.Join(blockDir, name, "/dev"))
 	if err != nil {
 		return "", err
@@ -154,7 +165,7 @@ func (self *realSysFs) GetBlockDeviceNumbers(name string) (string, error) {
 	return string(dev), nil
 }
 
-func (self *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
+func (fs *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
 	sched, err := ioutil.ReadFile(path.Join(blockDir, name, "/queue/scheduler"))
 	if err != nil {
 		return "", err
@@ -162,7 +173,7 @@ func (self *realSysFs) GetBlockDeviceScheduler(name string) (string, error) {
 	return string(sched), nil
 }
 
-func (self *realSysFs) GetBlockDeviceSize(name string) (string, error) {
+func (fs *realSysFs) GetBlockDeviceSize(name string) (string, error) {
 	size, err := ioutil.ReadFile(path.Join(blockDir, name, "/size"))
 	if err != nil {
 		return "", err
@@ -170,7 +181,7 @@ func (self *realSysFs) GetBlockDeviceSize(name string) (string, error) {
 	return string(size), nil
 }
 
-func (self *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
+func (fs *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
 	files, err := ioutil.ReadDir(netDir)
 	if err != nil {
 		return nil, err
@@ -192,7 +203,7 @@ func (self *realSysFs) GetNetworkDevices() ([]os.FileInfo, error) {
 	return dirs, nil
 }
 
-func (self *realSysFs) GetNetworkAddress(name string) (string, error) {
+func (fs *realSysFs) GetNetworkAddress(name string) (string, error) {
 	address, err := ioutil.ReadFile(path.Join(netDir, name, "/address"))
 	if err != nil {
 		return "", err
@@ -200,7 +211,7 @@ func (self *realSysFs) GetNetworkAddress(name string) (string, error) {
 	return string(address), nil
 }
 
-func (self *realSysFs) GetNetworkMtu(name string) (string, error) {
+func (fs *realSysFs) GetNetworkMtu(name string) (string, error) {
 	mtu, err := ioutil.ReadFile(path.Join(netDir, name, "/mtu"))
 	if err != nil {
 		return "", err
@@ -208,7 +219,7 @@ func (self *realSysFs) GetNetworkMtu(name string) (string, error) {
 	return string(mtu), nil
 }
 
-func (self *realSysFs) GetNetworkSpeed(name string) (string, error) {
+func (fs *realSysFs) GetNetworkSpeed(name string) (string, error) {
 	speed, err := ioutil.ReadFile(path.Join(netDir, name, "/speed"))
 	if err != nil {
 		return "", err
@@ -216,7 +227,7 @@ func (self *realSysFs) GetNetworkSpeed(name string) (string, error) {
 	return string(speed), nil
 }
 
-func (self *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
+func (fs *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
 	statPath := path.Join(netDir, dev, "/statistics", stat)
 	out, err := ioutil.ReadFile(statPath)
 	if err != nil {
@@ -230,7 +241,7 @@ func (self *realSysFs) GetNetworkStatValue(dev string, stat string) (uint64, error) {
 	return s, nil
 }
 
-func (self *realSysFs) GetCaches(id int) ([]os.FileInfo, error) {
+func (fs *realSysFs) GetCaches(id int) ([]os.FileInfo, error) {
 	cpuPath := fmt.Sprintf("%s%d/cache", cacheDir, id)
 	return ioutil.ReadDir(cpuPath)
 }
@@ -245,7 +256,7 @@ func bitCount(i uint64) (count int) {
 	return
 }
 
-func getCpuCount(cache string) (count int, err error) {
+func getCPUCount(cache string) (count int, err error) {
 	out, err := ioutil.ReadFile(path.Join(cache, "/shared_cpu_map"))
 	if err != nil {
 		return 0, err
@@ -262,7 +273,7 @@ func getCpuCount(cache string) (count int, err error) {
 	return
 }
 
-func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
+func (fs *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
 	cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, id, name)
 	out, err := ioutil.ReadFile(path.Join(cachePath, "/size"))
 	if err != nil {
@@ -290,7 +301,7 @@ func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
 		return CacheInfo{}, err
 	}
 	cacheType := strings.TrimSpace(string(out))
-	cpuCount, err := getCpuCount(cachePath)
+	cpuCount, err := getCPUCount(cachePath)
 	if err != nil {
 		return CacheInfo{}, err
 	}
@@ -302,7 +313,7 @@ func (self *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
 	}, nil
 }
 
-func (self *realSysFs) GetSystemUUID() (string, error) {
+func (fs *realSysFs) GetSystemUUID() (string, error) {
 	if id, err := ioutil.ReadFile(path.Join(dmiDir, "id", "product_uuid")); err == nil {
 		return strings.TrimSpace(string(id)), nil
 	} else if id, err = ioutil.ReadFile(path.Join(ppcDevTree, "system-id")); err == nil {
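The new GetCPUPhysicalPackageID reads a single sysfs file and trims it, exactly like GetCoreID. A standalone sketch of that read (Linux-only; the path layout follows the packageIDFilePath constant above, and the helper name is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// physicalPackageID reads the socket id for one CPU from sysfs, as the new
// GetCPUPhysicalPackageID does.
func physicalPackageID(cpuPath string) (string, error) {
	raw, err := ioutil.ReadFile(cpuPath + "/topology/physical_package_id")
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(raw)), nil
}

func main() {
	fmt.Println(physicalPackageID("/sys/devices/system/cpu/cpu0"))
}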
vendor/github.com/google/cadvisor/utils/sysinfo/BUILD | 3 (generated, vendored)

@@ -9,8 +9,7 @@ go_library(
     deps = [
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library",
-        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
-        "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/utils/sysinfo/sysinfo.go | 145 (generated, vendored)

@@ -22,16 +22,17 @@ import (
 
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/utils/sysfs"
-	"github.com/google/go-cmp/cmp"
 
-	"k8s.io/klog"
+	"k8s.io/klog/v2"
 )
 
 var (
 	schedulerRegExp      = regexp.MustCompile(`.*\[(.*)\].*`)
-	nodeDirRegExp        = regexp.MustCompile("node/node(\\d*)")
-	cpuDirRegExp         = regexp.MustCompile("/cpu(\\d*)")
+	nodeDirRegExp        = regexp.MustCompile(`node/node(\d*)`)
+	cpuDirRegExp         = regexp.MustCompile(`/cpu(\d+)`)
 	memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
+
+	cpusPath = "/sys/devices/system/cpu"
 )
 
 const (
@@ -55,14 +56,14 @@ func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) {
 		if strings.HasPrefix(name, "loop") || strings.HasPrefix(name, "ram") || strings.HasPrefix(name, "sr") {
 			continue
 		}
-		disk_info := info.DiskInfo{
+		diskInfo := info.DiskInfo{
 			Name: name,
 		}
 		dev, err := sysfs.GetBlockDeviceNumbers(name)
 		if err != nil {
 			return nil, err
 		}
-		n, err := fmt.Sscanf(dev, "%d:%d", &disk_info.Major, &disk_info.Minor)
+		n, err := fmt.Sscanf(dev, "%d:%d", &diskInfo.Major, &diskInfo.Minor)
 		if err != nil || n != 2 {
 			return nil, fmt.Errorf("could not parse device numbers from %s for device %s", dev, name)
 		}
@@ -76,18 +77,18 @@ func GetBlockDeviceInfo(sysfs sysfs.SysFs) (map[string]info.DiskInfo, error) {
 			return nil, err
 		}
 		// size is in 512 bytes blocks.
-		disk_info.Size = size * 512
+		diskInfo.Size = size * 512
 
-		disk_info.Scheduler = "none"
+		diskInfo.Scheduler = "none"
 		blkSched, err := sysfs.GetBlockDeviceScheduler(name)
 		if err == nil {
 			matches := schedulerRegExp.FindSubmatch([]byte(blkSched))
 			if len(matches) >= 2 {
-				disk_info.Scheduler = string(matches[1])
+				diskInfo.Scheduler = string(matches[1])
 			}
 		}
-		device := fmt.Sprintf("%d:%d", disk_info.Major, disk_info.Minor)
-		diskMap[device] = disk_info
+		device := fmt.Sprintf("%d:%d", diskInfo.Major, diskInfo.Minor)
+		diskMap[device] = diskInfo
 	}
 	return diskMap, nil
 }
@@ -193,28 +194,40 @@ func GetNodesInfo(sysFs sysfs.SysFs) ([]info.Node, int, error) {
 	allLogicalCoresCount := 0
 
 	nodesDirs, err := sysFs.GetNodesPaths()
-	if err != nil || len(nodesDirs) == 0 {
-		if len(nodesDirs) == 0 && err == nil {
-			//sysFs.GetNodesPaths uses filePath.Glob which does not return any error if pattern does not match anything
-			err = fmt.Errorf("Any path to specific node is not found")
-		}
+	if err != nil {
 		return nil, 0, err
 	}
 
+	if len(nodesDirs) == 0 {
+		klog.Warningf("Nodes topology is not available, providing CPU topology")
+		return getCPUTopology(sysFs)
+	}
+
 	for _, nodeDir := range nodesDirs {
 		id, err := getMatchedInt(nodeDirRegExp, nodeDir)
-		node := info.Node{Id: id}
-
-		cores, logicalCoreCount, err := getCoresInfo(sysFs, nodeDir)
-		if err != nil {
-			return nil, 0, err
-		}
-		node.Cores = cores
+		node := info.Node{Id: id}
 
-		allLogicalCoresCount += logicalCoreCount
+		cpuDirs, err := sysFs.GetCPUsPaths(nodeDir)
+		if len(cpuDirs) == 0 {
+			klog.Warningf("Found node without any CPU, nodeDir: %s, number of cpuDirs %d, err: %v", nodeDir, len(cpuDirs), err)
+		} else {
+			cores, err := getCoresInfo(sysFs, cpuDirs)
+			if err != nil {
+				return nil, 0, err
+			}
+			node.Cores = cores
+		}
+
+		allLogicalCoresCount += len(cpuDirs)
 
 		// On some Linux platforms(such as Arm64 guest kernel), cache info may not exist.
 		// So, we should ignore error here.
 		err = addCacheInfo(sysFs, &node)
 		if err != nil {
-			return nil, 0, err
+			klog.Warningf("Found node without cache information, nodeDir: %s", nodeDir)
 		}
 
 		node.Memory, err = getNodeMemInfo(sysFs, nodeDir)
@@ -233,6 +246,68 @@ func GetNodesInfo(sysFs sysfs.SysFs) ([]info.Node, int, error) {
 	return nodes, allLogicalCoresCount, err
 }
 
+func getCPUTopology(sysFs sysfs.SysFs) ([]info.Node, int, error) {
+	nodes := []info.Node{}
+
+	cpusPaths, err := sysFs.GetCPUsPaths(cpusPath)
+	if err != nil {
+		return nil, 0, err
+	}
+	cpusCount := len(cpusPaths)
+
+	if cpusCount == 0 {
+		err = fmt.Errorf("Any CPU is not available, cpusPath: %s", cpusPath)
+		return nil, 0, err
+	}
+
+	cpusByPhysicalPackageID, err := getCpusByPhysicalPackageID(sysFs, cpusPaths)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	for physicalPackageID, cpus := range cpusByPhysicalPackageID {
+		node := info.Node{Id: physicalPackageID}
+
+		cores, err := getCoresInfo(sysFs, cpus)
+		if err != nil {
+			return nil, 0, err
+		}
+		node.Cores = cores
+
+		// On some Linux platforms(such as Arm64 guest kernel), cache info may not exist.
+		// So, we should ignore error here.
+		err = addCacheInfo(sysFs, &node)
+		if err != nil {
+			klog.Warningf("Found cpu without cache information, cpuPath: %s", cpus)
+		}
+		nodes = append(nodes, node)
+	}
+	return nodes, cpusCount, nil
+}
+
+func getCpusByPhysicalPackageID(sysFs sysfs.SysFs, cpusPaths []string) (map[int][]string, error) {
+	cpuPathsByPhysicalPackageID := make(map[int][]string)
+	for _, cpuPath := range cpusPaths {
+		rawPhysicalPackageID, err := sysFs.GetCPUPhysicalPackageID(cpuPath)
+		if err != nil {
+			return nil, err
+		}
+
+		physicalPackageID, err := strconv.Atoi(rawPhysicalPackageID)
+		if err != nil {
+			return nil, err
+		}
+
+		if _, ok := cpuPathsByPhysicalPackageID[physicalPackageID]; !ok {
+			cpuPathsByPhysicalPackageID[physicalPackageID] = make([]string, 0)
+		}
+
+		cpuPathsByPhysicalPackageID[physicalPackageID] = append(cpuPathsByPhysicalPackageID[physicalPackageID], cpuPath)
+	}
+	return cpuPathsByPhysicalPackageID, nil
+}
+
 // addCacheInfo adds information about cache for NUMA node
 func addCacheInfo(sysFs sysfs.SysFs, node *info.Node) error {
 	for coreID, core := range node.Cores {
@@ -255,7 +330,7 @@ func addCacheInfo(sysFs sysfs.SysFs, node *info.Node) error {
 			// Add a node-level cache.
 			cacheFound := false
 			for _, nodeCache := range node.Caches {
-				if cmp.Equal(nodeCache, c) {
+				if nodeCache == c {
 					cacheFound = true
 				}
 			}
@@ -292,28 +367,22 @@ func getNodeMemInfo(sysFs sysfs.SysFs, nodeDir string) (uint64, error) {
 	return uint64(memory), nil
 }
 
-// getCoresInfo retruns infromation about physical and logical cores assigned to NUMA node
-func getCoresInfo(sysFs sysfs.SysFs, nodeDir string) ([]info.Core, int, error) {
-	cpuDirs, err := sysFs.GetCPUsPaths(nodeDir)
-	if err != nil || len(cpuDirs) == 0 {
-		klog.Warningf("Found node without any CPU, nodeDir: %s, number of cpuDirs %d, err: %v", nodeDir, len(cpuDirs), err)
-		return nil, 0, nil
-	}
-
+// getCoresInfo retruns infromation about physical cores
+func getCoresInfo(sysFs sysfs.SysFs, cpuDirs []string) ([]info.Core, error) {
 	cores := make([]info.Core, 0, len(cpuDirs))
 	for _, cpuDir := range cpuDirs {
 		cpuID, err := getMatchedInt(cpuDirRegExp, cpuDir)
 		if err != nil {
-			return nil, 0, fmt.Errorf("Unexpected format of CPU directory, cpuDirRegExp %s, cpuDir: %s", cpuDirRegExp, cpuDir)
+			return nil, fmt.Errorf("Unexpected format of CPU directory, cpuDirRegExp %s, cpuDir: %s", cpuDirRegExp, cpuDir)
 		}
 
 		rawPhysicalID, err := sysFs.GetCoreID(cpuDir)
 		if err != nil {
-			return nil, 0, err
+			return nil, err
 		}
 		physicalID, err := strconv.Atoi(rawPhysicalID)
 		if err != nil {
-			return nil, 0, err
+			return nil, err
 		}
 
 		coreIDx := -1
@@ -335,7 +404,7 @@ func getCoresInfo(sysFs sysfs.SysFs, nodeDir string) ([]info.Core, int, error) {
 			desiredCore.Threads = append(desiredCore.Threads, cpuID)
 		}
 	}
-	return cores, len(cpuDirs), nil
+	return cores, nil
 }
 
 // GetCacheInfo return information about a cache accessible from the given cpu thread
@@ -359,12 +428,6 @@ func GetCacheInfo(sysFs sysfs.SysFs, id int) ([]sysfs.CacheInfo, error) {
 	return info, nil
 }
 
-func GetNetworkStats(name string) (info.InterfaceStats, error) {
-	// TODO(rjnagal): Take syfs as an argument.
-	sysFs := sysfs.NewRealSysFs()
-	return getNetworkStats(name, sysFs)
-}
-
 func getNetworkStats(name string, sysFs sysfs.SysFs) (info.InterfaceStats, error) {
 	var stats info.InterfaceStats
 	var err error
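getCpusByPhysicalPackageID above groups CPU paths by socket. A minimal sketch of the same grouping with hard-coded ids in place of sysfs reads (names illustrative); in Go, append works on the nil slice behind a missing map key, so the hunk's explicit make([]string, 0) is defensive rather than required:

package main

import "fmt"

// groupByPackage mirrors getCpusByPhysicalPackageID above, with hard-coded
// package ids in place of sysfs reads.
func groupByPackage(packageIDs map[string]int) map[int][]string {
	byPackage := make(map[int][]string)
	for cpu, pkg := range packageIDs {
		byPackage[pkg] = append(byPackage[pkg], cpu)
	}
	return byPackage
}

func main() {
	fmt.Println(groupByPackage(map[string]int{"cpu0": 0, "cpu1": 0, "cpu2": 1}))
}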
vendor/github.com/google/cadvisor/utils/timed_store.go | 62 (generated, vendored)

@@ -57,60 +57,60 @@ func NewTimedStore(age time.Duration, maxItems int) *TimedStore {
 }
 
 // Adds an element to the start of the buffer (removing one from the end if necessary).
-func (self *TimedStore) Add(timestamp time.Time, item interface{}) {
+func (s *TimedStore) Add(timestamp time.Time, item interface{}) {
 	data := timedStoreData{
 		timestamp: timestamp,
 		data:      item,
 	}
 	// Common case: data is added in order.
-	if len(self.buffer) == 0 || !timestamp.Before(self.buffer[len(self.buffer)-1].timestamp) {
-		self.buffer = append(self.buffer, data)
+	if len(s.buffer) == 0 || !timestamp.Before(s.buffer[len(s.buffer)-1].timestamp) {
+		s.buffer = append(s.buffer, data)
 	} else {
 		// Data is out of order; insert it in the correct position.
-		index := sort.Search(len(self.buffer), func(index int) bool {
-			return self.buffer[index].timestamp.After(timestamp)
+		index := sort.Search(len(s.buffer), func(index int) bool {
+			return s.buffer[index].timestamp.After(timestamp)
 		})
-		self.buffer = append(self.buffer, timedStoreData{}) // Make room to shift the elements
-		copy(self.buffer[index+1:], self.buffer[index:])    // Shift the elements over
-		self.buffer[index] = data
+		s.buffer = append(s.buffer, timedStoreData{}) // Make room to shift the elements
+		copy(s.buffer[index+1:], s.buffer[index:])    // Shift the elements over
+		s.buffer[index] = data
 	}
 
 	// Remove any elements before eviction time.
 	// TODO(rjnagal): This is assuming that the added entry has timestamp close to now.
-	evictTime := timestamp.Add(-self.age)
-	index := sort.Search(len(self.buffer), func(index int) bool {
-		return self.buffer[index].timestamp.After(evictTime)
+	evictTime := timestamp.Add(-s.age)
+	index := sort.Search(len(s.buffer), func(index int) bool {
+		return s.buffer[index].timestamp.After(evictTime)
 	})
-	if index < len(self.buffer) {
-		self.buffer = self.buffer[index:]
+	if index < len(s.buffer) {
+		s.buffer = s.buffer[index:]
 	}
 
 	// Remove any elements if over our max size.
-	if self.maxItems >= 0 && len(self.buffer) > self.maxItems {
-		startIndex := len(self.buffer) - self.maxItems
-		self.buffer = self.buffer[startIndex:]
+	if s.maxItems >= 0 && len(s.buffer) > s.maxItems {
+		startIndex := len(s.buffer) - s.maxItems
+		s.buffer = s.buffer[startIndex:]
 	}
 }
 
 // Returns up to maxResult elements in the specified time period (inclusive).
 // Results are from first to last. maxResults of -1 means no limit.
-func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
+func (s *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
 	// No stats, return empty.
-	if len(self.buffer) == 0 {
+	if len(s.buffer) == 0 {
 		return []interface{}{}
 	}
 
 	var startIndex int
 	if start.IsZero() {
 		// None specified, start at the beginning.
-		startIndex = len(self.buffer) - 1
+		startIndex = len(s.buffer) - 1
 	} else {
 		// Start is the index before the elements smaller than it. We do this by
 		// finding the first element smaller than start and taking the index
 		// before that element
-		startIndex = sort.Search(len(self.buffer), func(index int) bool {
+		startIndex = sort.Search(len(s.buffer), func(index int) bool {
 			// buffer[index] < start
-			return self.getData(index).timestamp.Before(start)
+			return s.getData(index).timestamp.Before(start)
 		}) - 1
 		// Check if start is after all the data we have.
 		if startIndex < 0 {
@@ -124,12 +124,12 @@ func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
 		endIndex = 0
 	} else {
 		// End is the first index smaller than or equal to it (so, not larger).
-		endIndex = sort.Search(len(self.buffer), func(index int) bool {
+		endIndex = sort.Search(len(s.buffer), func(index int) bool {
 			// buffer[index] <= t -> !(buffer[index] > t)
-			return !self.getData(index).timestamp.After(end)
+			return !s.getData(index).timestamp.After(end)
 		})
 		// Check if end is before all the data we have.
-		if endIndex == len(self.buffer) {
+		if endIndex == len(s.buffer) {
 			return []interface{}{}
 		}
 	}
@@ -144,21 +144,21 @@ func (self *TimedStore) InTimeRange(start, end time.Time, maxResults int) []interface{} {
 	// Return in sorted timestamp order so from the "back" to "front".
 	result := make([]interface{}, numResults)
 	for i := 0; i < numResults; i++ {
-		result[i] = self.Get(startIndex - i)
+		result[i] = s.Get(startIndex - i)
 	}
 	return result
 }
 
 // Gets the element at the specified index. Note that elements are output in LIFO order.
-func (self *TimedStore) Get(index int) interface{} {
-	return self.getData(index).data
+func (s *TimedStore) Get(index int) interface{} {
+	return s.getData(index).data
 }
 
 // Gets the data at the specified index. Note that elements are output in LIFO order.
-func (self *TimedStore) getData(index int) timedStoreData {
-	return self.buffer[len(self.buffer)-index-1]
+func (s *TimedStore) getData(index int) timedStoreData {
+	return s.buffer[len(s.buffer)-index-1]
 }
 
-func (self *TimedStore) Size() int {
-	return len(self.buffer)
+func (s *TimedStore) Size() int {
+	return len(s.buffer)
 }
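TimedStore.Add keeps its buffer sorted by locating the insertion point with sort.Search, growing the slice, and shifting the tail, then evicts old entries with a second search. A minimal sketch of the insertion idiom on a plain []time.Time (insertOrdered is illustrative):

package main

import (
	"fmt"
	"sort"
	"time"
)

// insertOrdered shows the sort.Search insertion idiom used by TimedStore.Add:
// find the first element strictly after t, grow the slice, shift the tail,
// and place the new value.
func insertOrdered(ts []time.Time, t time.Time) []time.Time {
	i := sort.Search(len(ts), func(i int) bool { return ts[i].After(t) })
	ts = append(ts, time.Time{}) // make room
	copy(ts[i+1:], ts[i:])       // shift the elements over
	ts[i] = t
	return ts
}

func main() {
	base := time.Unix(0, 0)
	ts := []time.Time{base, base.Add(2 * time.Second)}
	ts = insertOrdered(ts, base.Add(time.Second))
	fmt.Println(ts[1].Sub(base)) // 1s
}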