chore: update cgroups and ttrpc versions
- update github.com/containerd/cgroups to v1.1.0
- update github.com/containerd/ttrpc to v1.2.1

Signed-off-by: Akhil Mohan <akhilerm@gmail.com>
vendor/github.com/cilium/ebpf/internal/io.go (generated, vendored): 48 lines changed
@@ -1,6 +1,35 @@
 package internal
 
-import "errors"
+import (
+	"bufio"
+	"compress/gzip"
+	"errors"
+	"io"
+	"os"
+)
+
+// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
+// buffered reader. It is a convenience function for reading subsections of
+// ELF sections while minimizing the amount of read() syscalls made.
+//
+// Syscall overhead is non-negligible in continuous integration context
+// where ELFs might be accessed over virtual filesystems with poor random
+// access performance. Buffering reads makes sense because (sub)sections
+// end up being read completely anyway.
+//
+// Use instead of the r.Seek() + io.LimitReader() pattern.
+func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
+	// Clamp the size of the buffer to one page to avoid slurping large parts
+	// of a file into memory. bufio.NewReader uses a hardcoded default buffer
+	// of 4096. Allow arches with larger pages to allocate more, but don't
+	// allocate a fixed 4k buffer if we only need to read a small segment.
+	buf := n
+	if ps := int64(os.Getpagesize()); n > ps {
+		buf = ps
+	}
+
+	return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf))
+}
 
 // DiscardZeroes makes sure that all written bytes are zero
 // before discarding them.
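The first hunk adds NewBufferedSectionReader, which caps its internal buffer at one page. A minimal sketch of how it might be called from inside the cilium/ebpf internal package (the package cannot be imported from other modules); readSectionExample, the path, and the offsets are illustrative and not part of this change:

package internal

import (
	"io"
	"os"
)

// readSectionExample is illustrative only: it reads the byte range
// [off, off+n) of a file (for example one ELF section) through the buffered
// section reader added above, keeping read() syscalls to a minimum.
func readSectionExample(path string, off, n int64) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// The buffer is clamped to one page inside NewBufferedSectionReader, so a
	// small section does not allocate a full default-sized buffer.
	return io.ReadAll(NewBufferedSectionReader(f, off, n))
}

Per the doc comment, callers are expected to use this in place of the r.Seek() + io.LimitReader() pattern.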
@@ -14,3 +43,20 @@ func (DiscardZeroes) Write(p []byte) (int, error) {
 	}
 	return len(p), nil
 }
+
+// ReadAllCompressed decompresses a gzipped file into memory.
+func ReadAllCompressed(file string) ([]byte, error) {
+	fh, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer fh.Close()
+
+	gz, err := gzip.NewReader(fh)
+	if err != nil {
+		return nil, err
+	}
+	defer gz.Close()
+
+	return io.ReadAll(gz)
+}
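The second hunk adds ReadAllCompressed. A hedged sketch of how such a helper might be used from within the same package to scan a gzipped text file; configHasOption and the /proc/config.gz path are assumptions for illustration, not taken from this change:

package internal

import "strings"

// configHasOption is illustrative only: it decompresses a gzipped text file
// (here assumed to be /proc/config.gz) into memory in one call and checks
// whether a given option is set to "y".
func configHasOption(option string) (bool, error) {
	raw, err := ReadAllCompressed("/proc/config.gz")
	if err != nil {
		return false, err
	}
	for _, line := range strings.Split(string(raw), "\n") {
		if line == option+"=y" {
			return true, nil
		}
	}
	return false, nil
}

Any gzipped file works as input; the kernel config path is only an example.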