build(deps): bump github.com/containerd/cgroups/v3 from 3.0.2 to 3.0.3

Bumps [github.com/containerd/cgroups/v3](https://github.com/containerd/cgroups) from 3.0.2 to 3.0.3.
- [Release notes](https://github.com/containerd/cgroups/releases)
- [Commits](https://github.com/containerd/cgroups/compare/v3.0.2...v3.0.3)

---
updated-dependencies:
- dependency-name: github.com/containerd/cgroups/v3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
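
For a direct production dependency, a semver-patch bump like this amounts to a one-line version change in go.mod (plus matching go.sum and vendor/ updates). The hunk below is purely illustrative; the exact require block layout varies by module:

-github.com/containerd/cgroups/v3 v3.0.2
+github.com/containerd/cgroups/v3 v3.0.3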
Author:    dependabot[bot]
Date:      2023-12-29 11:45:53 +00:00
Committer: GitHub
Parent:    1f76ca4081
Commit:    5387747e92

119 changed files with 8743 additions and 3476 deletions


@@ -2,7 +2,6 @@ package btf
import (
"bufio"
"bytes"
"debug/elf"
"encoding/binary"
"errors"
@@ -11,6 +10,7 @@ import (
"math"
"os"
"reflect"
"sync"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
@@ -21,34 +21,41 @@ const btfMagic = 0xeB9F
// Errors returned by BTF functions.
var (
-ErrNotSupported   = internal.ErrNotSupported
-ErrNotFound       = errors.New("not found")
-ErrNoExtendedInfo = errors.New("no extended info")
+ErrNotSupported    = internal.ErrNotSupported
+ErrNotFound        = errors.New("not found")
+ErrNoExtendedInfo  = errors.New("no extended info")
+ErrMultipleMatches = errors.New("multiple matching types")
)
// ID represents the unique ID of a BTF object.
type ID = sys.BTFID
// Spec represents decoded BTF.
// Spec allows querying a set of Types and loading the set into the
// kernel.
type Spec struct {
-// Data from .BTF.
-rawTypes []rawType
-strings *stringTable
-// All types contained by the spec. For the base type, the position of
-// a type in the slice is its ID.
-types types
+// All types contained by the spec, not including types from the base in
+// case the spec was parsed from split BTF.
+types []Type
// Type IDs indexed by type.
typeIDs map[Type]TypeID
+// The ID of the first type in types.
+firstTypeID TypeID
// Types indexed by essential name.
// Includes all struct flavors and types with the same name.
namedTypes map[essentialName][]Type
+// String table from ELF, may be nil.
+strings *stringTable
+// Byte order of the ELF we decoded the spec from, may be nil.
byteOrder binary.ByteOrder
}
var btfHeaderLen = binary.Size(&btfHeader{})
type btfHeader struct {
Magic uint16
Version uint8
@@ -73,6 +80,18 @@ func (h *btfHeader) stringStart() int64 {
return int64(h.HdrLen + h.StringOff)
}
// newSpec creates a Spec containing only Void.
func newSpec() *Spec {
return &Spec{
[]Type{(*Void)(nil)},
map[Type]TypeID{(*Void)(nil): 0},
0,
make(map[essentialName][]Type),
nil,
nil,
}
}
// LoadSpec opens file and calls LoadSpecFromReader on it.
func LoadSpec(file string) (*Spec, error) {
fh, err := os.Open(file)
@@ -92,10 +111,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
if bo := guessRawBTFByteOrder(rd); bo != nil {
-// Try to parse a naked BTF blob. This will return an error if
-// we encounter a Datasec, since we can't fix it up.
-spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
-return spec, err
+return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
}
return nil, err
@@ -106,7 +122,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
// LoadSpecAndExtInfosFromReader reads from an ELF.
//
-// ExtInfos may be nil if the ELF doesn't contain section metadta.
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
file, err := internal.NewSafeELFFile(rd)
@@ -119,7 +135,7 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return nil, nil, err
}
-extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
+extInfos, err := loadExtInfosFromELF(file, spec)
if err != nil && !errors.Is(err, ErrNotFound) {
return nil, nil, err
}
@@ -127,40 +143,40 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return spec, extInfos, nil
}
-// variableOffsets extracts all symbols offsets from an ELF and indexes them by
+// symbolOffsets extracts all symbols offsets from an ELF and indexes them by
// section and variable name.
//
// References to variables in BTF data sections carry unsigned 32-bit offsets.
// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
// beyond this range. Since these symbols cannot be described by BTF info,
// ignore them here.
-func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
+func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
symbols, err := file.Symbols()
if err != nil {
return nil, fmt.Errorf("can't read symbols: %v", err)
}
-variableOffsets := make(map[variable]uint32)
-for _, symbol := range symbols {
-if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+offsets := make(map[symbol]uint32)
+for _, sym := range symbols {
+if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
// Ignore things like SHN_ABS
continue
}
-if symbol.Value > math.MaxUint32 {
+if sym.Value > math.MaxUint32 {
// VarSecinfo offset is u32, cannot reference symbols in higher regions.
continue
}
-if int(symbol.Section) >= len(file.Sections) {
-return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
+if int(sym.Section) >= len(file.Sections) {
+return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
}
-secName := file.Sections[symbol.Section].Name
-variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
+secName := file.Sections[sym.Section].Name
+offsets[symbol{secName, sym.Name}] = uint32(sym.Value)
}
-return variableOffsets, nil
+return offsets, nil
}
func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
@@ -190,7 +206,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("btf: %w", ErrNotFound)
}
-vars, err := variableOffsets(file)
+offsets, err := symbolOffsets(file)
if err != nil {
return nil, err
}
@@ -199,51 +215,66 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("compressed BTF is not supported")
}
-rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
+spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
if err != nil {
return nil, err
}
-err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
+err = fixupDatasec(spec.types, sectionSizes, offsets)
if err != nil {
return nil, err
}
-return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
+return spec, nil
}
-func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
-baseTypes types, baseStrings *stringTable) (*Spec, error) {
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
+var (
+baseStrings *stringTable
+firstTypeID TypeID
+err error
+)
+if base != nil {
+if base.firstTypeID != 0 {
+return nil, fmt.Errorf("can't use split BTF as base")
+}
+if base.strings == nil {
+return nil, fmt.Errorf("parse split BTF: base must be loaded from an ELF")
+}
+baseStrings = base.strings
+firstTypeID, err = base.nextTypeID()
+if err != nil {
+return nil, err
+}
+}
rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
if err != nil {
return nil, err
}
-return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
-}
-func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
-baseTypes types) (*Spec, error) {
-types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
+types, err := inflateRawTypes(rawTypes, rawStrings, base)
if err != nil {
return nil, err
}
-typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
+typeIDs, typesByName := indexTypes(types, firstTypeID)
return &Spec{
-rawTypes: rawTypes,
-namedTypes: typesByName,
-typeIDs: typeIDs,
-types: types,
-strings: rawStrings,
-byteOrder: bo,
+namedTypes: typesByName,
+typeIDs: typeIDs,
+types: types,
+firstTypeID: firstTypeID,
+strings: rawStrings,
+byteOrder: bo,
}, nil
}
-func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
namedTypes := 0
for _, typ := range types {
if typ.TypeName() != "" {
@@ -261,7 +292,7 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
if name := newEssentialName(typ.TypeName()); name != "" {
typesByName[name] = append(typesByName[name], typ)
}
-typeIDs[typ] = TypeID(i) + typeIDOffset
+typeIDs[typ] = firstTypeID + TypeID(i)
}
return typeIDs, typesByName
@@ -272,20 +303,70 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
spec, _, err := kernelSpec()
if err != nil {
return nil, err
}
return spec.Copy(), nil
}
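
Usage sketch for the caller-facing API above (LoadKernelSpec, FlushKernelSpec and Spec.TypeByName); this is an illustration of how a consumer of github.com/cilium/ebpf/btf would use it, and the struct name "task_struct" is only an example:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// Parse (and cache) the kernel's BTF, preferring /sys/kernel/btf/vmlinux
	// and falling back to a vmlinux image found on disk.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		log.Fatalf("loading kernel BTF: %v", err)
	}

	// typ is a non-nil pointer to a Type implementation, here **btf.Struct.
	var task *btf.Struct
	if err := spec.TypeByName("task_struct", &task); err != nil {
		log.Fatalf("resolving task_struct: %v", err)
	}
	fmt.Printf("task_struct has %d members\n", len(task.Members))

	// Drop the cached kernel spec once it is no longer needed.
	btf.FlushKernelSpec()
}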
var kernelBTF struct {
sync.RWMutex
spec *Spec
// True if the spec was read from an ELF instead of raw BTF in /sys.
fallback bool
}
// FlushKernelSpec removes any cached kernel type information.
func FlushKernelSpec() {
kernelBTF.Lock()
defer kernelBTF.Unlock()
kernelBTF.spec, kernelBTF.fallback = nil, false
}
func kernelSpec() (*Spec, bool, error) {
kernelBTF.RLock()
spec, fallback := kernelBTF.spec, kernelBTF.fallback
kernelBTF.RUnlock()
if spec == nil {
kernelBTF.Lock()
defer kernelBTF.Unlock()
spec, fallback = kernelBTF.spec, kernelBTF.fallback
}
if spec != nil {
return spec, fallback, nil
}
spec, fallback, err := loadKernelSpec()
if err != nil {
return nil, false, err
}
kernelBTF.spec, kernelBTF.fallback = spec, fallback
return spec, fallback, nil
}
func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
-return loadRawSpec(fh, internal.NativeEndian, nil, nil)
+spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
+return spec, false, err
}
file, err := findVMLinux()
if err != nil {
-return nil, err
+return nil, false, err
}
defer file.Close()
-return loadSpecFromELF(file)
+spec, err := loadSpecFromELF(file)
+return spec, true, err
}
// findVMLinux scans multiple well-known paths for vmlinux kernel images.
@@ -388,140 +469,122 @@ func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([
return rawTypes, rawStrings, nil
}
-type variable struct {
+type symbol struct {
section string
name string
}
-func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
-for i, rawType := range rawTypes {
-if rawType.Kind() != kindDatasec {
+// fixupDatasec attempts to patch up missing info in Datasecs and its members by
+// supplementing them with information from the ELF headers and symbol table.
+func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
+for _, typ := range types {
+ds, ok := typ.(*Datasec)
+if !ok {
continue
}
-name, err := rawStrings.Lookup(rawType.NameOff)
-if err != nil {
-return err
-}
+name := ds.Name
-if name == ".kconfig" || name == ".ksyms" {
-return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
-}
+// Some Datasecs are virtual and don't have corresponding ELF sections.
+switch name {
+case ".ksyms":
+// .ksyms describes forward declarations of kfunc signatures.
+// Nothing to fix up, all sizes and offsets are 0.
+for _, vsi := range ds.Vars {
+_, ok := vsi.Type.(*Func)
+if !ok {
+// Only Funcs are supported in the .ksyms Datasec.
+return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
+}
+}
+continue
+case ".kconfig":
+// .kconfig has a size of 0 and has all members' offsets set to 0.
+// Fix up all offsets and set the Datasec's size.
+if err := fixupDatasecLayout(ds); err != nil {
+return err
+}
+// Fix up extern to global linkage to avoid a BTF verifier error.
+for _, vsi := range ds.Vars {
+vsi.Type.(*Var).Linkage = GlobalVar
+}
-if rawTypes[i].SizeType != 0 {
continue
}
-size, ok := sectionSizes[name]
+if ds.Size != 0 {
+continue
+}
+ds.Size, ok = sectionSizes[name]
if !ok {
return fmt.Errorf("data section %s: missing size", name)
}
-rawTypes[i].SizeType = size
-secinfos := rawType.data.([]btfVarSecinfo)
-for j, secInfo := range secinfos {
-id := int(secInfo.Type - 1)
-if id >= len(rawTypes) {
-return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
-}
-varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
-if err != nil {
-return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
-}
-offset, ok := variableOffsets[variable{name, varName}]
+for i := range ds.Vars {
+symName := ds.Vars[i].Type.TypeName()
+ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
if !ok {
-return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
+return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
}
-secinfos[j].Offset = offset
}
}
return nil
}
// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
// alignment. Calculate and set ds.Size.
func fixupDatasecLayout(ds *Datasec) error {
var off uint32
for i, vsi := range ds.Vars {
v, ok := vsi.Type.(*Var)
if !ok {
return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
}
size, err := Sizeof(v.Type)
if err != nil {
return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
}
align, err := alignof(v.Type)
if err != nil {
return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
}
// Align the current member based on the offset of the end of the previous
// member and the alignment of the current member.
off = internal.Align(off, uint32(align))
ds.Vars[i].Offset = off
off += uint32(size)
}
ds.Size = off
return nil
}
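
The layout pass above is plain align-then-advance arithmetic. A standalone sketch of the same calculation, with a local align helper standing in for internal.Align and two hypothetical .kconfig members (a 4-byte u32 followed by an 8-byte u64):

package main

import "fmt"

// align rounds n up to the next multiple of alignment.
func align(n, alignment uint32) uint32 {
	return (n + alignment - 1) / alignment * alignment
}

func main() {
	var off uint32

	off = align(off, 4) // u32 with alignment 4: stays at offset 0
	fmt.Println("u32 offset:", off)
	off += 4

	off = align(off, 8) // u64 with alignment 8: offset 4 rounds up to 8
	fmt.Println("u64 offset:", off)
	off += 8

	fmt.Println("Datasec size:", off) // 16
}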
// Copy creates a copy of Spec.
func (s *Spec) Copy() *Spec {
types := copyTypes(s.types, nil)
-typeIDOffset := TypeID(0)
-if len(s.types) != 0 {
-typeIDOffset = s.typeIDs[s.types[0]]
-}
-typeIDs, typesByName := indexTypes(types, typeIDOffset)
+typeIDs, typesByName := indexTypes(types, s.firstTypeID)
// NB: Other parts of spec are not copied since they are immutable.
return &Spec{
-s.rawTypes,
-s.strings,
types,
typeIDs,
+s.firstTypeID,
typesByName,
+s.strings,
s.byteOrder,
}
}
type marshalOpts struct {
ByteOrder binary.ByteOrder
StripFuncLinkage bool
}
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
var (
buf bytes.Buffer
header = new(btfHeader)
headerLen = binary.Size(header)
)
// Reserve space for the header. We have to write it last since
// we don't know the size of the type section yet.
_, _ = buf.Write(make([]byte, headerLen))
// Write type section, just after the header.
for _, raw := range s.rawTypes {
switch {
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
raw.SetLinkage(StaticFunc)
}
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
}
typeLen := uint32(buf.Len() - headerLen)
// Write string section after type section.
stringsLen := s.strings.Length()
buf.Grow(stringsLen)
if err := s.strings.Marshal(&buf); err != nil {
return nil, err
}
// Fill out the header, and write it out.
header = &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(headerLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(stringsLen),
}
raw := buf.Bytes()
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
if err != nil {
return nil, fmt.Errorf("can't write header: %v", err)
}
return raw, nil
}
type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
@@ -532,12 +595,31 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
return copy(sw, p), nil
}
// nextTypeID returns the next unallocated type ID or an error if there are no
// more type IDs.
func (s *Spec) nextTypeID() (TypeID, error) {
id := s.firstTypeID + TypeID(len(s.types))
if id < s.firstTypeID {
return 0, fmt.Errorf("no more type IDs")
}
return id, nil
}
// TypeByID returns the BTF Type with the given type ID.
//
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
-return s.types.ByID(id)
+if id < s.firstTypeID {
+return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
+}
+index := int(id - s.firstTypeID)
+if index >= len(s.types) {
+return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
+}
+return s.types[index], nil
}
// TypeID returns the ID for a given Type.
@@ -598,17 +680,19 @@ func (s *Spec) AnyTypeByName(name string) (Type, error) {
return types[0], nil
}
-// TypeByName searches for a Type with a specific name. Since multiple
-// Types with the same name can exist, the parameter typ is taken to
-// narrow down the search in case of a clash.
+// TypeByName searches for a Type with a specific name. Since multiple Types
+// with the same name can exist, the parameter typ is taken to narrow down the
+// search in case of a clash.
//
-// typ must be a non-nil pointer to an implementation of a Type.
-// On success, the address of the found Type will be copied to typ.
+// typ must be a non-nil pointer to an implementation of a Type. On success, the
+// address of the found Type will be copied to typ.
//
-// Returns an error wrapping ErrNotFound if no matching
-// Type exists in the Spec. If multiple candidates are found,
-// an error is returned.
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+// Returns an error wrapping ErrMultipleTypes if multiple candidates are found.
func (s *Spec) TypeByName(name string, typ interface{}) error {
+typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
+// typ may be **T or *Type
typValue := reflect.ValueOf(typ)
if typValue.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", typ)
@@ -620,7 +704,12 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
wanted := typPtr.Type()
-if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
+if wanted == typeInterface {
+// This is *Type. Unwrap the value's type.
+wanted = typPtr.Elem().Type()
+}
+if !wanted.AssignableTo(typeInterface) {
return fmt.Errorf("%T does not satisfy Type interface", typ)
}
@@ -636,14 +725,14 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
if candidate != nil {
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
}
candidate = typ
}
if candidate == nil {
return fmt.Errorf("type %s: %w", name, ErrNotFound)
return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound)
}
typPtr.Set(reflect.ValueOf(candidate))
@@ -656,12 +745,12 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
// Types from base are used to resolve references in the split BTF.
// The returned Spec only contains types from the split BTF, not from the base.
func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
-return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
+return loadRawSpec(r, internal.NativeEndian, base)
}
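
A sketch of the split-BTF path that LoadSplitSpecFromReader and the new firstTypeID field enable: load vmlinux BTF as the base, then parse a kernel module's split BTF against it. The module name nf_tables and the struct nft_table are examples only, and /sys/kernel/btf/<module> files exist only on kernels built with module BTF:

package main

import (
	"log"
	"os"

	"github.com/cilium/ebpf/btf"
)

func main() {
	base, err := btf.LoadKernelSpec()
	if err != nil {
		log.Fatalf("loading base BTF: %v", err)
	}

	fh, err := os.Open("/sys/kernel/btf/nf_tables") // example module
	if err != nil {
		log.Fatalf("opening module BTF: %v", err)
	}
	defer fh.Close()

	split, err := btf.LoadSplitSpecFromReader(fh, base)
	if err != nil {
		log.Fatalf("parsing split BTF: %v", err)
	}

	// Types in the split spec come after the base types, so their IDs start
	// at the base's next unallocated ID (firstTypeID in the new Spec layout).
	var nft *btf.Struct
	if err := split.TypeByName("nft_table", &nft); err != nil {
		log.Fatalf("resolving nft_table: %v", err)
	}
	id, err := split.TypeID(nft)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("nft_table has type ID %d", id)
}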
// TypesIterator iterates over types of a given spec.
type TypesIterator struct {
-spec *Spec
+types []Type
index int
// The last visited type in the spec.
Type Type
@@ -669,229 +758,112 @@ type TypesIterator struct {
// Iterate returns the types iterator.
func (s *Spec) Iterate() *TypesIterator {
-return &TypesIterator{spec: s, index: 0}
+// We share the backing array of types with the Spec. This is safe since
+// we don't allow deletion or shuffling of types.
+return &TypesIterator{types: s.types, index: 0}
}
// Next returns true as long as there are any remaining types.
func (iter *TypesIterator) Next() bool {
-if len(iter.spec.types) <= iter.index {
+if len(iter.types) <= iter.index {
return false
}
-iter.Type = iter.spec.types[iter.index]
+iter.Type = iter.types[iter.index]
iter.index++
return true
}
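
Iteration sketch for the TypesIterator above, counting struct types in a spec; obtaining the spec via LoadKernelSpec is just one way to get one:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/btf"
)

func main() {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		log.Fatal(err)
	}

	// Walk every type in the spec, keeping count of the structs.
	structs := 0
	iter := spec.Iterate()
	for iter.Next() {
		if _, ok := iter.Type.(*btf.Struct); ok {
			structs++
		}
	}
	fmt.Println("structs:", structs)
}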
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
fd *sys.FD
// Size of the raw BTF in bytes.
size uint32
}
// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
if err := haveBTF(); err != nil {
return nil, err
}
if spec.byteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
}
btf, err := spec.marshal(marshalOpts{
ByteOrder: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
})
if err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
}
fd, err := sys.BtfLoad(attr)
if err != nil {
logBuf := make([]byte, 64*1024)
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1
// NB: The syscall will never return ENOSPC as of 5.18-rc4.
_, _ = sys.BtfLoad(attr)
return nil, internal.ErrorWithLog(err, logBuf)
}
return &Handle{fd, attr.BtfSize}, nil
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
Id: uint32(id),
})
if err != nil {
return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
}
info, err := newHandleInfoFromFD(fd)
if err != nil {
_ = fd.Close()
return nil, err
}
return &Handle{fd, info.size}, nil
}
// Spec parses the kernel BTF into Go types.
//
// base is used to decode split BTF and may be nil.
func (h *Handle) Spec(base *Spec) (*Spec, error) {
var btfInfo sys.BtfInfo
btfBuffer := make([]byte, h.size)
btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
return nil, err
}
var baseTypes types
var baseStrings *stringTable
if base != nil {
baseTypes = base.types
baseStrings = base.strings
}
return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
}
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
if h == nil {
return nil
}
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
return h.fd.Int()
}
// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
return newHandleInfoFromFD(h.fd)
}
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
const minHeaderLength = 24
typesLen := uint32(binary.Size(types))
header := btfHeader{
Magic: btfMagic,
Version: 1,
HdrLen: minHeaderLength,
TypeOff: 0,
TypeLen: typesLen,
StringOff: typesLen,
StringLen: uint32(len(strings)),
}
buf := new(bytes.Buffer)
_ = binary.Write(buf, bo, &header)
_ = binary.Write(buf, bo, types)
buf.Write(strings)
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
var (
types struct {
Integer btfType
Var btfType
btfVar struct{ Linkage uint32 }
}
strings = []byte{0, 'a', 0}
)
// We use a BTF_KIND_VAR here, to make sure that
// the kernel understands BTF at least as well as we
// do. BTF_KIND_VAR was introduced ~5.1.
types.Integer.SetKind(kindPointer)
types.Var.NameOff = 1
types.Var.SetKind(kindVar)
types.Var.SizeType = 1
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
})
// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
// kernel that supports BPF_BTF_LOAD.
var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
// 0-length anonymous integer
err := probeBTF(&Int{})
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: loading the program
// might still succeed without BTF.
return internal.ErrNotSupported
}
if err != nil {
return err
}
fd.Close()
return nil
return err
})
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
// used as a proxy for .bss, .data and .rodata map support, which generally
// come with a Var and Datasec. These were introduced in Linux 5.2.
var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error {
if err := haveBTF(); err != nil {
return err
}
var (
types struct {
FuncProto btfType
Func btfType
}
strings = []byte{0, 'a', 0}
)
v := &Var{
Name: "a",
Type: &Pointer{(*Void)(nil)},
}
types.FuncProto.SetKind(kindFuncProto)
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto
types.Func.NameOff = 1
types.Func.SetLinkage(GlobalFunc)
err := probeBTF(v)
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: creating the map may still
// succeed without Btf* attrs.
return internal.ErrNotSupported
}
return err
})
btf := marshalBTF(&types, strings, internal.NativeEndian)
// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
// is used as a proxy for ext_info (func_info) support, which depends on
// Func(Proto) by definition.
var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error {
if err := haveBTF(); err != nil {
return err
}
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
})
fn := &Func{
Name: "a",
Type: &FuncProto{Return: (*Void)(nil)},
}
err := probeBTF(fn)
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
return internal.ErrNotSupported
}
return err
})
var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
if err := haveProgBTF(); err != nil {
return err
}
fn := &Func{
Name: "a",
Type: &FuncProto{Return: (*Void)(nil)},
Linkage: GlobalFunc,
}
err := probeBTF(fn)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
return err
})
func probeBTF(typ Type) error {
b, err := NewBuilder([]Type{typ})
if err != nil {
return err
}
fd.Close()
return nil
})
buf, err := b.Marshal(nil, nil)
if err != nil {
return err
}
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(buf),
BtfSize: uint32(len(buf)),
})
if err == nil {
fd.Close()
}
return err
}
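
probeBTF above drives the exported Builder API; a minimal sketch of using it directly to produce a raw BTF blob (the type names are arbitrary):

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/btf"
)

func main() {
	u32 := &btf.Int{Name: "u32", Size: 4, Encoding: btf.Unsigned}

	// Build a spec containing an integer and a typedef referring to it.
	b, err := btf.NewBuilder([]btf.Type{
		u32,
		&btf.Typedef{Name: "my_u32", Type: u32},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Marshal into wire-format BTF, the same blob probeBTF hands to BtfLoad.
	blob, err := b.Marshal(nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("raw BTF blob: %d bytes\n", len(blob))
}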