build(deps): bump github.com/containerd/cgroups/v3 from 3.0.2 to 3.0.3

Bumps [github.com/containerd/cgroups/v3](https://github.com/containerd/cgroups) from 3.0.2 to 3.0.3.
- [Release notes](https://github.com/containerd/cgroups/releases)
- [Commits](https://github.com/containerd/cgroups/compare/v3.0.2...v3.0.3)

---
updated-dependencies:
- dependency-name: github.com/containerd/cgroups/v3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Commit 5387747e92 (parent 1f76ca4081)
Authored by dependabot[bot] on 2023-12-29 11:45:53 +00:00, committed by GitHub
119 changed files with 8743 additions and 3476 deletions

View File

@@ -2,7 +2,6 @@ package btf
import (
"bufio"
"bytes"
"debug/elf"
"encoding/binary"
"errors"
@@ -11,6 +10,7 @@ import (
"math"
"os"
"reflect"
"sync"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
@@ -21,34 +21,41 @@ const btfMagic = 0xeB9F
// Errors returned by BTF functions.
var (
ErrNotSupported = internal.ErrNotSupported
ErrNotFound = errors.New("not found")
ErrNoExtendedInfo = errors.New("no extended info")
ErrNotSupported    = internal.ErrNotSupported
ErrNotFound        = errors.New("not found")
ErrNoExtendedInfo  = errors.New("no extended info")
ErrMultipleMatches = errors.New("multiple matching types")
)
// ID represents the unique ID of a BTF object.
type ID = sys.BTFID
// Spec represents decoded BTF.
// Spec allows querying a set of Types and loading the set into the
// kernel.
type Spec struct {
// Data from .BTF.
rawTypes []rawType
strings *stringTable
// All types contained by the spec. For the base type, the position of
// a type in the slice is its ID.
types types
// All types contained by the spec, not including types from the base in
// case the spec was parsed from split BTF.
types []Type
// Type IDs indexed by type.
typeIDs map[Type]TypeID
// The ID of the first type in types.
firstTypeID TypeID
// Types indexed by essential name.
// Includes all struct flavors and types with the same name.
namedTypes map[essentialName][]Type
// String table from ELF, may be nil.
strings *stringTable
// Byte order of the ELF we decoded the spec from, may be nil.
byteOrder binary.ByteOrder
}
var btfHeaderLen = binary.Size(&btfHeader{})
type btfHeader struct {
Magic uint16
Version uint8
@@ -73,6 +80,18 @@ func (h *btfHeader) stringStart() int64 {
return int64(h.HdrLen + h.StringOff)
}
// newSpec creates a Spec containing only Void.
func newSpec() *Spec {
return &Spec{
[]Type{(*Void)(nil)},
map[Type]TypeID{(*Void)(nil): 0},
0,
make(map[essentialName][]Type),
nil,
nil,
}
}
// LoadSpec opens file and calls LoadSpecFromReader on it.
func LoadSpec(file string) (*Spec, error) {
fh, err := os.Open(file)
@@ -92,10 +111,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
if bo := guessRawBTFByteOrder(rd); bo != nil {
// Try to parse a naked BTF blob. This will return an error if
// we encounter a Datasec, since we can't fix it up.
spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
return spec, err
return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
}
return nil, err
@@ -106,7 +122,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
// LoadSpecAndExtInfosFromReader reads from an ELF.
//
// ExtInfos may be nil if the ELF doesn't contain section metadta.
// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
file, err := internal.NewSafeELFFile(rd)
@@ -119,7 +135,7 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return nil, nil, err
}
extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
extInfos, err := loadExtInfosFromELF(file, spec)
if err != nil && !errors.Is(err, ErrNotFound) {
return nil, nil, err
}
@@ -127,40 +143,40 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return spec, extInfos, nil
}
// variableOffsets extracts all symbols offsets from an ELF and indexes them by
// symbolOffsets extracts all symbol offsets from an ELF and indexes them by
// section and variable name.
//
// References to variables in BTF data sections carry unsigned 32-bit offsets.
// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
// beyond this range. Since these symbols cannot be described by BTF info,
// ignore them here.
func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
symbols, err := file.Symbols()
if err != nil {
return nil, fmt.Errorf("can't read symbols: %v", err)
}
variableOffsets := make(map[variable]uint32)
for _, symbol := range symbols {
if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
offsets := make(map[symbol]uint32)
for _, sym := range symbols {
if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
// Ignore things like SHN_ABS
continue
}
if symbol.Value > math.MaxUint32 {
if sym.Value > math.MaxUint32 {
// VarSecinfo offset is u32, cannot reference symbols in higher regions.
continue
}
if int(symbol.Section) >= len(file.Sections) {
return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
if int(sym.Section) >= len(file.Sections) {
return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
}
secName := file.Sections[symbol.Section].Name
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
secName := file.Sections[sym.Section].Name
offsets[symbol{secName, sym.Name}] = uint32(sym.Value)
}
return variableOffsets, nil
return offsets, nil
}
func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
@@ -190,7 +206,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("btf: %w", ErrNotFound)
}
vars, err := variableOffsets(file)
offsets, err := symbolOffsets(file)
if err != nil {
return nil, err
}
@@ -199,51 +215,66 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("compressed BTF is not supported")
}
rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
if err != nil {
return nil, err
}
err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
err = fixupDatasec(spec.types, sectionSizes, offsets)
if err != nil {
return nil, err
}
return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
return spec, nil
}
func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
baseTypes types, baseStrings *stringTable) (*Spec, error) {
func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
var (
baseStrings *stringTable
firstTypeID TypeID
err error
)
if base != nil {
if base.firstTypeID != 0 {
return nil, fmt.Errorf("can't use split BTF as base")
}
if base.strings == nil {
return nil, fmt.Errorf("parse split BTF: base must be loaded from an ELF")
}
baseStrings = base.strings
firstTypeID, err = base.nextTypeID()
if err != nil {
return nil, err
}
}
rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
if err != nil {
return nil, err
}
return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
}
func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
baseTypes types) (*Spec, error) {
types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
types, err := inflateRawTypes(rawTypes, rawStrings, base)
if err != nil {
return nil, err
}
typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
typeIDs, typesByName := indexTypes(types, firstTypeID)
return &Spec{
rawTypes: rawTypes,
namedTypes: typesByName,
typeIDs: typeIDs,
types: types,
strings: rawStrings,
byteOrder: bo,
namedTypes: typesByName,
typeIDs: typeIDs,
types: types,
firstTypeID: firstTypeID,
strings: rawStrings,
byteOrder: bo,
}, nil
}
func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
namedTypes := 0
for _, typ := range types {
if typ.TypeName() != "" {
@@ -261,7 +292,7 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
if name := newEssentialName(typ.TypeName()); name != "" {
typesByName[name] = append(typesByName[name], typ)
}
typeIDs[typ] = TypeID(i) + typeIDOffset
typeIDs[typ] = firstTypeID + TypeID(i)
}
return typeIDs, typesByName
@@ -272,20 +303,70 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
spec, _, err := kernelSpec()
if err != nil {
return nil, err
}
return spec.Copy(), nil
}
var kernelBTF struct {
sync.RWMutex
spec *Spec
// True if the spec was read from an ELF instead of raw BTF in /sys.
fallback bool
}
// FlushKernelSpec removes any cached kernel type information.
func FlushKernelSpec() {
kernelBTF.Lock()
defer kernelBTF.Unlock()
kernelBTF.spec, kernelBTF.fallback = nil, false
}
func kernelSpec() (*Spec, bool, error) {
kernelBTF.RLock()
spec, fallback := kernelBTF.spec, kernelBTF.fallback
kernelBTF.RUnlock()
if spec == nil {
kernelBTF.Lock()
defer kernelBTF.Unlock()
spec, fallback = kernelBTF.spec, kernelBTF.fallback
}
if spec != nil {
return spec, fallback, nil
}
spec, fallback, err := loadKernelSpec()
if err != nil {
return nil, false, err
}
kernelBTF.spec, kernelBTF.fallback = spec, fallback
return spec, fallback, nil
}
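
kernelSpec above is a double-checked cache: an optimistic read under RLock, and only on a miss the write lock, under which the cache is re-checked so that loadKernelSpec runs at most once. A minimal sketch of the same shape, with a hypothetical string-valued cache standing in for the Spec and fallback flag:

package main

import (
	"fmt"
	"sync"
)

var cache struct {
	sync.RWMutex
	value string
}

// cachedValue loads the value once, no matter how many goroutines race here.
func cachedValue(load func() string) string {
	cache.RLock()
	v := cache.value
	cache.RUnlock()
	if v != "" {
		return v
	}

	cache.Lock()
	defer cache.Unlock()
	if cache.value != "" {
		// Another goroutine filled the cache while we waited for the lock.
		return cache.value
	}
	cache.value = load()
	return cache.value
}

func main() {
	fmt.Println(cachedValue(func() string { return "vmlinux" }))
	fmt.Println(cachedValue(func() string { return "never called" })) // cache hit
}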
func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
return loadRawSpec(fh, internal.NativeEndian, nil, nil)
spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
return spec, false, err
}
file, err := findVMLinux()
if err != nil {
return nil, err
return nil, false, err
}
defer file.Close()
return loadSpecFromELF(file)
spec, err := loadSpecFromELF(file)
return spec, true, err
}
// findVMLinux scans multiple well-known paths for vmlinux kernel images.
@@ -388,140 +469,122 @@ func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([
return rawTypes, rawStrings, nil
}
type variable struct {
type symbol struct {
section string
name string
}
func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
for i, rawType := range rawTypes {
if rawType.Kind() != kindDatasec {
// fixupDatasec attempts to patch up missing info in Datasecs and their members by
// supplementing them with information from the ELF headers and symbol table.
func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
for _, typ := range types {
ds, ok := typ.(*Datasec)
if !ok {
continue
}
name, err := rawStrings.Lookup(rawType.NameOff)
if err != nil {
return err
}
name := ds.Name
if name == ".kconfig" || name == ".ksyms" {
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
}
// Some Datasecs are virtual and don't have corresponding ELF sections.
switch name {
case ".ksyms":
// .ksyms describes forward declarations of kfunc signatures.
// Nothing to fix up, all sizes and offsets are 0.
for _, vsi := range ds.Vars {
_, ok := vsi.Type.(*Func)
if !ok {
// Only Funcs are supported in the .ksyms Datasec.
return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
}
}
continue
case ".kconfig":
// .kconfig has a size of 0 and has all members' offsets set to 0.
// Fix up all offsets and set the Datasec's size.
if err := fixupDatasecLayout(ds); err != nil {
return err
}
// Fix up extern to global linkage to avoid a BTF verifier error.
for _, vsi := range ds.Vars {
vsi.Type.(*Var).Linkage = GlobalVar
}
if rawTypes[i].SizeType != 0 {
continue
}
size, ok := sectionSizes[name]
if ds.Size != 0 {
continue
}
ds.Size, ok = sectionSizes[name]
if !ok {
return fmt.Errorf("data section %s: missing size", name)
}
rawTypes[i].SizeType = size
secinfos := rawType.data.([]btfVarSecinfo)
for j, secInfo := range secinfos {
id := int(secInfo.Type - 1)
if id >= len(rawTypes) {
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
}
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
if err != nil {
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
}
offset, ok := variableOffsets[variable{name, varName}]
for i := range ds.Vars {
symName := ds.Vars[i].Type.TypeName()
ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
if !ok {
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
}
secinfos[j].Offset = offset
}
}
return nil
}
// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
// alignment. It also calculates and sets ds.Size.
func fixupDatasecLayout(ds *Datasec) error {
var off uint32
for i, vsi := range ds.Vars {
v, ok := vsi.Type.(*Var)
if !ok {
return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
}
size, err := Sizeof(v.Type)
if err != nil {
return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
}
align, err := alignof(v.Type)
if err != nil {
return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
}
// Align the current member based on the offset of the end of the previous
// member and the alignment of the current member.
off = internal.Align(off, uint32(align))
ds.Vars[i].Offset = off
off += uint32(size)
}
ds.Size = off
return nil
}
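
fixupDatasecLayout above is a bump allocator: round the running offset up to each member's alignment, record it, then advance by the member's size; the final offset becomes the section size. The same arithmetic with plain integers (align mirrors internal.Align; the example sizes and alignments are hypothetical):

package main

import "fmt"

// align rounds n up to the next multiple of alignment.
func align(n, alignment uint32) uint32 {
	return (n + alignment - 1) / alignment * alignment
}

// layout assigns offsets the way fixupDatasecLayout does: each member is
// placed at the end of the previous one, rounded up to its own alignment,
// and the section size is the running offset after the last member.
func layout(sizes, aligns []uint32) (offsets []uint32, total uint32) {
	var off uint32
	for i := range sizes {
		off = align(off, aligns[i])
		offsets = append(offsets, off)
		off += sizes[i]
	}
	return offsets, off
}

func main() {
	// Hypothetical .kconfig members: a u8 followed by a u32.
	offsets, size := layout([]uint32{1, 4}, []uint32{1, 4})
	fmt.Println(offsets, size) // [0 4] 8
}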
// Copy creates a copy of Spec.
func (s *Spec) Copy() *Spec {
types := copyTypes(s.types, nil)
typeIDOffset := TypeID(0)
if len(s.types) != 0 {
typeIDOffset = s.typeIDs[s.types[0]]
}
typeIDs, typesByName := indexTypes(types, typeIDOffset)
typeIDs, typesByName := indexTypes(types, s.firstTypeID)
// NB: Other parts of spec are not copied since they are immutable.
return &Spec{
s.rawTypes,
s.strings,
types,
typeIDs,
s.firstTypeID,
typesByName,
s.strings,
s.byteOrder,
}
}
type marshalOpts struct {
ByteOrder binary.ByteOrder
StripFuncLinkage bool
}
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
var (
buf bytes.Buffer
header = new(btfHeader)
headerLen = binary.Size(header)
)
// Reserve space for the header. We have to write it last since
// we don't know the size of the type section yet.
_, _ = buf.Write(make([]byte, headerLen))
// Write type section, just after the header.
for _, raw := range s.rawTypes {
switch {
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
raw.SetLinkage(StaticFunc)
}
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
}
typeLen := uint32(buf.Len() - headerLen)
// Write string section after type section.
stringsLen := s.strings.Length()
buf.Grow(stringsLen)
if err := s.strings.Marshal(&buf); err != nil {
return nil, err
}
// Fill out the header, and write it out.
header = &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(headerLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(stringsLen),
}
raw := buf.Bytes()
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
if err != nil {
return nil, fmt.Errorf("can't write header: %v", err)
}
return raw, nil
}
type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
@@ -532,12 +595,31 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
return copy(sw, p), nil
}
// nextTypeID returns the next unallocated type ID or an error if there are no
// more type IDs.
func (s *Spec) nextTypeID() (TypeID, error) {
id := s.firstTypeID + TypeID(len(s.types))
if id < s.firstTypeID {
return 0, fmt.Errorf("no more type IDs")
}
return id, nil
}
// TypeByID returns the BTF Type with the given type ID.
//
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
return s.types.ByID(id)
if id < s.firstTypeID {
return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
}
index := int(id - s.firstTypeID)
if index >= len(s.types) {
return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
}
return s.types[index], nil
}
// TypeID returns the ID for a given Type.
@@ -598,17 +680,19 @@ func (s *Spec) AnyTypeByName(name string) (Type, error) {
return types[0], nil
}
// TypeByName searches for a Type with a specific name. Since multiple
// Types with the same name can exist, the parameter typ is taken to
// narrow down the search in case of a clash.
// TypeByName searches for a Type with a specific name. Since multiple Types
// with the same name can exist, the parameter typ is taken to narrow down the
// search in case of a clash.
//
// typ must be a non-nil pointer to an implementation of a Type.
// On success, the address of the found Type will be copied to typ.
// typ must be a non-nil pointer to an implementation of a Type. On success, the
// address of the found Type will be copied to typ.
//
// Returns an error wrapping ErrNotFound if no matching
// Type exists in the Spec. If multiple candidates are found,
// an error is returned.
// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
// Returns an error wrapping ErrMultipleTypes if multiple candidates are found.
func (s *Spec) TypeByName(name string, typ interface{}) error {
typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
// typ may be **T or *Type
typValue := reflect.ValueOf(typ)
if typValue.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", typ)
@@ -620,7 +704,12 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
wanted := typPtr.Type()
if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
if wanted == typeInterface {
// This is *Type. Unwrap the value's type.
wanted = typPtr.Elem().Type()
}
if !wanted.AssignableTo(typeInterface) {
return fmt.Errorf("%T does not satisfy Type interface", typ)
}
@@ -636,14 +725,14 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
if candidate != nil {
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
}
candidate = typ
}
if candidate == nil {
return fmt.Errorf("type %s: %w", name, ErrNotFound)
return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound)
}
typPtr.Set(reflect.ValueOf(candidate))
@@ -656,12 +745,12 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
// Types from base are used to resolve references in the split BTF.
// The returned Spec only contains types from the split BTF, not from the base.
func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
return loadRawSpec(r, internal.NativeEndian, base)
}
// TypesIterator iterates over types of a given spec.
type TypesIterator struct {
spec *Spec
types []Type
index int
// The last visited type in the spec.
Type Type
@@ -669,229 +758,112 @@ type TypesIterator struct {
// Iterate returns the types iterator.
func (s *Spec) Iterate() *TypesIterator {
return &TypesIterator{spec: s, index: 0}
// We share the backing array of types with the Spec. This is safe since
// we don't allow deletion or shuffling of types.
return &TypesIterator{types: s.types, index: 0}
}
// Next returns true as long as there are any remaining types.
func (iter *TypesIterator) Next() bool {
if len(iter.spec.types) <= iter.index {
if len(iter.types) <= iter.index {
return false
}
iter.Type = iter.spec.types[iter.index]
iter.Type = iter.types[iter.index]
iter.index++
return true
}
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
fd *sys.FD
// Size of the raw BTF in bytes.
size uint32
}
// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
if err := haveBTF(); err != nil {
return nil, err
}
if spec.byteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
}
btf, err := spec.marshal(marshalOpts{
ByteOrder: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
})
if err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
}
fd, err := sys.BtfLoad(attr)
if err != nil {
logBuf := make([]byte, 64*1024)
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1
// NB: The syscall will never return ENOSPC as of 5.18-rc4.
_, _ = sys.BtfLoad(attr)
return nil, internal.ErrorWithLog(err, logBuf)
}
return &Handle{fd, attr.BtfSize}, nil
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
Id: uint32(id),
})
if err != nil {
return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
}
info, err := newHandleInfoFromFD(fd)
if err != nil {
_ = fd.Close()
return nil, err
}
return &Handle{fd, info.size}, nil
}
// Spec parses the kernel BTF into Go types.
//
// base is used to decode split BTF and may be nil.
func (h *Handle) Spec(base *Spec) (*Spec, error) {
var btfInfo sys.BtfInfo
btfBuffer := make([]byte, h.size)
btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
return nil, err
}
var baseTypes types
var baseStrings *stringTable
if base != nil {
baseTypes = base.types
baseStrings = base.strings
}
return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
}
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
if h == nil {
return nil
}
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
return h.fd.Int()
}
// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
return newHandleInfoFromFD(h.fd)
}
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
const minHeaderLength = 24
typesLen := uint32(binary.Size(types))
header := btfHeader{
Magic: btfMagic,
Version: 1,
HdrLen: minHeaderLength,
TypeOff: 0,
TypeLen: typesLen,
StringOff: typesLen,
StringLen: uint32(len(strings)),
}
buf := new(bytes.Buffer)
_ = binary.Write(buf, bo, &header)
_ = binary.Write(buf, bo, types)
buf.Write(strings)
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
var (
types struct {
Integer btfType
Var btfType
btfVar struct{ Linkage uint32 }
}
strings = []byte{0, 'a', 0}
)
// We use a BTF_KIND_VAR here, to make sure that
// the kernel understands BTF at least as well as we
// do. BTF_KIND_VAR was introduced ~5.1.
types.Integer.SetKind(kindPointer)
types.Var.NameOff = 1
types.Var.SetKind(kindVar)
types.Var.SizeType = 1
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
})
// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
// kernel that supports BPF_BTF_LOAD.
var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
// 0-length anonymous integer
err := probeBTF(&Int{})
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: loading the program
// might still succeed without BTF.
return internal.ErrNotSupported
}
if err != nil {
return err
}
fd.Close()
return nil
return err
})
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
// used as a proxy for .bss, .data and .rodata map support, which generally
// come with a Var and Datasec. These were introduced in Linux 5.2.
var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error {
if err := haveBTF(); err != nil {
return err
}
var (
types struct {
FuncProto btfType
Func btfType
}
strings = []byte{0, 'a', 0}
)
v := &Var{
Name: "a",
Type: &Pointer{(*Void)(nil)},
}
types.FuncProto.SetKind(kindFuncProto)
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto
types.Func.NameOff = 1
types.Func.SetLinkage(GlobalFunc)
err := probeBTF(v)
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: creating the map may still
// succeed without Btf* attrs.
return internal.ErrNotSupported
}
return err
})
btf := marshalBTF(&types, strings, internal.NativeEndian)
// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
// is used as a proxy for ext_info (func_info) support, which depends on
// Func(Proto) by definition.
var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error {
if err := haveBTF(); err != nil {
return err
}
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
})
fn := &Func{
Name: "a",
Type: &FuncProto{Return: (*Void)(nil)},
}
err := probeBTF(fn)
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
return internal.ErrNotSupported
}
return err
})
var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
if err := haveProgBTF(); err != nil {
return err
}
fn := &Func{
Name: "a",
Type: &FuncProto{Return: (*Void)(nil)},
Linkage: GlobalFunc,
}
err := probeBTF(fn)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
return err
})
func probeBTF(typ Type) error {
b, err := NewBuilder([]Type{typ})
if err != nil {
return err
}
fd.Close()
return nil
})
buf, err := b.Marshal(nil, nil)
if err != nil {
return err
}
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(buf),
BtfSize: uint32(len(buf)),
})
if err == nil {
fd.Close()
}
return err
}
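
The thread running through this file is the switch from a relative typeIDOffset to an explicit firstTypeID, so that a Spec parsed from split BTF resolves IDs against its own slice while IDs below firstTypeID belong to the base. A standalone sketch of the TypeByID arithmetic, using a plain string slice instead of the real Spec internals (lookupByID is a made-up name):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// lookupByID mirrors TypeByID above: IDs below firstID belong to the base
// spec, IDs at or above it index into this spec's own slice.
func lookupByID(types []string, firstID, id uint32) (string, error) {
	if id < firstID {
		return "", fmt.Errorf("ID %d precedes first ID %d: %w", id, firstID, errNotFound)
	}
	index := int(id - firstID)
	if index >= len(types) {
		return "", fmt.Errorf("ID %d: %w", id, errNotFound)
	}
	return types[index], nil
}

func main() {
	// A hypothetical split spec whose base ends at ID 99.
	split := []string{"task_struct", "file"}
	fmt.Println(lookupByID(split, 100, 101)) // file <nil>
}

nextTypeID is the same arithmetic in reverse: firstTypeID + len(types), with an overflow check.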

View File

@@ -4,35 +4,41 @@ import (
"encoding/binary"
"fmt"
"io"
"unsafe"
)
//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage
//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind
// btfKind describes a Type.
type btfKind uint8
// Equivalents of the BTF_KIND_* constants.
const (
kindUnknown btfKind = iota
kindInt
kindPointer
kindArray
kindStruct
kindUnion
kindEnum
kindForward
kindTypedef
kindVolatile
kindConst
kindRestrict
kindUnknown btfKind = iota // Unknown
kindInt // Int
kindPointer // Pointer
kindArray // Array
kindStruct // Struct
kindUnion // Union
kindEnum // Enum
kindForward // Forward
kindTypedef // Typedef
kindVolatile // Volatile
kindConst // Const
kindRestrict // Restrict
// Added ~4.20
kindFunc
kindFuncProto
kindFunc // Func
kindFuncProto // FuncProto
// Added ~5.1
kindVar
kindDatasec
kindVar // Var
kindDatasec // Datasec
// Added ~5.13
kindFloat
kindFloat // Float
// Added 5.16
kindDeclTag // DeclTag
kindTypeTag // TypeTag
// Added 6.0
kindEnum64 // Enum64
)
// FuncLinkage describes BTF function linkage metadata.
@@ -63,6 +69,8 @@ const (
btfTypeKindFlagMask = 1
)
var btfTypeLen = binary.Size(btfType{})
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
NameOff uint32
@@ -85,47 +93,6 @@ type btfType struct {
SizeType uint32
}
func (k btfKind) String() string {
switch k {
case kindUnknown:
return "Unknown"
case kindInt:
return "Integer"
case kindPointer:
return "Pointer"
case kindArray:
return "Array"
case kindStruct:
return "Struct"
case kindUnion:
return "Union"
case kindEnum:
return "Enumeration"
case kindForward:
return "Forward"
case kindTypedef:
return "Typedef"
case kindVolatile:
return "Volatile"
case kindConst:
return "Const"
case kindRestrict:
return "Restrict"
case kindFunc:
return "Function"
case kindFuncProto:
return "Function Proto"
case kindVar:
return "Variable"
case kindDatasec:
return "Section"
case kindFloat:
return "Float"
default:
return fmt.Sprintf("Unknown (%d)", k)
}
}
func mask(len uint32) uint32 {
return (1 << len) - 1
}
@@ -164,10 +131,43 @@ func (bt *btfType) SetVlen(vlen int) {
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
func (bt *btfType) KindFlag() bool {
func (bt *btfType) kindFlagBool() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
func (bt *btfType) setKindFlagBool(set bool) {
var value uint32
if set {
value = 1
}
bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift)
}
// Bitfield returns true if the struct or union contain a bitfield.
func (bt *btfType) Bitfield() bool {
return bt.kindFlagBool()
}
func (bt *btfType) SetBitfield(isBitfield bool) {
bt.setKindFlagBool(isBitfield)
}
func (bt *btfType) FwdKind() FwdKind {
return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift))
}
func (bt *btfType) SetFwdKind(kind FwdKind) {
bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift)
}
func (bt *btfType) Signed() bool {
return bt.kindFlagBool()
}
func (bt *btfType) SetSigned(signed bool) {
bt.setKindFlagBool(signed)
}
func (bt *btfType) Linkage() FuncLinkage {
return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
@@ -181,6 +181,10 @@ func (bt *btfType) Type() TypeID {
return TypeID(bt.SizeType)
}
func (bt *btfType) SetType(id TypeID) {
bt.SizeType = uint32(id)
}
func (bt *btfType) Size() uint32 {
// TODO: Panic here if wrong kind?
return bt.SizeType
@@ -190,13 +194,22 @@ func (bt *btfType) SetSize(size uint32) {
bt.SizeType = size
}
func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error {
buf := make([]byte, unsafe.Sizeof(*bt))
bo.PutUint32(buf[0:], bt.NameOff)
bo.PutUint32(buf[4:], bt.Info)
bo.PutUint32(buf[8:], bt.SizeType)
_, err := w.Write(buf)
return err
}
type rawType struct {
btfType
data interface{}
}
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
if err := binary.Write(w, bo, &rt.btfType); err != nil {
if err := rt.btfType.Marshal(w, bo); err != nil {
return err
}
@@ -209,11 +222,11 @@ func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
// btfInt encodes additional data for integers.
//
// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
// ? = undefined
// e = encoding
// o = offset (bitfields?)
// b = bits (bitfields)
//	? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
//	? = undefined
//	e = encoding
//	o = offset (bitfields?)
//	b = bits (bitfields)
type btfInt struct {
Raw uint32
}
@@ -275,7 +288,13 @@ type btfVariable struct {
type btfEnum struct {
NameOff uint32
Val int32
Val uint32
}
type btfEnum64 struct {
NameOff uint32
ValLo32 uint32
ValHi32 uint32
}
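
btfEnum64 carries its 64-bit value as two u32 halves, matching the kernel's struct btf_enum64. Recombining them is a shift and an or (a sketch; the parameter names follow the struct fields above):

package main

import "fmt"

// enum64Value recombines btfEnum64's split halves into the full value.
func enum64Value(lo32, hi32 uint32) uint64 {
	return uint64(hi32)<<32 | uint64(lo32)
}

func main() {
	fmt.Printf("%#x\n", enum64Value(0xdeadbeef, 0x1)) // 0x1deadbeef
}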
type btfParam struct {
@@ -283,12 +302,16 @@ type btfParam struct {
Type TypeID
}
type btfDeclTag struct {
ComponentIdx uint32
}
func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
var header btfType
// because of the interleaving between types and struct members it is
// difficult to precompute the number of raw types this will parse;
// this "guess" is a good first estimate
sizeOfbtfType := uintptr(binary.Size(btfType{}))
sizeOfbtfType := uintptr(btfTypeLen)
tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
types := make([]rawType, 0, tyMaxCount)
@@ -325,6 +348,11 @@ func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, err
case kindDatasec:
data = make([]btfVarSecinfo, header.Vlen())
case kindFloat:
case kindDeclTag:
data = new(btfDeclTag)
case kindTypeTag:
case kindEnum64:
data = make([]btfEnum64, header.Vlen())
default:
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
}
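
The new accessors (kindFlagBool, Bitfield, FwdKind, Signed) all share one bit of btf_type's packed info word, whose layout per Documentation/bpf/btf.rst is: bits 0-15 vlen, bits 24-28 kind, bit 31 kind_flag, with the flag's meaning depending on the kind. A sketch of the mask-and-shift scheme (setBits/getBits are made-up generalizations of setInfo/info):

package main

import "fmt"

// setBits writes value into the (mask, shift) field of info.
func setBits(info, value, mask, shift uint32) uint32 {
	info &^= mask << shift          // clear the field
	info |= (value & mask) << shift // write the new value
	return info
}

// getBits reads the (mask, shift) field back out of info.
func getBits(info, mask, shift uint32) uint32 {
	return (info >> shift) & mask
}

func main() {
	var info uint32
	info = setBits(info, 3, 0xffff, 0) // vlen = 3
	info = setBits(info, 6, 0x1f, 24)  // kind = 6 (kindEnum)
	info = setBits(info, 1, 0x1, 31)   // kind_flag = 1 (signed, for enums)
	fmt.Println(getBits(info, 0xffff, 0), getBits(info, 0x1f, 24), getBits(info, 0x1, 31))
	// Output: 3 6 1
}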

View File

@@ -1,4 +1,4 @@
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT.
package btf
@@ -42,3 +42,39 @@ func (i VarLinkage) String() string {
}
return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[kindUnknown-0]
_ = x[kindInt-1]
_ = x[kindPointer-2]
_ = x[kindArray-3]
_ = x[kindStruct-4]
_ = x[kindUnion-5]
_ = x[kindEnum-6]
_ = x[kindForward-7]
_ = x[kindTypedef-8]
_ = x[kindVolatile-9]
_ = x[kindConst-10]
_ = x[kindRestrict-11]
_ = x[kindFunc-12]
_ = x[kindFuncProto-13]
_ = x[kindVar-14]
_ = x[kindDatasec-15]
_ = x[kindFloat-16]
_ = x[kindDeclTag-17]
_ = x[kindTypeTag-18]
_ = x[kindEnum64-19]
}
const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
func (i btfKind) String() string {
if i >= btfKind(len(_btfKind_index)-1) {
return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]]
}
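
The generated String() replaces the hand-written switch by packing every name into one constant string plus a boundary index array; a value's name is the slice between neighbouring indices. A toy reproduction of the technique with a three-value enum:

package main

import (
	"fmt"
	"strconv"
)

type color uint8

const (
	red color = iota
	green
	blue
)

// One backing string and the boundaries of each name inside it.
const _color_name = "RedGreenBlue"

var _color_index = [...]uint8{0, 3, 8, 12}

func (i color) String() string {
	if i >= color(len(_color_index)-1) {
		return "color(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _color_name[_color_index[i]:_color_index[i+1]]
}

func main() {
	fmt.Println(green, color(7)) // Green color(7)
}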

View File

@@ -156,16 +156,25 @@ func (k coreKind) String() string {
}
}
// CORERelocate calculates the difference in types between local and target.
// CORERelocate calculates changes needed to adjust eBPF instructions for differences
// in types.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
if local.byteOrder != target.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
if target == nil {
var err error
target, _, err = kernelSpec()
if err != nil {
return nil, fmt.Errorf("load kernel spec: %w", err)
}
}
if bo != target.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
}
type reloGroup struct {
@@ -185,15 +194,14 @@ func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, er
return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
id, err := local.TypeID(relo.typ)
if err != nil {
return nil, fmt.Errorf("%s: %w", relo.kind, err)
}
result[i] = COREFixup{
kind: relo.kind,
local: uint32(id),
target: uint32(id),
kind: relo.kind,
local: uint32(relo.id),
// NB: Using relo.id as the target here is incorrect, since
// it doesn't match the BTF we generate on the fly. This isn't
// too bad for now since there are no uses of the local type ID
// in the kernel, yet.
target: uint32(relo.id),
}
continue
}
@@ -214,7 +222,7 @@ func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, er
}
targets := target.namedTypes[newEssentialName(localTypeName)]
fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
@@ -229,34 +237,29 @@ func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, er
var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")
var errIncompatibleTypes = errors.New("incompatible types")
// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
// coreCalculateFixups finds the target type that best matches all relocations.
//
// All relos must target the same type.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
localID, err := localSpec.TypeID(local)
if err != nil {
return nil, fmt.Errorf("local type ID: %w", err)
}
local = Copy(local, UnderlyingType)
func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
bestScore := len(relos)
var bestFixups []COREFixup
for i := range targets {
targetID, err := targetSpec.TypeID(targets[i])
for _, target := range targets {
targetID, err := targetSpec.TypeID(target)
if err != nil {
return nil, fmt.Errorf("target type ID: %w", err)
}
target := Copy(targets[i], UnderlyingType)
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
fixup, err := coreCalculateFixup(relo, target, targetID, bo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
}
if fixup.poison || fixup.isNonExistant() {
score++
@@ -303,9 +306,11 @@ func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type
return bestFixups, nil
}
var errNoSignedness = errors.New("no signedness")
// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{kind: relo.kind, local: local, target: target}, nil
}
@@ -320,18 +325,20 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
}
zero := COREFixup{}
local := relo.typ
switch relo.kind {
case reloTypeIDTarget, reloTypeSize, reloTypeExists:
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
}
err := coreAreTypesCompatible(local, target)
if errors.Is(err, errImpossibleRelocation) {
if errors.Is(err, errIncompatibleTypes) {
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
return zero, err
}
switch relo.kind {
@@ -339,7 +346,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return fixup(1, 1)
case reloTypeIDTarget:
return fixup(uint32(localID), uint32(targetID))
return fixup(uint32(relo.id), uint32(targetID))
case reloTypeSize:
localSize, err := Sizeof(local)
@@ -361,7 +368,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
return zero, err
}
switch relo.kind {
@@ -372,21 +379,8 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}
case reloFieldSigned:
switch local.(type) {
case *Enum:
return fixup(1, 1)
case *Int:
return fixup(
uint32(local.(*Int).Encoding&Signed),
uint32(target.(*Int).Encoding&Signed),
)
default:
return fixupWithoutValidation(0, 0)
}
case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
if _, ok := target.(*Fwd); ok {
case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
if _, ok := as[*Fwd](target); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
// we'll find it in one of the other iterations.
@@ -398,7 +392,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
return poison()
}
if err != nil {
return zero, fmt.Errorf("target %s: %w", target, err)
return zero, err
}
maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
@@ -427,7 +421,7 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
case reloFieldLShiftU64:
var target uint32
if byteOrder == binary.LittleEndian {
if bo == binary.LittleEndian {
targetSize, err := targetField.sizeBits()
if err != nil {
return zero, err
@@ -451,10 +445,40 @@ func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID,
}
return fixupWithoutValidation(0, uint32(64-targetSize))
case reloFieldSigned:
switch local := UnderlyingType(localField.Type).(type) {
case *Enum:
target, ok := as[*Enum](targetField.Type)
if !ok {
return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
}
return fixup(boolToUint32(local.Signed), boolToUint32(target.Signed))
case *Int:
target, ok := as[*Int](targetField.Type)
if !ok {
return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
}
return fixup(
uint32(local.Encoding&Signed),
uint32(target.Encoding&Signed),
)
default:
return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
}
}
}
return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
return zero, ErrNotSupported
}
func boolToUint32(val bool) uint32 {
if val {
return 1
}
return 0
}
/* coreAccessor contains a path through a struct. It contains at least one index.
@@ -516,7 +540,7 @@ func (ca coreAccessor) String() string {
}
func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
e, ok := t.(*Enum)
e, ok := as[*Enum](t)
if !ok {
return nil, fmt.Errorf("not an enum: %s", t)
}
@@ -536,9 +560,9 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
// /- start of composite
// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
// \- start of field                    end of field -/
//	/- start of composite
//	| offset * 8 | bitfieldOffset | bitfieldSize | ... |
//	\- start of field                    end of field -/
type coreField struct {
Type Type
@@ -555,6 +579,10 @@ type coreField struct {
}
func (cf *coreField) adjustOffsetToNthElement(n int) error {
if n == 0 {
return nil
}
size, err := Sizeof(cf.Type)
if err != nil {
return err
@@ -597,7 +625,7 @@ func (cf *coreField) sizeBits() (Bits, error) {
// between kernel versions. Synthesise the size to make the shifts work.
size, err := Sizeof(cf.Type)
if err != nil {
return 0, nil
return 0, err
}
return Bits(size * 8), nil
}
@@ -611,6 +639,10 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
local := coreField{Type: localT}
target := coreField{Type: targetT}
if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
// The first index is used to offset a pointer of the base type, as when
// accessing an array.
if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
@@ -621,13 +653,9 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
return coreField{}, coreField{}, err
}
if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
var localMaybeFlex, targetMaybeFlex bool
for i, acc := range localAcc[1:] {
switch localType := local.Type.(type) {
switch localType := UnderlyingType(local.Type).(type) {
case composite:
// For composite types acc is used to find the field in the local type,
// and then we try to find a field in target with the same name.
@@ -638,21 +666,21 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
localMember := localMembers[acc]
if localMember.Name == "" {
_, ok := localMember.Type.(composite)
localMemberType, ok := as[composite](localMember.Type)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
}
// This is an anonymous struct or union, ignore it.
local = coreField{
Type: localMember.Type,
Type: localMemberType,
offset: local.offset + localMember.Offset.Bytes(),
}
localMaybeFlex = false
continue
}
targetType, ok := target.Type.(composite)
targetType, ok := as[composite](target.Type)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
}
@@ -698,7 +726,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
case *Array:
// For arrays, acc is the index in the target.
targetType, ok := target.Type.(*Array)
targetType, ok := as[*Array](target.Type)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
}
@@ -792,7 +820,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
continue
}
comp, ok := member.Type.(composite)
comp, ok := as[composite](member.Type)
if !ok {
return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
}
@@ -811,7 +839,7 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
return nil, nil, err
}
targetEnum, ok := target.(*Enum)
targetEnum, ok := as[*Enum](target)
if !ok {
return nil, nil, errImpossibleRelocation
}
@@ -828,6 +856,13 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
return nil, nil, errImpossibleRelocation
}
// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules.
//
// Only layout compatibility is checked, ignoring names of the root type.
func CheckTypeCompatibility(localType Type, targetType Type) error {
return coreAreTypesCompatible(localType, targetType)
}
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
*
* Check local and target types for compatibility. This check is used for
@@ -849,25 +884,26 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*
* Returns errImpossibleRelocation if types are not compatible.
* Returns errIncompatibleTypes if types are not compatible.
*/
func coreAreTypesCompatible(localType Type, targetType Type) error {
var (
localTs, targetTs typeDeque
l, t = &localType, &targetType
depth = 0
)
for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
for ; l != nil && t != nil; l, t = localTs.Shift(), targetTs.Shift() {
if depth >= maxTypeDepth {
return errors.New("types are nested too deep")
}
localType = *l
targetType = *t
localType = UnderlyingType(*l)
targetType = UnderlyingType(*t)
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
}
switch lv := (localType).(type) {
@@ -876,18 +912,18 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
case *Pointer, *Array:
depth++
localType.walk(&localTs)
targetType.walk(&targetTs)
walkType(localType, localTs.Push)
walkType(targetType, targetTs.Push)
case *FuncProto:
tv := targetType.(*FuncProto)
if len(lv.Params) != len(tv.Params) {
return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
}
depth++
localType.walk(&localTs)
targetType.walk(&targetTs)
walkType(localType, localTs.Push)
walkType(targetType, targetTs.Push)
default:
return fmt.Errorf("unsupported type %T", localType)
@@ -931,6 +967,9 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
* Returns errImpossibleRelocation if the members are not compatible.
*/
func coreAreMembersCompatible(localType Type, targetType Type) error {
localType = UnderlyingType(localType)
targetType = UnderlyingType(targetType)
doNamesMatch := func(a, b string) error {
if a == "" || b == "" {
// allow anonymous and named type to match
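
coreCalculateFixups earlier in this file picks a target by scoring: every relocation that must be poisoned costs a point, the lowest-scoring candidate wins, and a tie between two distinct candidates is ambiguous. A simplified sketch of that selection loop (it omits the real code's all-poisoned tiebreak; poisoned is a hypothetical oracle standing in for coreCalculateFixup):

package main

import (
	"errors"
	"fmt"
)

var errAmbiguous = errors.New("ambiguous relocation")

// bestTarget returns the candidate with the fewest poisoned relocations,
// or an error when two different candidates tie for the best score.
func bestTarget(candidates, relos []string, poisoned func(cand, relo string) bool) (string, error) {
	best, bestScore := "", len(relos)+1
	for _, cand := range candidates {
		score := 0
		for _, relo := range relos {
			if poisoned(cand, relo) {
				score++
			}
		}
		if score > bestScore {
			continue
		}
		if score == bestScore {
			return "", fmt.Errorf("%s vs %s: %w", best, cand, errAmbiguous)
		}
		best, bestScore = cand, score
	}
	return best, nil
}

func main() {
	poisoned := func(cand, relo string) bool {
		return cand == "task_struct___old" && relo == "field_exists"
	}
	got, err := bestTarget(
		[]string{"task_struct___old", "task_struct"},
		[]string{"field_exists", "byte_offset"},
		poisoned,
	)
	fmt.Println(got, err) // task_struct <nil>
}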

View File

@@ -24,7 +24,7 @@ type ExtInfos struct {
// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
//
// Returns an error wrapping ErrNotFound if no ext infos are present.
func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) {
section := file.Section(".BTF.ext")
if section == nil {
return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
@@ -34,11 +34,11 @@ func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTa
return nil, fmt.Errorf("compressed ext_info is not supported")
}
return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
return loadExtInfos(section.ReaderAt, file.ByteOrder, spec, spec.strings)
}
// loadExtInfos parses bare ext infos.
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec, strings *stringTable) (*ExtInfos, error) {
// Open unbuffered section reader. binary.Read() calls io.ReadFull on
// the header structs, resulting in one syscall per header.
headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
@@ -60,7 +60,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringT
funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
for section, bfis := range btfFuncInfos {
funcInfos[section], err = newFuncInfos(bfis, ts)
funcInfos[section], err = newFuncInfos(bfis, spec)
if err != nil {
return nil, fmt.Errorf("section %s: func infos: %w", section, err)
}
@@ -93,7 +93,7 @@ func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringT
coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
for section, brs := range btfCORERelos {
coreRelos[section], err = newRelocationInfos(brs, ts, strings)
coreRelos[section], err = newRelocationInfos(brs, spec, strings)
if err != nil {
return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
}
@@ -114,7 +114,7 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
iter := insns.Iterate()
for iter.Next() {
if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
*iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].fn)
funcInfos = funcInfos[1:]
}
@@ -132,17 +132,37 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
//
// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
// Bail out early if the kernel doesn't support Func(Proto). If this is the
// case, func_info will also be unsupported.
if err := haveProgBTF(); err != nil {
return nil, nil, nil, err
}
iter := insns.Iterate()
var fiBuf, liBuf bytes.Buffer
for iter.Next() {
_, ok := iter.Ins.Source().(*Line)
fn := FuncMetadata(iter.Ins)
if ok || fn != nil {
goto marshal
}
}
return nil, nil, nil, nil
marshal:
var b Builder
var fiBuf, liBuf bytes.Buffer
for {
if fn := FuncMetadata(iter.Ins); fn != nil {
fi := &funcInfo{
fn: fn,
offset: iter.Offset,
}
if err := fi.marshal(&fiBuf, typeID); err != nil {
return nil, nil, fmt.Errorf("write func info: %w", err)
if err := fi.marshal(&fiBuf, &b); err != nil {
return nil, nil, nil, fmt.Errorf("write func info: %w", err)
}
}
@@ -151,12 +171,18 @@ func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error))
line: line,
offset: iter.Offset,
}
if err := li.marshal(&liBuf); err != nil {
return nil, nil, fmt.Errorf("write line info: %w", err)
if err := li.marshal(&liBuf, &b); err != nil {
return nil, nil, nil, fmt.Errorf("write line info: %w", err)
}
}
if !iter.Next() {
break
}
}
return fiBuf.Bytes(), liBuf.Bytes(), nil
handle, err := NewHandle(&b)
return handle, fiBuf.Bytes(), liBuf.Bytes(), err
}
// btfExtHeader is found at the start of the .BTF.ext section.
@@ -311,8 +337,8 @@ type bpfFuncInfo struct {
TypeID TypeID
}
func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
typ, err := ts.ByID(fi.TypeID)
func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
typ, err := spec.TypeByID(fi.TypeID)
if err != nil {
return nil, err
}
@@ -333,10 +359,10 @@ func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
}, nil
}
func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) ([]funcInfo, error) {
fis := make([]funcInfo, 0, len(bfis))
for _, bfi := range bfis {
fi, err := newFuncInfo(bfi, ts)
fi, err := newFuncInfo(bfi, spec)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
}
@@ -349,8 +375,8 @@ func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
}
// marshal into the BTF wire format.
func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
id, err := typeID(fi.fn)
func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
id, err := b.Add(fi.fn)
if err != nil {
return err
}
@@ -358,10 +384,14 @@ func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) erro
InsnOff: uint32(fi.offset),
TypeID: id,
}
return binary.Write(w, internal.NativeEndian, &bfi)
buf := make([]byte, FuncInfoSize)
internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
_, err = w.Write(buf)
return err
}
// parseLineInfos parses a func_info sub-section within .BTF.ext ito a map of
// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
@@ -428,12 +458,6 @@ type Line struct {
line string
lineNumber uint32
lineColumn uint32
// TODO: We should get rid of the fields below, but for that we need to be
// able to write BTF.
fileNameOff uint32
lineOff uint32
}
func (li *Line) FileName() string {
@@ -496,8 +520,6 @@ func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
line,
lineNumber,
lineColumn,
li.FileNameOff,
li.LineOff,
},
asm.RawInstructionOffset(li.InsnOff),
}, nil
@@ -519,7 +541,7 @@ func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error)
}
// marshal writes the binary representation of the LineInfo to w.
func (li *lineInfo) marshal(w io.Writer) error {
func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
line := li.line
if line.lineNumber > bpfLineMax {
return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
@@ -529,13 +551,30 @@ func (li *lineInfo) marshal(w io.Writer) error {
return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
}
fileNameOff, err := b.addString(line.fileName)
if err != nil {
return fmt.Errorf("file name %q: %w", line.fileName, err)
}
lineOff, err := b.addString(line.line)
if err != nil {
return fmt.Errorf("line %q: %w", line.line, err)
}
bli := bpfLineInfo{
uint32(li.offset),
line.fileNameOff,
line.lineOff,
fileNameOff,
lineOff,
(line.lineNumber << bpfLineShift) | line.lineColumn,
}
return binary.Write(w, internal.NativeEndian, &bli)
buf := make([]byte, LineInfoSize)
internal.NativeEndian.PutUint32(buf, bli.InsnOff)
internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
_, err = w.Write(buf)
return err
}
// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
@@ -605,9 +644,16 @@ type bpfCORERelo struct {
}
type CORERelocation struct {
// The local type of the relocation, stripped of typedefs and qualifiers.
typ Type
accessor coreAccessor
kind coreKind
// The ID of the local type in the source BTF.
id TypeID
}
func (cr *CORERelocation) String() string {
return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
}
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
@@ -620,8 +666,8 @@ type coreRelocationInfo struct {
offset asm.RawInstructionOffset
}
func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
typ, err := ts.ByID(relo.TypeID)
func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
typ, err := spec.TypeByID(relo.TypeID)
if err != nil {
return nil, err
}
@@ -641,15 +687,16 @@ func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreR
typ,
accessor,
relo.Kind,
relo.TypeID,
},
asm.RawInstructionOffset(relo.InsnOff),
}, nil
}
func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) ([]coreRelocationInfo, error) {
rs := make([]coreRelocationInfo, 0, len(brs))
for _, br := range brs {
relo, err := newRelocationInfo(br, ts, strings)
relo, err := newRelocationInfo(br, spec, strings)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
}


@@ -56,54 +56,40 @@ func (gf *GoFormatter) enumIdentifier(name, element string) string {
//
// It encodes https://golang.org/ref/spec#Type_declarations:
//
// type foo struct { bar uint32; }
// type bar int32
// type foo struct { bar uint32; }
// type bar int32
func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
if name == "" {
return fmt.Errorf("need a name for type %s", typ)
}
switch v := skipQualifiers(typ).(type) {
case *Enum:
fmt.Fprintf(&gf.w, "type %s ", name)
switch v.Size {
case 1:
gf.w.WriteString("int8")
case 2:
gf.w.WriteString("int16")
case 4:
gf.w.WriteString("int32")
case 8:
gf.w.WriteString("int64")
default:
return fmt.Errorf("%s: invalid enum size %d", typ, v.Size)
}
if len(v.Values) == 0 {
return nil
}
gf.w.WriteString("; const ( ")
for _, ev := range v.Values {
id := gf.enumIdentifier(name, ev.Name)
fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
}
gf.w.WriteString(")")
return nil
default:
fmt.Fprintf(&gf.w, "type %s ", name)
return gf.writeTypeLit(v, 0)
typ = skipQualifiers(typ)
fmt.Fprintf(&gf.w, "type %s ", name)
if err := gf.writeTypeLit(typ, 0); err != nil {
return err
}
e, ok := typ.(*Enum)
if !ok || len(e.Values) == 0 {
return nil
}
gf.w.WriteString("; const ( ")
for _, ev := range e.Values {
id := gf.enumIdentifier(name, ev.Name)
fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
}
gf.w.WriteString(")")
return nil
}
// writeType outputs the name of a named type or a literal describing the type.
//
// It encodes https://golang.org/ref/spec#Types.
//
// foo (if foo is a named type)
// uint32
// foo (if foo is a named type)
// uint32
func (gf *GoFormatter) writeType(typ Type, depth int) error {
typ = skipQualifiers(typ)
@@ -122,8 +108,8 @@ func (gf *GoFormatter) writeType(typ Type, depth int) error {
//
// It encodes https://golang.org/ref/spec#TypeLit.
//
// struct { bar uint32; }
// uint32
// struct { bar uint32; }
// uint32
func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
depth++
if depth > maxTypeDepth {
@@ -133,10 +119,24 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
var err error
switch v := skipQualifiers(typ).(type) {
case *Int:
gf.writeIntLit(v)
err = gf.writeIntLit(v)
case *Enum:
gf.w.WriteString("int32")
if !v.Signed {
gf.w.WriteRune('u')
}
switch v.Size {
case 1:
gf.w.WriteString("int8")
case 2:
gf.w.WriteString("int16")
case 4:
gf.w.WriteString("int32")
case 8:
gf.w.WriteString("int64")
default:
err = fmt.Errorf("invalid enum size %d", v.Size)
}
case *Typedef:
err = gf.writeType(v.Type, depth)
@@ -166,19 +166,36 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
return nil
}
func (gf *GoFormatter) writeIntLit(i *Int) {
// NB: Encoding.IsChar is ignored.
if i.Encoding.IsBool() && i.Size == 1 {
gf.w.WriteString("bool")
return
}
func (gf *GoFormatter) writeIntLit(i *Int) error {
bits := i.Size * 8
if i.Encoding.IsSigned() {
fmt.Fprintf(&gf.w, "int%d", bits)
} else {
fmt.Fprintf(&gf.w, "uint%d", bits)
switch i.Encoding {
case Bool:
if i.Size != 1 {
return fmt.Errorf("bool with size %d", i.Size)
}
gf.w.WriteString("bool")
case Char:
if i.Size != 1 {
return fmt.Errorf("char with size %d", i.Size)
}
// BTF doesn't have a way to specify the signedness of a char. Assume
// we are dealing with unsigned, since this works nicely with []byte
// in Go code.
fallthrough
case Unsigned, Signed:
stem := "uint"
if i.Encoding == Signed {
stem = "int"
}
if i.Size > 8 {
fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8)
} else {
fmt.Fprintf(&gf.w, "%s%d", stem, bits)
}
default:
return fmt.Errorf("can't encode %s", i.Encoding)
}
return nil
}
func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
@@ -199,11 +216,15 @@ func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int)
gf.writePadding(n)
}
size, err := Sizeof(m.Type)
fieldSize, err := Sizeof(m.Type)
if err != nil {
return fmt.Errorf("field %d: %w", i, err)
}
prevOffset = offset + uint32(size)
prevOffset = offset + uint32(fieldSize)
if prevOffset > size {
return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size)
}
if err := gf.writeStructField(m, depth); err != nil {
return fmt.Errorf("field %d: %w", i, err)
@@ -272,7 +293,11 @@ func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
prevOffset := uint32(0)
for i, vsi := range ds.Vars {
v := vsi.Type.(*Var)
v, ok := vsi.Type.(*Var)
if !ok {
return fmt.Errorf("can't format %s as part of data section", vsi.Type)
}
if v.Linkage != GlobalVar {
// Ignore static, extern, etc. for now.
continue


@@ -1,14 +1,142 @@
package btf
import (
"bytes"
"errors"
"fmt"
"math"
"os"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
fd *sys.FD
// Size of the raw BTF in bytes.
size uint32
needsKernelBase bool
}
// NewHandle loads the contents of a [Builder] into the kernel.
//
// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
func NewHandle(b *Builder) (*Handle, error) {
small := getByteSlice()
defer putByteSlice(small)
buf, err := b.Marshal(*small, KernelMarshalOptions())
if err != nil {
return nil, fmt.Errorf("marshal BTF: %w", err)
}
return NewHandleFromRawBTF(buf)
}
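// A minimal usage sketch, assuming the Builder and Int types defined in this
// package (error handling elided):
//
// b, _ := NewBuilder([]Type{&Int{Name: "u32", Size: 4}})
// h, err := NewHandle(b) // err wraps ErrNotSupported on kernels without BTF
// if err != nil { ... }
// defer h.Close()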
// NewHandleFromRawBTF loads raw BTF into the kernel.
//
// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
}
fd, err := sys.BtfLoad(attr)
if err == nil {
return &Handle{fd, attr.BtfSize, false}, nil
}
if err := haveBTF(); err != nil {
return nil, err
}
logBuf := make([]byte, 64*1024)
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1
// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
// if there are other verification errors. ENOSPC is only returned when
// the BTF blob is correct, a log was requested, and the provided buffer
// is too small.
_, ve := sys.BtfLoad(attr)
return nil, internal.ErrorWithLog("load btf", err, logBuf, errors.Is(ve, unix.ENOSPC))
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
Id: uint32(id),
})
if err != nil {
return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
}
info, err := newHandleInfoFromFD(fd)
if err != nil {
_ = fd.Close()
return nil, err
}
return &Handle{fd, info.size, info.IsModule()}, nil
}
// Spec parses the kernel BTF into Go types.
//
// base must contain type information for vmlinux if the handle is for
// a kernel module. It may be nil otherwise.
func (h *Handle) Spec(base *Spec) (*Spec, error) {
var btfInfo sys.BtfInfo
btfBuffer := make([]byte, h.size)
btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
return nil, err
}
if h.needsKernelBase && base == nil {
return nil, fmt.Errorf("missing base types")
}
return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
}
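// A sketch of reading a handle's types back, assuming a handle for non-module
// BTF (so no base Spec is needed; id is a placeholder):
//
// h, err := NewHandleFromID(id)
// if err != nil { ... }
// defer h.Close()
// spec, err := h.Spec(nil)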
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
if h == nil {
return nil
}
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
return h.fd.Int()
}
// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
return newHandleInfoFromFD(h.fd)
}
// HandleInfo describes a Handle.
type HandleInfo struct {
// ID of this handle in the kernel. The ID is only valid as long as the
@@ -59,7 +187,7 @@ func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
}, nil
}
// IsModule returns true if the BTF is for the kernel itself.
// IsVmlinux returns true if the BTF is for the kernel itself.
func (i *HandleInfo) IsVmlinux() bool {
return i.IsKernel && i.Name == "vmlinux"
}
@@ -71,51 +199,89 @@ func (i *HandleInfo) IsModule() bool {
// HandleIterator allows enumerating BTF blobs loaded into the kernel.
type HandleIterator struct {
// The ID of the last retrieved handle. Only valid after a call to Next.
ID ID
err error
// The ID of the current handle. Only valid after a call to Next.
ID ID
// The current Handle. Only valid until a call to Next.
// See Take if you want to retain the handle.
Handle *Handle
err error
}
// Next retrieves a handle for the next BTF blob.
// Next retrieves a handle for the next BTF object.
//
// [Handle.Close] is called if *handle is non-nil to avoid leaking fds.
//
// Returns true if another BTF blob was found. Call [HandleIterator.Err] after
// Returns true if another BTF object was found. Call [HandleIterator.Err] after
// the function returns false.
func (it *HandleIterator) Next(handle **Handle) bool {
if *handle != nil {
(*handle).Close()
*handle = nil
}
func (it *HandleIterator) Next() bool {
id := it.ID
for {
attr := &sys.BtfGetNextIdAttr{Id: id}
err := sys.BtfGetNextId(attr)
if errors.Is(err, os.ErrNotExist) {
// There are no more BTF objects.
return false
break
} else if err != nil {
it.err = fmt.Errorf("get next BTF ID: %w", err)
return false
break
}
id = attr.NextId
*handle, err = NewHandleFromID(id)
handle, err := NewHandleFromID(id)
if errors.Is(err, os.ErrNotExist) {
// Try again with the next ID.
continue
} else if err != nil {
it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
return false
break
}
it.ID = id
it.Handle.Close()
it.ID, it.Handle = id, handle
return true
}
// No more handles or we encountered an error.
it.Handle.Close()
it.Handle = nil
return false
}
// Take the ownership of the current handle.
//
// It's the caller's responsibility to close the handle.
func (it *HandleIterator) Take() *Handle {
handle := it.Handle
it.Handle = nil
return handle
}
// Err returns an error if iteration failed for some reason.
func (it *HandleIterator) Err() error {
return it.err
}
// FindHandle returns the first handle for which predicate returns true.
//
// Requires CAP_SYS_ADMIN.
//
// Returns an error wrapping ErrNotFound if predicate never returns true or if
// there is no BTF loaded into the kernel.
func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) {
it := new(HandleIterator)
defer it.Handle.Close()
for it.Next() {
info, err := it.Handle.Info()
if err != nil {
return nil, fmt.Errorf("info for ID %d: %w", it.ID, err)
}
if predicate(info) {
return it.Take(), nil
}
}
if err := it.Err(); err != nil {
return nil, fmt.Errorf("iterate handles: %w", err)
}
return nil, fmt.Errorf("find handle: %w", ErrNotFound)
}
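// For example, to find the kernel's own BTF (a sketch, using the method
// expression as the predicate):
//
// h, err := FindHandle((*HandleInfo).IsVmlinux)
// if err != nil { ... }
// defer h.Close()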

vendor/github.com/cilium/ebpf/btf/marshal.go generated vendored Normal file

@@ -0,0 +1,543 @@
package btf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
"sync"
"github.com/cilium/ebpf/internal"
"golang.org/x/exp/slices"
)
type MarshalOptions struct {
// Target byte order. Defaults to the system's native endianness.
Order binary.ByteOrder
// Remove function linkage information for compatibility with <5.6 kernels.
StripFuncLinkage bool
}
// KernelMarshalOptions will generate BTF suitable for the current kernel.
func KernelMarshalOptions() *MarshalOptions {
return &MarshalOptions{
Order: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
}
}
// encoder turns Types into raw BTF.
type encoder struct {
MarshalOptions
pending internal.Deque[Type]
buf *bytes.Buffer
strings *stringTableBuilder
ids map[Type]TypeID
lastID TypeID
}
var bufferPool = sync.Pool{
New: func() any {
buf := make([]byte, btfHeaderLen+128)
return &buf
},
}
func getByteSlice() *[]byte {
return bufferPool.Get().(*[]byte)
}
func putByteSlice(buf *[]byte) {
*buf = (*buf)[:0]
bufferPool.Put(buf)
}
// Builder turns Types into raw BTF.
//
// The default value may be used and represents an empty BTF blob. Void is
// added implicitly if necessary.
type Builder struct {
// Explicitly added types.
types []Type
// IDs for all added types which the user knows about.
stableIDs map[Type]TypeID
// Explicitly added strings.
strings *stringTableBuilder
}
// NewBuilder creates a Builder from a list of types.
//
// It is more efficient than calling [Add] individually.
//
// Returns an error if adding any of the types fails.
func NewBuilder(types []Type) (*Builder, error) {
b := &Builder{
make([]Type, 0, len(types)),
make(map[Type]TypeID, len(types)),
nil,
}
for _, typ := range types {
_, err := b.Add(typ)
if err != nil {
return nil, fmt.Errorf("add %s: %w", typ, err)
}
}
return b, nil
}
// Add a Type and allocate a stable ID for it.
//
// Adding the identical Type multiple times is valid and will return the same ID.
//
// See [Type] for details on identity.
func (b *Builder) Add(typ Type) (TypeID, error) {
if b.stableIDs == nil {
b.stableIDs = make(map[Type]TypeID)
}
if _, ok := typ.(*Void); ok {
// Equality is weird for void, since it is a zero sized type.
return 0, nil
}
if ds, ok := typ.(*Datasec); ok {
if err := datasecResolveWorkaround(b, ds); err != nil {
return 0, err
}
}
id, ok := b.stableIDs[typ]
if ok {
return id, nil
}
b.types = append(b.types, typ)
id = TypeID(len(b.types))
if int(id) != len(b.types) {
return 0, fmt.Errorf("no more type IDs")
}
b.stableIDs[typ] = id
return id, nil
}
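// A sketch of the identity rules, assuming two *Int values of the same shape:
//
// u32 := &Int{Name: "u32", Size: 4}
// var b Builder
// id1, _ := b.Add(u32) // 1
// id2, _ := b.Add(u32) // 1: same pointer, same ID
// id3, _ := b.Add(&Int{Name: "u32", Size: 4}) // 2: same shape, distinct pointer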
// Marshal encodes all types in the Builder into BTF wire format.
//
// opts may be nil.
func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
stb := b.strings
if stb == nil {
// Assume that most types are named. This makes encoding large BTF like
// vmlinux a lot cheaper.
stb = newStringTableBuilder(len(b.types))
} else {
// Avoid modifying the Builder's string table.
stb = b.strings.Copy()
}
if opts == nil {
opts = &MarshalOptions{Order: internal.NativeEndian}
}
// Reserve space for the BTF header.
buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen]
w := internal.NewBuffer(buf)
defer internal.PutBuffer(w)
e := encoder{
MarshalOptions: *opts,
buf: w,
strings: stb,
lastID: TypeID(len(b.types)),
ids: make(map[Type]TypeID, len(b.types)),
}
// Ensure that types are marshaled in the exact order they were Add()ed.
// Otherwise the ID returned from Add() won't match.
e.pending.Grow(len(b.types))
for _, typ := range b.types {
e.pending.Push(typ)
e.ids[typ] = b.stableIDs[typ]
}
if err := e.deflatePending(); err != nil {
return nil, err
}
length := e.buf.Len()
typeLen := uint32(length - btfHeaderLen)
stringLen := e.strings.Length()
buf = e.strings.AppendEncoded(e.buf.Bytes())
// Fill out the header, and write it out.
header := &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(btfHeaderLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(stringLen),
}
err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header)
if err != nil {
return nil, fmt.Errorf("write header: %v", err)
}
return buf, nil
}
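// A sketch of producing a raw blob with default options (native byte order):
//
// var b Builder
// _, _ = b.Add(&Int{Name: "u32", Size: 4})
// raw, err := b.Marshal(nil, nil)
// // raw holds the btfHeader, then the type section, then the string table.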
// addString adds a string to the resulting BTF.
//
// Adding the same string multiple times will return the same result.
//
// Returns an offset into the string table or an error if the string
// contains invalid characters.
func (b *Builder) addString(str string) (uint32, error) {
if b.strings == nil {
b.strings = newStringTableBuilder(0)
}
return b.strings.Add(str)
}
func (e *encoder) allocateID(typ Type) error {
id := e.lastID + 1
if id < e.lastID {
return errors.New("type ID overflow")
}
e.pending.Push(typ)
e.ids[typ] = id
e.lastID = id
return nil
}
// id returns the ID for the given type or panics with an error.
func (e *encoder) id(typ Type) TypeID {
if _, ok := typ.(*Void); ok {
return 0
}
id, ok := e.ids[typ]
if !ok {
panic(fmt.Errorf("no ID for type %v", typ))
}
return id
}
func (e *encoder) deflatePending() error {
// Declare root outside of the loop to avoid repeated heap allocations.
var root Type
skip := func(t Type) (skip bool) {
if t == root {
// Force descending into the current root type even if it already
// has an ID. Otherwise we miss children of types that have their
// ID pre-allocated via Add.
return false
}
_, isVoid := t.(*Void)
_, alreadyEncoded := e.ids[t]
return isVoid || alreadyEncoded
}
for !e.pending.Empty() {
root = e.pending.Shift()
// Allocate IDs for all children of root, including transitive dependencies.
iter := postorderTraversal(root, skip)
for iter.Next() {
if iter.Type == root {
// The iterator yields root at the end, do not allocate another ID.
break
}
if err := e.allocateID(iter.Type); err != nil {
return err
}
}
if err := e.deflateType(root); err != nil {
id := e.ids[root]
return fmt.Errorf("deflate %v with ID %d: %w", root, id, err)
}
}
return nil
}
func (e *encoder) deflateType(typ Type) (err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
err, ok = r.(error)
if !ok {
panic(r)
}
}
}()
var raw rawType
raw.NameOff, err = e.strings.Add(typ.TypeName())
if err != nil {
return err
}
switch v := typ.(type) {
case *Void:
return errors.New("Void is implicit in BTF wire format")
case *Int:
raw.SetKind(kindInt)
raw.SetSize(v.Size)
var bi btfInt
bi.SetEncoding(v.Encoding)
// We need to set bits in addition to size, since btf_type_int_is_regular
// otherwise flags this as a bitfield.
bi.SetBits(byte(v.Size) * 8)
raw.data = bi
case *Pointer:
raw.SetKind(kindPointer)
raw.SetType(e.id(v.Target))
case *Array:
raw.SetKind(kindArray)
raw.data = &btfArray{
e.id(v.Type),
e.id(v.Index),
v.Nelems,
}
case *Struct:
raw.SetKind(kindStruct)
raw.SetSize(v.Size)
raw.data, err = e.convertMembers(&raw.btfType, v.Members)
case *Union:
raw.SetKind(kindUnion)
raw.SetSize(v.Size)
raw.data, err = e.convertMembers(&raw.btfType, v.Members)
case *Enum:
raw.SetSize(v.size())
raw.SetVlen(len(v.Values))
raw.SetSigned(v.Signed)
if v.has64BitValues() {
raw.SetKind(kindEnum64)
raw.data, err = e.deflateEnum64Values(v.Values)
} else {
raw.SetKind(kindEnum)
raw.data, err = e.deflateEnumValues(v.Values)
}
case *Fwd:
raw.SetKind(kindForward)
raw.SetFwdKind(v.Kind)
case *Typedef:
raw.SetKind(kindTypedef)
raw.SetType(e.id(v.Type))
case *Volatile:
raw.SetKind(kindVolatile)
raw.SetType(e.id(v.Type))
case *Const:
raw.SetKind(kindConst)
raw.SetType(e.id(v.Type))
case *Restrict:
raw.SetKind(kindRestrict)
raw.SetType(e.id(v.Type))
case *Func:
raw.SetKind(kindFunc)
raw.SetType(e.id(v.Type))
if !e.StripFuncLinkage {
raw.SetLinkage(v.Linkage)
}
case *FuncProto:
raw.SetKind(kindFuncProto)
raw.SetType(e.id(v.Return))
raw.SetVlen(len(v.Params))
raw.data, err = e.deflateFuncParams(v.Params)
case *Var:
raw.SetKind(kindVar)
raw.SetType(e.id(v.Type))
raw.data = btfVariable{uint32(v.Linkage)}
case *Datasec:
raw.SetKind(kindDatasec)
raw.SetSize(v.Size)
raw.SetVlen(len(v.Vars))
raw.data = e.deflateVarSecinfos(v.Vars)
case *Float:
raw.SetKind(kindFloat)
raw.SetSize(v.Size)
case *declTag:
raw.SetKind(kindDeclTag)
raw.SetType(e.id(v.Type))
raw.data = &btfDeclTag{uint32(v.Index)}
raw.NameOff, err = e.strings.Add(v.Value)
case *typeTag:
raw.SetKind(kindTypeTag)
raw.SetType(e.id(v.Type))
raw.NameOff, err = e.strings.Add(v.Value)
default:
return fmt.Errorf("don't know how to deflate %T", v)
}
if err != nil {
return err
}
return raw.Marshal(e.buf, e.Order)
}
func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) {
bms := make([]btfMember, 0, len(members))
isBitfield := false
for _, member := range members {
isBitfield = isBitfield || member.BitfieldSize > 0
offset := member.Offset
if isBitfield {
offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff)
}
nameOff, err := e.strings.Add(member.Name)
if err != nil {
return nil, err
}
bms = append(bms, btfMember{
nameOff,
e.id(member.Type),
uint32(offset),
})
}
header.SetVlen(len(members))
header.SetBitfield(isBitfield)
return bms, nil
}
func (e *encoder) deflateEnumValues(values []EnumValue) ([]btfEnum, error) {
bes := make([]btfEnum, 0, len(values))
for _, value := range values {
nameOff, err := e.strings.Add(value.Name)
if err != nil {
return nil, err
}
if value.Value > math.MaxUint32 {
return nil, fmt.Errorf("value of enum %q exceeds 32 bits", value.Name)
}
bes = append(bes, btfEnum{
nameOff,
uint32(value.Value),
})
}
return bes, nil
}
func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) {
bes := make([]btfEnum64, 0, len(values))
for _, value := range values {
nameOff, err := e.strings.Add(value.Name)
if err != nil {
return nil, err
}
bes = append(bes, btfEnum64{
nameOff,
uint32(value.Value),
uint32(value.Value >> 32),
})
}
return bes, nil
}
func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) {
bps := make([]btfParam, 0, len(params))
for _, param := range params {
nameOff, err := e.strings.Add(param.Name)
if err != nil {
return nil, err
}
bps = append(bps, btfParam{
nameOff,
e.id(param.Type),
})
}
return bps, nil
}
func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
vsis := make([]btfVarSecinfo, 0, len(vars))
for _, v := range vars {
vsis = append(vsis, btfVarSecinfo{
e.id(v.Type),
v.Offset,
v.Size,
})
}
return vsis
}
// MarshalMapKV creates a BTF object containing a map key and value.
//
// The function is intended for the use of the ebpf package and may be removed
// at any point in time.
func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) {
var b Builder
if key != nil {
keyID, err = b.Add(key)
if err != nil {
return nil, 0, 0, fmt.Errorf("add key type: %w", err)
}
}
if value != nil {
valueID, err = b.Add(value)
if err != nil {
return nil, 0, 0, fmt.Errorf("add value type: %w", err)
}
}
handle, err := NewHandle(&b)
if err != nil {
// Check for 'full' map BTF support, since kernels between 4.18 and 5.2
// already support BTF blobs for maps without Var or Datasec just fine.
if err := haveMapBTF(); err != nil {
return nil, 0, 0, err
}
}
return handle, keyID, valueID, err
}


@@ -6,6 +6,9 @@ import (
"errors"
"fmt"
"io"
"strings"
"golang.org/x/exp/maps"
)
type stringTable struct {
@@ -88,11 +91,6 @@ func (st *stringTable) lookup(offset uint32) (string, error) {
return st.strings[i], nil
}
func (st *stringTable) Length() int {
last := len(st.offsets) - 1
return int(st.offsets[last]) + len(st.strings[last]) + 1
}
func (st *stringTable) Marshal(w io.Writer) error {
for _, str := range st.strings {
_, err := io.WriteString(w, str)
@@ -107,6 +105,11 @@ func (st *stringTable) Marshal(w io.Writer) error {
return nil
}
// Num returns the number of strings in the table.
func (st *stringTable) Num() int {
return len(st.strings)
}
// search is a copy of sort.Search specialised for uint32.
//
// Licensed under https://go.dev/LICENSE
@@ -126,3 +129,86 @@ func search(ints []uint32, needle uint32) int {
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
return i
}
// stringTableBuilder builds BTF string tables.
type stringTableBuilder struct {
length uint32
strings map[string]uint32
}
// newStringTableBuilder creates a builder with the given capacity.
//
// capacity may be zero.
func newStringTableBuilder(capacity int) *stringTableBuilder {
var stb stringTableBuilder
if capacity == 0 {
// Use the runtime's small default size.
stb.strings = make(map[string]uint32)
} else {
stb.strings = make(map[string]uint32, capacity)
}
// Ensure that the empty string is at index 0.
stb.append("")
return &stb
}
// Add a string to the table.
//
// Adding the same string multiple times will only store it once.
func (stb *stringTableBuilder) Add(str string) (uint32, error) {
if strings.IndexByte(str, 0) != -1 {
return 0, fmt.Errorf("string contains null: %q", str)
}
offset, ok := stb.strings[str]
if ok {
return offset, nil
}
return stb.append(str), nil
}
func (stb *stringTableBuilder) append(str string) uint32 {
offset := stb.length
stb.length += uint32(len(str)) + 1
stb.strings[str] = offset
return offset
}
// Lookup finds the offset of a string in the table.
//
// Returns an error if str hasn't been added yet.
func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
offset, ok := stb.strings[str]
if !ok {
return 0, fmt.Errorf("string %q is not in table", str)
}
return offset, nil
}
// Length returns the length in bytes.
func (stb *stringTableBuilder) Length() int {
return int(stb.length)
}
// AppendEncoded appends the string table to the end of the provided buffer.
func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
n := len(buf)
buf = append(buf, make([]byte, stb.Length())...)
strings := buf[n:]
for str, offset := range stb.strings {
copy(strings[offset:], str)
}
return buf
}
// Copy the string table builder.
func (stb *stringTableBuilder) Copy() *stringTableBuilder {
return &stringTableBuilder{
stb.length,
maps.Clone(stb.strings),
}
}
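// A sketch of the builder's behaviour; offsets are byte offsets into the
// encoded table, and the empty string always lives at offset 0:
//
// stb := newStringTableBuilder(0)
// off, _ := stb.Add("foo") // off == 1
// dup, _ := stb.Add("foo") // dup == 1, stored once
// blob := stb.AppendEncoded(nil) // "\x00foo\x00", 5 bytes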

vendor/github.com/cilium/ebpf/btf/traversal.go generated vendored Normal file

@@ -0,0 +1,141 @@
package btf
import (
"fmt"
"github.com/cilium/ebpf/internal"
)
// Functions to traverse a cyclic graph of types. The below was very useful:
// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
type postorderIterator struct {
// Iteration skips types for which this function returns true.
skip func(Type) bool
// The root type. May be nil if skip(root) is true.
root Type
// Contains types which need to be either walked or yielded.
types typeDeque
// Contains a boolean whether the type has been walked or not.
walked internal.Deque[bool]
// The set of types which has been pushed onto types.
pushed map[Type]struct{}
// The current type. Only valid after a call to Next().
Type Type
}
// postorderTraversal iterates all types reachable from root by visiting the
// leaves of the graph first.
//
// Types for which skip returns true are ignored. skip may be nil.
func postorderTraversal(root Type, skip func(Type) (skip bool)) postorderIterator {
// Avoid allocations for the common case of a skipped root.
if skip != nil && skip(root) {
return postorderIterator{}
}
po := postorderIterator{root: root, skip: skip}
walkType(root, po.push)
return po
}
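// A sketch of the resulting order, assuming a FuncProto with a single
// parameter:
//
// fp := &FuncProto{Return: (*Void)(nil), Params: []FuncParam{{Type: &Int{Size: 4}}}}
// iter := postorderTraversal(fp, nil)
// for iter.Next() {
// 	// yields the *Int and *Void leaves first, fp itself last
// }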
func (po *postorderIterator) push(t *Type) {
if _, ok := po.pushed[*t]; ok || *t == po.root {
return
}
if po.skip != nil && po.skip(*t) {
return
}
if po.pushed == nil {
// Lazily allocate pushed to avoid an allocation for Types without children.
po.pushed = make(map[Type]struct{})
}
po.pushed[*t] = struct{}{}
po.types.Push(t)
po.walked.Push(false)
}
// Next returns true if there is another Type to traverse.
func (po *postorderIterator) Next() bool {
for !po.types.Empty() {
t := po.types.Pop()
if !po.walked.Pop() {
// Push the type again, so that we re-evaluate it in done state
// after all children have been handled.
po.types.Push(t)
po.walked.Push(true)
// Add all direct children to todo.
walkType(*t, po.push)
} else {
// We've walked this type previously, so we now know that all
// children have been handled.
po.Type = *t
return true
}
}
// Only return root once.
po.Type, po.root = po.root, nil
return po.Type != nil
}
// walkType calls fn on each child of typ.
func walkType(typ Type, fn func(*Type)) {
// Explicitly type switch on the most common types to allow the inliner to
// do its work. This avoids allocating intermediate slices from walk() on
// the heap.
switch v := typ.(type) {
case *Void, *Int, *Enum, *Fwd, *Float:
// No children to traverse.
case *Pointer:
fn(&v.Target)
case *Array:
fn(&v.Index)
fn(&v.Type)
case *Struct:
for i := range v.Members {
fn(&v.Members[i].Type)
}
case *Union:
for i := range v.Members {
fn(&v.Members[i].Type)
}
case *Typedef:
fn(&v.Type)
case *Volatile:
fn(&v.Type)
case *Const:
fn(&v.Type)
case *Restrict:
fn(&v.Type)
case *Func:
fn(&v.Type)
case *FuncProto:
fn(&v.Return)
for i := range v.Params {
fn(&v.Params[i].Type)
}
case *Var:
fn(&v.Type)
case *Datasec:
for i := range v.Vars {
fn(&v.Vars[i].Type)
}
case *declTag:
fn(&v.Type)
case *typeTag:
fn(&v.Type)
case *cycle:
// cycle has children, but we ignore them deliberately.
default:
panic(fmt.Sprintf("don't know how to walk Type %T", v))
}
}


@@ -1,6 +1,7 @@
package btf
import (
"errors"
"fmt"
"io"
"math"
@@ -8,14 +9,27 @@ import (
"strings"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
)
const maxTypeDepth = 32
// TypeID identifies a type in a BTF section.
type TypeID uint32
type TypeID = sys.TypeID
// Type represents a type described by BTF.
//
// Identity of Type follows the [Go specification]: two Types are considered
// equal if they have the same concrete type and the same dynamic value, aka
// they point at the same location in memory. This means that the following
// Types are considered distinct even though they have the same "shape".
//
// a := &Int{Size: 1}
// b := &Int{Size: 1}
// a != b
//
// [Go specification]: https://go.dev/ref/spec#Comparison_operators
type Type interface {
// Type can be formatted using the %s and %v verbs. %s outputs only the
// identity of the type, without any detail. %v outputs additional detail.
@@ -35,9 +49,7 @@ type Type interface {
// Make a copy of the type, without copying Type members.
copy() Type
// Enumerate all nested Types. Repeated calls must visit nested
// types in the same order.
walk(*typeDeque)
// New implementations must update walkType.
}
var (
@@ -51,20 +63,11 @@ var (
_ Type = (*Var)(nil)
_ Type = (*Datasec)(nil)
_ Type = (*Float)(nil)
_ Type = (*declTag)(nil)
_ Type = (*typeTag)(nil)
_ Type = (*cycle)(nil)
)
// types is a list of Type.
//
// The order determines the ID of a type.
type types []Type
func (ts types) ByID(id TypeID) (Type, error) {
if int(id) > len(ts) {
return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
}
return ts[id], nil
}
// Void is the unit type of BTF.
type Void struct{}
@@ -72,40 +75,32 @@ func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
func (v *Void) TypeName() string { return "" }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
func (v *Void) walk(*typeDeque) {}
type IntEncoding byte
// Valid IntEncodings.
//
// These may look like they are flags, but they aren't.
const (
Signed IntEncoding = 1 << iota
Char
Bool
Unsigned IntEncoding = 0
Signed IntEncoding = 1
Char IntEncoding = 2
Bool IntEncoding = 4
)
func (ie IntEncoding) IsSigned() bool {
return ie&Signed != 0
}
func (ie IntEncoding) IsChar() bool {
return ie&Char != 0
}
func (ie IntEncoding) IsBool() bool {
return ie&Bool != 0
}
func (ie IntEncoding) String() string {
switch {
case ie.IsChar() && ie.IsSigned():
switch ie {
case Char:
// NB: There is no way to determine signedness for char.
return "char"
case ie.IsChar() && !ie.IsSigned():
return "uchar"
case ie.IsBool():
case Bool:
return "bool"
case ie.IsSigned():
case Signed:
return "signed"
default:
case Unsigned:
return "unsigned"
default:
return fmt.Sprintf("IntEncoding(%d)", byte(ie))
}
}
@@ -126,7 +121,6 @@ func (i *Int) Format(fs fmt.State, verb rune) {
func (i *Int) TypeName() string { return i.Name }
func (i *Int) size() uint32 { return i.Size }
func (i *Int) walk(*typeDeque) {}
func (i *Int) copy() Type {
cpy := *i
return &cpy
@@ -141,9 +135,8 @@ func (p *Pointer) Format(fs fmt.State, verb rune) {
formatType(fs, verb, p, "target=", p.Target)
}
func (p *Pointer) TypeName() string { return "" }
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
func (p *Pointer) TypeName() string { return "" }
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) copy() Type {
cpy := *p
return &cpy
@@ -162,11 +155,6 @@ func (arr *Array) Format(fs fmt.State, verb rune) {
func (arr *Array) TypeName() string { return "" }
func (arr *Array) walk(tdq *typeDeque) {
tdq.push(&arr.Index)
tdq.push(&arr.Type)
}
func (arr *Array) copy() Type {
cpy := *arr
return &cpy
@@ -188,12 +176,6 @@ func (s *Struct) TypeName() string { return s.Name }
func (s *Struct) size() uint32 { return s.Size }
func (s *Struct) walk(tdq *typeDeque) {
for i := range s.Members {
tdq.push(&s.Members[i].Type)
}
}
func (s *Struct) copy() Type {
cpy := *s
cpy.Members = copyMembers(s.Members)
@@ -220,12 +202,6 @@ func (u *Union) TypeName() string { return u.Name }
func (u *Union) size() uint32 { return u.Size }
func (u *Union) walk(tdq *typeDeque) {
for i := range u.Members {
tdq.push(&u.Members[i].Type)
}
}
func (u *Union) copy() Type {
cpy := *u
cpy.Members = copyMembers(u.Members)
@@ -243,6 +219,7 @@ func copyMembers(orig []Member) []Member {
}
type composite interface {
Type
members() []Member
}
@@ -273,7 +250,9 @@ type Member struct {
type Enum struct {
Name string
// Size of the enum value in bytes.
Size uint32
Size uint32
// True if the values should be interpreted as signed integers.
Signed bool
Values []EnumValue
}
@@ -288,11 +267,10 @@ func (e *Enum) TypeName() string { return e.Name }
// It is not a valid Type.
type EnumValue struct {
Name string
Value int32
Value uint64
}
func (e *Enum) size() uint32 { return e.Size }
func (e *Enum) walk(*typeDeque) {}
func (e *Enum) size() uint32 { return e.Size }
func (e *Enum) copy() Type {
cpy := *e
cpy.Values = make([]EnumValue, len(e.Values))
@@ -300,6 +278,21 @@ func (e *Enum) copy() Type {
return &cpy
}
// has64BitValues returns true if the Enum contains a value larger than 32 bits.
// Kernels before 6.0 have enum values that overrun u32 replaced with zeroes.
//
// 64-bit enums have their Enum.Size attributes correctly set to 8, but if we
// use the size attribute as a heuristic during BTF marshaling, we'll emit
// ENUM64s to kernels that don't support them.
func (e *Enum) has64BitValues() bool {
for _, v := range e.Values {
if v.Value > math.MaxUint32 {
return true
}
}
return false
}
// FwdKind is the type of forward declaration.
type FwdKind int
@@ -332,7 +325,6 @@ func (f *Fwd) Format(fs fmt.State, verb rune) {
func (f *Fwd) TypeName() string { return f.Name }
func (f *Fwd) walk(*typeDeque) {}
func (f *Fwd) copy() Type {
cpy := *f
return &cpy
@@ -350,7 +342,6 @@ func (td *Typedef) Format(fs fmt.State, verb rune) {
func (td *Typedef) TypeName() string { return td.Name }
func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
func (td *Typedef) copy() Type {
cpy := *td
return &cpy
@@ -367,8 +358,7 @@ func (v *Volatile) Format(fs fmt.State, verb rune) {
func (v *Volatile) TypeName() string { return "" }
func (v *Volatile) qualify() Type { return v.Type }
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Volatile) qualify() Type { return v.Type }
func (v *Volatile) copy() Type {
cpy := *v
return &cpy
@@ -385,8 +375,7 @@ func (c *Const) Format(fs fmt.State, verb rune) {
func (c *Const) TypeName() string { return "" }
func (c *Const) qualify() Type { return c.Type }
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
func (c *Const) qualify() Type { return c.Type }
func (c *Const) copy() Type {
cpy := *c
return &cpy
@@ -403,8 +392,7 @@ func (r *Restrict) Format(fs fmt.State, verb rune) {
func (r *Restrict) TypeName() string { return "" }
func (r *Restrict) qualify() Type { return r.Type }
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
func (r *Restrict) qualify() Type { return r.Type }
func (r *Restrict) copy() Type {
cpy := *r
return &cpy
@@ -422,13 +410,18 @@ func FuncMetadata(ins *asm.Instruction) *Func {
return fn
}
// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
ins.Metadata.Set(funcInfoMeta{}, fn)
return ins
}
func (f *Func) Format(fs fmt.State, verb rune) {
formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
}
func (f *Func) TypeName() string { return f.Name }
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
func (f *Func) copy() Type {
cpy := *f
return &cpy
@@ -446,13 +439,6 @@ func (fp *FuncProto) Format(fs fmt.State, verb rune) {
func (fp *FuncProto) TypeName() string { return "" }
func (fp *FuncProto) walk(tdq *typeDeque) {
tdq.push(&fp.Return)
for i := range fp.Params {
tdq.push(&fp.Params[i].Type)
}
}
func (fp *FuncProto) copy() Type {
cpy := *fp
cpy.Params = make([]FuncParam, len(fp.Params))
@@ -478,7 +464,6 @@ func (v *Var) Format(fs fmt.State, verb rune) {
func (v *Var) TypeName() string { return v.Name }
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Var) copy() Type {
cpy := *v
return &cpy
@@ -499,12 +484,6 @@ func (ds *Datasec) TypeName() string { return ds.Name }
func (ds *Datasec) size() uint32 { return ds.Size }
func (ds *Datasec) walk(tdq *typeDeque) {
for i := range ds.Vars {
tdq.push(&ds.Vars[i].Type)
}
}
func (ds *Datasec) copy() Type {
cpy := *ds
cpy.Vars = make([]VarSecinfo, len(ds.Vars))
@@ -516,6 +495,7 @@ func (ds *Datasec) copy() Type {
//
// It is not a valid Type.
type VarSecinfo struct {
// Var or Func.
Type Type
Offset uint32
Size uint32
@@ -535,12 +515,48 @@ func (f *Float) Format(fs fmt.State, verb rune) {
func (f *Float) TypeName() string { return f.Name }
func (f *Float) size() uint32 { return f.Size }
func (f *Float) walk(*typeDeque) {}
func (f *Float) copy() Type {
cpy := *f
return &cpy
}
// declTag associates metadata with a declaration.
type declTag struct {
Type Type
Value string
// The index this tag refers to in the target type. For composite types,
// a value of -1 indicates that the tag refers to the whole type. Otherwise
// it indicates which member or argument the tag applies to.
Index int
}
func (dt *declTag) Format(fs fmt.State, verb rune) {
formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index)
}
func (dt *declTag) TypeName() string { return "" }
func (dt *declTag) copy() Type {
cpy := *dt
return &cpy
}
// typeTag associates metadata with a type.
type typeTag struct {
Type Type
Value string
}
func (tt *typeTag) Format(fs fmt.State, verb rune) {
formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value)
}
func (tt *typeTag) TypeName() string { return "" }
func (tt *typeTag) qualify() Type { return tt.Type }
func (tt *typeTag) copy() Type {
cpy := *tt
return &cpy
}
// cycle is a type which had to be elided since it exceeded maxTypeDepth.
type cycle struct {
root Type
@@ -549,7 +565,6 @@ type cycle struct {
func (c *cycle) ID() TypeID { return math.MaxUint32 }
func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
func (c *cycle) TypeName() string { return "" }
func (c *cycle) walk(*typeDeque) {}
func (c *cycle) copy() Type {
cpy := *c
return &cpy
@@ -576,8 +591,11 @@ var (
_ qualifier = (*Const)(nil)
_ qualifier = (*Restrict)(nil)
_ qualifier = (*Volatile)(nil)
_ qualifier = (*typeTag)(nil)
)
var errUnsizedType = errors.New("type is unsized")
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
@@ -612,7 +630,7 @@ func Sizeof(typ Type) (int, error) {
continue
default:
return 0, fmt.Errorf("unsized type %T", typ)
return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType)
}
if n > 0 && elem > math.MaxInt64/n {
@@ -632,16 +650,33 @@ func Sizeof(typ Type) (int, error) {
// alignof returns the alignment of a type.
//
// Currently only supports the subset of types necessary for bitfield relocations.
// Returns an error if the Type can't be aligned, like an integer whose size
// is not a power of two. Currently only supports the subset of types
// necessary for bitfield relocations.
func alignof(typ Type) (int, error) {
var n int
switch t := UnderlyingType(typ).(type) {
case *Enum:
return int(t.size()), nil
n = int(t.size())
case *Int:
return int(t.Size), nil
n = int(t.Size)
case *Array:
return alignof(t.Type)
default:
return 0, fmt.Errorf("can't calculate alignment of %T", t)
}
if !pow(n) {
return 0, fmt.Errorf("alignment value %d is not a power of two", n)
}
return n, nil
}
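// For example (a sketch):
//
// n, _ := alignof(&Int{Size: 4})   // n == 4
// _, err := alignof(&Int{Size: 3}) // error: 3 is not a power of two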
// pow returns true if n is a power of two.
func pow(n int) bool {
return n != 0 && (n&(n-1)) == 0
}
// Transformer modifies a given Type and returns the result.
@@ -655,7 +690,7 @@ type Transformer func(Type) Type
// typ may form a cycle. If transform is not nil, it is called with the
// to be copied type, and the returned value is copied instead.
func Copy(typ Type, transform Transformer) Type {
copies := make(copier)
copies := copier{copies: make(map[Type]Type)}
copies.copy(&typ, transform)
return typ
}
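// A sketch of a deep copy without a transform:
//
// p := &Pointer{Target: &Int{Size: 4}}
// cpy := Copy(p, nil).(*Pointer)
// // cpy != p and cpy.Target != p.Target, but both graphs have the same shape.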
@@ -667,7 +702,7 @@ func copyTypes(types []Type, transform Transformer) []Type {
result := make([]Type, len(types))
copy(result, types)
copies := make(copier)
copies := copier{copies: make(map[Type]Type, len(types))}
for i := range result {
copies.copy(&result[i], transform)
}
@@ -675,13 +710,15 @@ func copyTypes(types []Type, transform Transformer) []Type {
return result
}
type copier map[Type]Type
type copier struct {
copies map[Type]Type
work typeDeque
}
func (c copier) copy(typ *Type, transform Transformer) {
var work typeDeque
for t := typ; t != nil; t = work.pop() {
func (c *copier) copy(typ *Type, transform Transformer) {
for t := typ; t != nil; t = c.work.Pop() {
// *t is the identity of the type.
if cpy := c[*t]; cpy != nil {
if cpy := c.copies[*t]; cpy != nil {
*t = cpy
continue
}
@@ -693,108 +730,41 @@ func (c copier) copy(typ *Type, transform Transformer) {
cpy = (*t).copy()
}
c[*t] = cpy
c.copies[*t] = cpy
*t = cpy
// Mark any nested types for copying.
cpy.walk(&work)
walkType(cpy, c.work.Push)
}
}
// typeDeque keeps track of pointers to types which still
// need to be visited.
type typeDeque struct {
types []*Type
read, write uint64
mask uint64
}
func (dq *typeDeque) empty() bool {
return dq.read == dq.write
}
// push adds a type to the stack.
func (dq *typeDeque) push(t *Type) {
if dq.write-dq.read < uint64(len(dq.types)) {
dq.types[dq.write&dq.mask] = t
dq.write++
return
}
new := len(dq.types) * 2
if new == 0 {
new = 8
}
types := make([]*Type, new)
pivot := dq.read & dq.mask
n := copy(types, dq.types[pivot:])
n += copy(types[n:], dq.types[:pivot])
types[n] = t
dq.types = types
dq.mask = uint64(new) - 1
dq.read, dq.write = 0, uint64(n+1)
}
// shift returns the first element or null.
func (dq *typeDeque) shift() *Type {
if dq.empty() {
return nil
}
index := dq.read & dq.mask
t := dq.types[index]
dq.types[index] = nil
dq.read++
return t
}
// pop returns the last element or null.
func (dq *typeDeque) pop() *Type {
if dq.empty() {
return nil
}
dq.write--
index := dq.write & dq.mask
t := dq.types[index]
dq.types[index] = nil
return t
}
// all returns all elements.
//
// The deque is empty after calling this method.
func (dq *typeDeque) all() []*Type {
length := dq.write - dq.read
types := make([]*Type, 0, length)
for t := dq.shift(); t != nil; t = dq.shift() {
types = append(types, t)
}
return types
}
type typeDeque = internal.Deque[*Type]
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
// it into a graph of Types connected via pointers.
//
// If baseTypes are provided, then the raw types are
// considered to be of a split BTF (e.g., a kernel module).
// If base is provided, then the raw types are considered to be of a split BTF
// (e.g., a kernel module).
//
// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
// units, multiple types may share the same name. A Type may form a cyclic graph
// by pointing at itself.
func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) {
func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([]Type, error) {
types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types
typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1)
// Void is defined to always be type ID 0, and is thus omitted from BTF.
types = append(types, (*Void)(nil))
if baseTypes == nil {
// Void is defined to always be type ID 0, and is thus omitted from BTF.
types = append(types, (*Void)(nil))
} else {
// For split BTF, the next ID is max base BTF type ID + 1
typeIDOffset = TypeID(len(baseTypes))
firstTypeID := TypeID(0)
if base != nil {
var err error
firstTypeID, err = base.nextTypeID()
if err != nil {
return nil, err
}
// Split BTF doesn't contain Void.
types = types[:0]
}
type fixupDef struct {
@@ -803,39 +773,42 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
var fixups []fixupDef
fixup := func(id TypeID, typ *Type) {
if id < TypeID(len(baseTypes)) {
*typ = baseTypes[id]
return
fixup := func(id TypeID, typ *Type) bool {
if id < firstTypeID {
if baseType, err := base.TypeByID(id); err == nil {
*typ = baseType
return true
}
}
idx := id
if baseTypes != nil {
idx = id - TypeID(len(baseTypes))
}
if idx < TypeID(len(types)) {
idx := int(id - firstTypeID)
if idx < len(types) {
// We've already inflated this type, fix it up immediately.
*typ = types[idx]
return
return true
}
fixups = append(fixups, fixupDef{id, typ})
return false
}
type assertion struct {
id TypeID
typ *Type
want reflect.Type
}
var assertions []assertion
assert := func(typ *Type, want reflect.Type) error {
if *typ != nil {
// The type has already been fixed up, check the type immediately.
if reflect.TypeOf(*typ) != want {
return fmt.Errorf("expected %s, got %T", want, *typ)
}
fixupAndAssert := func(id TypeID, typ *Type, want reflect.Type) error {
if !fixup(id, typ) {
assertions = append(assertions, assertion{id, typ, want})
return nil
}
assertions = append(assertions, assertion{typ, want})
// The type has already been fixed up, check the type immediately.
if reflect.TypeOf(*typ) != want {
return fmt.Errorf("type ID %d: expected %s, got %T", id, want, *typ)
}
return nil
}
@@ -903,12 +876,17 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
return members, nil
}
for i, raw := range rawTypes {
var declTags []*declTag
for _, raw := range rawTypes {
var (
id = typeIDOffset + TypeID(i)
id = firstTypeID + TypeID(len(types))
typ Type
)
if id < firstTypeID {
return nil, fmt.Errorf("no more type IDs")
}
name, err := rawStrings.Lookup(raw.NameOff)
if err != nil {
return nil, fmt.Errorf("get name for type id %d: %w", id, err)
@@ -936,14 +914,14 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
typ = arr
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
if err != nil {
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{name, raw.Size(), members}
case kindUnion:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
if err != nil {
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
@@ -952,24 +930,23 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
case kindEnum:
rawvals := raw.data.([]btfEnum)
vals := make([]EnumValue, 0, len(rawvals))
signed := raw.Signed()
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
}
vals = append(vals, EnumValue{
Name: name,
Value: btfVal.Val,
})
value := uint64(btfVal.Val)
if signed {
// Sign extend values to 64 bit.
value = uint64(int32(btfVal.Val))
}
vals = append(vals, EnumValue{name, value})
}
typ = &Enum{name, raw.Size(), vals}
typ = &Enum{name, raw.Size(), signed, vals}
case kindForward:
if raw.KindFlag() {
typ = &Fwd{name, FwdUnion}
} else {
typ = &Fwd{name, FwdStruct}
}
typ = &Fwd{name, raw.FwdKind()}
case kindTypedef:
typedef := &Typedef{name, nil}
@@ -993,8 +970,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
case kindFunc:
fn := &Func{name, nil, raw.Linkage()}
fixup(raw.Type(), &fn.Type)
if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
if err := fixupAndAssert(raw.Type(), &fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
return nil, err
}
typ = fn
@@ -1036,15 +1012,42 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
for i := range vars {
fixup(btfVars[i].Type, &vars[i].Type)
if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil {
return nil, err
}
}
typ = &Datasec{name, raw.SizeType, vars}
typ = &Datasec{name, raw.Size(), vars}
case kindFloat:
typ = &Float{name, raw.Size()}
case kindDeclTag:
btfIndex := raw.data.(*btfDeclTag).ComponentIdx
if uint64(btfIndex) > math.MaxInt {
return nil, fmt.Errorf("type id %d: index exceeds int", id)
}
dt := &declTag{nil, name, int(int32(btfIndex))}
fixup(raw.Type(), &dt.Type)
typ = dt
declTags = append(declTags, dt)
case kindTypeTag:
tt := &typeTag{nil, name}
fixup(raw.Type(), &tt.Type)
typ = tt
case kindEnum64:
rawvals := raw.data.([]btfEnum64)
vals := make([]EnumValue, 0, len(rawvals))
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err)
}
value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32)
vals = append(vals, EnumValue{name, value})
}
typ = &Enum{name, raw.Size(), raw.Signed(), vals}
default:
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
@@ -1053,19 +1056,20 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
for _, fixup := range fixups {
i := int(fixup.id)
if i >= len(types)+len(baseTypes) {
return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
if i < len(baseTypes) {
return nil, fmt.Errorf("fixup for base type id %d is not expected", i)
if fixup.id < firstTypeID {
return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id)
}
*fixup.typ = types[i-len(baseTypes)]
idx := int(fixup.id - firstTypeID)
if idx >= len(types) {
return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
*fixup.typ = types[idx]
}
for _, bitfieldFixup := range bitfieldFixups {
if bitfieldFixup.id < TypeID(len(baseTypes)) {
if bitfieldFixup.id < firstTypeID {
return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
}
@@ -1079,7 +1083,29 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
for _, assertion := range assertions {
if reflect.TypeOf(*assertion.typ) != assertion.want {
return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ)
return nil, fmt.Errorf("type ID %d: expected %s, got %T", assertion.id, assertion.want, *assertion.typ)
}
}
for _, dt := range declTags {
switch t := dt.Type.(type) {
case *Var, *Typedef:
if dt.Index != -1 {
return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index)
}
case composite:
if dt.Index >= len(t.members()) {
return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t)
}
case *Func:
if dt.Index >= len(t.Type.(*FuncProto).Params) {
return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t)
}
default:
return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t)
}
}
@@ -1123,6 +1149,29 @@ func UnderlyingType(typ Type) Type {
return &cycle{typ}
}
// as returns typ if it is of type T. Otherwise it peels qualifiers and Typedefs
// until it finds a T.
//
// Returns the zero value and false if there is no T or if the type is nested
// too deeply.
func as[T Type](typ Type) (T, bool) {
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (typ).(type) {
case T:
return v, true
case qualifier:
typ = v.qualify()
case *Typedef:
typ = v.Type
default:
goto notFound
}
}
notFound:
var zero T
return zero, false
}
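// For example, a sketch peeling a typedef of a qualified int:
//
// td := &Typedef{Name: "u32", Type: &Const{Type: &Int{Size: 4}}}
// i, ok := as[*Int](td) // ok == true, i is the underlying *Int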
type formatState struct {
fmt.State
depth int
@@ -1145,10 +1194,7 @@ func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{})
return
}
// This is the same as %T, but elides the package name. Assumes that
// formattableType is implemented by a pointer receiver.
goTypeName := reflect.TypeOf(t).Elem().Name()
_, _ = io.WriteString(f, goTypeName)
_, _ = io.WriteString(f, internal.GoTypeName(t))
if name := t.TypeName(); name != "" {
// Output BTF type name if present.

vendor/github.com/cilium/ebpf/btf/workarounds.go generated vendored Normal file

@@ -0,0 +1,26 @@
package btf
// datasecResolveWorkaround ensures that certain vars in a Datasec are added
// to a Builder before the Datasec itself. This avoids a bug in kernel BTF validation.
//
// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/
func datasecResolveWorkaround(b *Builder, ds *Datasec) error {
for _, vsi := range ds.Vars {
v, ok := vsi.Type.(*Var)
if !ok {
continue
}
switch v.Type.(type) {
case *Typedef, *Volatile, *Const, *Restrict, *typeTag:
// NB: We must never call Add on a Datasec, otherwise we risk
// infinite recursion.
_, err := b.Add(v.Type)
if err != nil {
return err
}
}
}
return nil
}