chore: update cgroups and ttrpc versions
- update github.com/containerd/cgroups to v1.1.0
- update github.com/containerd/ttrpc to v1.2.1

Signed-off-by: Akhil Mohan <akhilerm@gmail.com>
897 vendor/github.com/cilium/ebpf/btf/btf.go (generated, vendored, new file)
@@ -0,0 +1,897 @@
package btf

import (
	"bufio"
	"bytes"
	"debug/elf"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"reflect"

	"github.com/cilium/ebpf/internal"
	"github.com/cilium/ebpf/internal/sys"
	"github.com/cilium/ebpf/internal/unix"
)

const btfMagic = 0xeB9F

// Errors returned by BTF functions.
var (
	ErrNotSupported   = internal.ErrNotSupported
	ErrNotFound       = errors.New("not found")
	ErrNoExtendedInfo = errors.New("no extended info")
)

// ID represents the unique ID of a BTF object.
type ID = sys.BTFID

// Spec represents decoded BTF.
type Spec struct {
	// Data from .BTF.
	rawTypes []rawType
	strings  *stringTable

	// All types contained by the spec. For the base type, the position of
	// a type in the slice is its ID.
	types types

	// Type IDs indexed by type.
	typeIDs map[Type]TypeID

	// Types indexed by essential name.
	// Includes all struct flavors and types with the same name.
	namedTypes map[essentialName][]Type

	byteOrder binary.ByteOrder
}

type btfHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	TypeOff   uint32
	TypeLen   uint32
	StringOff uint32
	StringLen uint32
}

// typeStart returns the offset from the beginning of the .BTF section
// to the start of its type entries.
func (h *btfHeader) typeStart() int64 {
	return int64(h.HdrLen + h.TypeOff)
}

// stringStart returns the offset from the beginning of the .BTF section
// to the start of its string table.
func (h *btfHeader) stringStart() int64 {
	return int64(h.HdrLen + h.StringOff)
}

// LoadSpec opens file and calls LoadSpecFromReader on it.
func LoadSpec(file string) (*Spec, error) {
	fh, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer fh.Close()

	return LoadSpecFromReader(fh)
}

// LoadSpecFromReader reads from an ELF or a raw BTF blob.
//
// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos
// may be nil.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
	file, err := internal.NewSafeELFFile(rd)
	if err != nil {
		if bo := guessRawBTFByteOrder(rd); bo != nil {
			// Try to parse a naked BTF blob. This will return an error if
			// we encounter a Datasec, since we can't fix it up.
			spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
			return spec, err
		}

		return nil, err
	}

	return loadSpecFromELF(file)
}
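
// Editor's note: an illustrative usage sketch, not part of the vendored file.
// A typical consumer loads a Spec from an object file or raw blob via the
// loaders above; the path is a placeholder and error handling is elided.
//
//	spec, err := btf.LoadSpec("bpf_bpfel.o")
//	if err != nil {
//		// no .BTF section: errors.Is(err, btf.ErrNotFound)
//	}
//	_ = spec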

// LoadSpecAndExtInfosFromReader reads from an ELF.
//
// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
	file, err := internal.NewSafeELFFile(rd)
	if err != nil {
		return nil, nil, err
	}

	spec, err := loadSpecFromELF(file)
	if err != nil {
		return nil, nil, err
	}

	extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
	if err != nil && !errors.Is(err, ErrNotFound) {
		return nil, nil, err
	}

	return spec, extInfos, nil
}

// variableOffsets extracts all symbol offsets from an ELF and indexes them by
// section and variable name.
//
// References to variables in BTF data sections carry unsigned 32-bit offsets.
// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
// beyond this range. Since these symbols cannot be described by BTF info,
// ignore them here.
func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
	symbols, err := file.Symbols()
	if err != nil {
		return nil, fmt.Errorf("can't read symbols: %v", err)
	}

	variableOffsets := make(map[variable]uint32)
	for _, symbol := range symbols {
		if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
			// Ignore things like SHN_ABS
			continue
		}

		if symbol.Value > math.MaxUint32 {
			// VarSecinfo offset is u32, cannot reference symbols in higher regions.
			continue
		}

		if int(symbol.Section) >= len(file.Sections) {
			return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
		}

		secName := file.Sections[symbol.Section].Name
		variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
	}

	return variableOffsets, nil
}

func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
	var (
		btfSection   *elf.Section
		sectionSizes = make(map[string]uint32)
	)

	for _, sec := range file.Sections {
		switch sec.Name {
		case ".BTF":
			btfSection = sec
		default:
			if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
				break
			}

			if sec.Size > math.MaxUint32 {
				return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
			}

			sectionSizes[sec.Name] = uint32(sec.Size)
		}
	}

	if btfSection == nil {
		return nil, fmt.Errorf("btf: %w", ErrNotFound)
	}

	vars, err := variableOffsets(file)
	if err != nil {
		return nil, err
	}

	if btfSection.ReaderAt == nil {
		return nil, fmt.Errorf("compressed BTF is not supported")
	}

	rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
	if err != nil {
		return nil, err
	}

	err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
	if err != nil {
		return nil, err
	}

	return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
}

func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
	baseTypes types, baseStrings *stringTable) (*Spec, error) {

	rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
	if err != nil {
		return nil, err
	}

	return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
}

func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
	baseTypes types) (*Spec, error) {

	types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
	if err != nil {
		return nil, err
	}

	typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))

	return &Spec{
		rawTypes:   rawTypes,
		namedTypes: typesByName,
		typeIDs:    typeIDs,
		types:      types,
		strings:    rawStrings,
		byteOrder:  bo,
	}, nil
}

func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
	namedTypes := 0
	for _, typ := range types {
		if typ.TypeName() != "" {
			// Do a pre-pass to figure out how big types by name has to be.
			// Most types have unique names, so it's OK to ignore essentialName
			// here.
			namedTypes++
		}
	}

	typeIDs := make(map[Type]TypeID, len(types))
	typesByName := make(map[essentialName][]Type, namedTypes)

	for i, typ := range types {
		if name := newEssentialName(typ.TypeName()); name != "" {
			typesByName[name] = append(typesByName[name], typ)
		}
		typeIDs[typ] = TypeID(i) + typeIDOffset
	}

	return typeIDs, typesByName
}

// LoadKernelSpec returns the current kernel's BTF information.
//
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
	fh, err := os.Open("/sys/kernel/btf/vmlinux")
	if err == nil {
		defer fh.Close()

		return loadRawSpec(fh, internal.NativeEndian, nil, nil)
	}

	file, err := findVMLinux()
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return loadSpecFromELF(file)
}

// findVMLinux scans multiple well-known paths for vmlinux kernel images.
func findVMLinux() (*internal.SafeELFFile, error) {
	release, err := internal.KernelRelease()
	if err != nil {
		return nil, err
	}

	// use same list of locations as libbpf
	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
	locations := []string{
		"/boot/vmlinux-%s",
		"/lib/modules/%s/vmlinux-%[1]s",
		"/lib/modules/%s/build/vmlinux",
		"/usr/lib/modules/%s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%s",
		"/usr/lib/debug/boot/vmlinux-%s.debug",
		"/usr/lib/debug/lib/modules/%s/vmlinux",
	}

	for _, loc := range locations {
		file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release))
		if errors.Is(err, os.ErrNotExist) {
			continue
		}
		return file, err
	}

	return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
}
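
// Editor's note: a usage sketch, not part of the vendored file. LoadKernelSpec
// prefers the BTF the kernel exposes at /sys/kernel/btf/vmlinux and only falls
// back to the on-disk vmlinux locations above when that file is missing.
//
//	spec, err := btf.LoadKernelSpec()
//	if errors.Is(err, btf.ErrNotSupported) {
//		// kernel built without CONFIG_DEBUG_INFO_BTF
//	}
//	_ = spec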

// parseBTFHeader parses the header of the .BTF section.
func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
	var header btfHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, errors.New("header length shorter than btfHeader size")
	}

	if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil {
		return nil, fmt.Errorf("header padding: %v", err)
	}

	return &header, nil
}

func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
	buf := new(bufio.Reader)
	for _, bo := range []binary.ByteOrder{
		binary.LittleEndian,
		binary.BigEndian,
	} {
		buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
		if _, err := parseBTFHeader(buf, bo); err == nil {
			return bo
		}
	}

	return nil
}

// parseBTF reads a .BTF section into memory and parses it into a list of
// raw types and a string table.
func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) {
	buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
	header, err := parseBTFHeader(buf, bo)
	if err != nil {
		return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
	}

	rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)),
		baseStrings)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read type names: %w", err)
	}

	buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
	rawTypes, err := readTypes(buf, bo, header.TypeLen)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read types: %w", err)
	}

	return rawTypes, rawStrings, nil
}

type variable struct {
	section string
	name    string
}

func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
	for i, rawType := range rawTypes {
		if rawType.Kind() != kindDatasec {
			continue
		}

		name, err := rawStrings.Lookup(rawType.NameOff)
		if err != nil {
			return err
		}

		if name == ".kconfig" || name == ".ksyms" {
			return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
		}

		if rawTypes[i].SizeType != 0 {
			continue
		}

		size, ok := sectionSizes[name]
		if !ok {
			return fmt.Errorf("data section %s: missing size", name)
		}

		rawTypes[i].SizeType = size

		secinfos := rawType.data.([]btfVarSecinfo)
		for j, secInfo := range secinfos {
			id := int(secInfo.Type - 1)
			if id >= len(rawTypes) {
				return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
			}

			varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
			if err != nil {
				return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
			}

			offset, ok := variableOffsets[variable{name, varName}]
			if !ok {
				return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
			}

			secinfos[j].Offset = offset
		}
	}

	return nil
}

// Copy creates a copy of Spec.
func (s *Spec) Copy() *Spec {
	types := copyTypes(s.types, nil)

	typeIDOffset := TypeID(0)
	if len(s.types) != 0 {
		typeIDOffset = s.typeIDs[s.types[0]]
	}
	typeIDs, typesByName := indexTypes(types, typeIDOffset)

	// NB: Other parts of spec are not copied since they are immutable.
	return &Spec{
		s.rawTypes,
		s.strings,
		types,
		typeIDs,
		typesByName,
		s.byteOrder,
	}
}

type marshalOpts struct {
	ByteOrder        binary.ByteOrder
	StripFuncLinkage bool
}

func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
	var (
		buf       bytes.Buffer
		header    = new(btfHeader)
		headerLen = binary.Size(header)
	)

	// Reserve space for the header. We have to write it last since
	// we don't know the size of the type section yet.
	_, _ = buf.Write(make([]byte, headerLen))

	// Write type section, just after the header.
	for _, raw := range s.rawTypes {
		switch {
		case opts.StripFuncLinkage && raw.Kind() == kindFunc:
			raw.SetLinkage(StaticFunc)
		}

		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
			return nil, fmt.Errorf("can't marshal BTF: %w", err)
		}
	}

	typeLen := uint32(buf.Len() - headerLen)

	// Write string section after type section.
	stringsLen := s.strings.Length()
	buf.Grow(stringsLen)
	if err := s.strings.Marshal(&buf); err != nil {
		return nil, err
	}

	// Fill out the header, and write it out.
	header = &btfHeader{
		Magic:     btfMagic,
		Version:   1,
		Flags:     0,
		HdrLen:    uint32(headerLen),
		TypeOff:   0,
		TypeLen:   typeLen,
		StringOff: typeLen,
		StringLen: uint32(stringsLen),
	}

	raw := buf.Bytes()
	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
	if err != nil {
		return nil, fmt.Errorf("can't write header: %v", err)
	}

	return raw, nil
}

type sliceWriter []byte

func (sw sliceWriter) Write(p []byte) (int, error) {
	if len(p) != len(sw) {
		return 0, errors.New("size doesn't match")
	}

	return copy(sw, p), nil
}
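
// Editor's note: an aside, not part of the vendored file. sliceWriter lets
// marshal patch the reserved header bytes in place via binary.Write once the
// section lengths are known, while rejecting any write that doesn't fill the
// slice exactly:
//
//	hdr := make([]byte, 4)
//	_ = binary.Write(sliceWriter(hdr), binary.LittleEndian, uint32(0xeB9F))
//	// hdr is now {0x9F, 0xeB, 0x00, 0x00}; a differently-sized write errors.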

// TypeByID returns the BTF Type with the given type ID.
//
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
	return s.types.ByID(id)
}

// TypeID returns the ID for a given Type.
//
// Returns an error wrapping ErrNotFound if the type isn't part of the Spec.
func (s *Spec) TypeID(typ Type) (TypeID, error) {
	if _, ok := typ.(*Void); ok {
		// Equality is weird for void, since it is a zero sized type.
		return 0, nil
	}

	id, ok := s.typeIDs[typ]
	if !ok {
		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
	}

	return id, nil
}

// AnyTypesByName returns a list of BTF Types with the given name.
//
// If the BTF blob describes multiple compilation units like vmlinux, multiple
// Types with the same name and kind can exist, but might not describe the same
// data structure.
//
// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
	types := s.namedTypes[newEssentialName(name)]
	if len(types) == 0 {
		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
	}

	// Return a copy to prevent changes to namedTypes.
	result := make([]Type, 0, len(types))
	for _, t := range types {
		// Match against the full name, not just the essential one
		// in case the type being looked up is a struct flavor.
		if t.TypeName() == name {
			result = append(result, t)
		}
	}
	return result, nil
}

// AnyTypeByName returns a Type with the given name.
//
// Returns an error if multiple types of that name exist.
func (s *Spec) AnyTypeByName(name string) (Type, error) {
	types, err := s.AnyTypesByName(name)
	if err != nil {
		return nil, err
	}

	if len(types) > 1 {
		return nil, fmt.Errorf("found multiple types: %v", types)
	}

	return types[0], nil
}

// TypeByName searches for a Type with a specific name. Since multiple
// Types with the same name can exist, the parameter typ is taken to
// narrow down the search in case of a clash.
//
// typ must be a non-nil pointer to an implementation of a Type.
// On success, the address of the found Type will be copied to typ.
//
// Returns an error wrapping ErrNotFound if no matching
// Type exists in the Spec. If multiple candidates are found,
// an error is returned.
func (s *Spec) TypeByName(name string, typ interface{}) error {
	typValue := reflect.ValueOf(typ)
	if typValue.Kind() != reflect.Ptr {
		return fmt.Errorf("%T is not a pointer", typ)
	}

	typPtr := typValue.Elem()
	if !typPtr.CanSet() {
		return fmt.Errorf("%T cannot be set", typ)
	}

	wanted := typPtr.Type()
	if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
		return fmt.Errorf("%T does not satisfy Type interface", typ)
	}

	types, err := s.AnyTypesByName(name)
	if err != nil {
		return err
	}

	var candidate Type
	for _, typ := range types {
		if reflect.TypeOf(typ) != wanted {
			continue
		}

		if candidate != nil {
			return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
		}

		candidate = typ
	}

	if candidate == nil {
		return fmt.Errorf("type %s: %w", name, ErrNotFound)
	}

	typPtr.Set(reflect.ValueOf(candidate))

	return nil
}
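
// Editor's note: a usage sketch, not part of the vendored file. The pointer
// parameter doubles as the kind filter, so a *btf.Struct only matches struct
// types of that name:
//
//	var task *btf.Struct
//	if err := spec.TypeByName("task_struct", &task); err != nil {
//		// not found, or "task_struct" is not a struct
//	}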

// LoadSplitSpecFromReader loads split BTF from a reader.
//
// Types from base are used to resolve references in the split BTF.
// The returned Spec only contains types from the split BTF, not from the base.
func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
	return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
}

// TypesIterator iterates over types of a given spec.
type TypesIterator struct {
	spec  *Spec
	index int
	// The last visited type in the spec.
	Type Type
}

// Iterate returns the types iterator.
func (s *Spec) Iterate() *TypesIterator {
	return &TypesIterator{spec: s, index: 0}
}

// Next returns true as long as there are any remaining types.
func (iter *TypesIterator) Next() bool {
	if len(iter.spec.types) <= iter.index {
		return false
	}

	iter.Type = iter.spec.types[iter.index]
	iter.index++
	return true
}
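
// Editor's note: a usage sketch, not part of the vendored file. The iterator
// exposes the last visited type through the exported Type field:
//
//	for iter := spec.Iterate(); iter.Next(); {
//		fmt.Println(iter.Type.TypeName())
//	}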

// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
	fd *sys.FD

	// Size of the raw BTF in bytes.
	size uint32
}

// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
	if err := haveBTF(); err != nil {
		return nil, err
	}

	if spec.byteOrder != internal.NativeEndian {
		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
	}

	btf, err := spec.marshal(marshalOpts{
		ByteOrder:        internal.NativeEndian,
		StripFuncLinkage: haveFuncLinkage() != nil,
	})
	if err != nil {
		return nil, fmt.Errorf("can't marshal BTF: %w", err)
	}

	if uint64(len(btf)) > math.MaxUint32 {
		return nil, errors.New("BTF exceeds the maximum size")
	}

	attr := &sys.BtfLoadAttr{
		Btf:     sys.NewSlicePointer(btf),
		BtfSize: uint32(len(btf)),
	}

	fd, err := sys.BtfLoad(attr)
	if err != nil {
		logBuf := make([]byte, 64*1024)
		attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
		attr.BtfLogSize = uint32(len(logBuf))
		attr.BtfLogLevel = 1
		// NB: The syscall will never return ENOSPC as of 5.18-rc4.
		_, _ = sys.BtfLoad(attr)
		return nil, internal.ErrorWithLog(err, logBuf)
	}

	return &Handle{fd, attr.BtfSize}, nil
}
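
// Editor's note: an aside, not part of the vendored file, on the retry-with-log
// pattern above. The first BPF_BTF_LOAD attempt carries no log buffer so the
// common success path avoids the 64 KiB allocation; the load is repeated with
// a buffer only to recover the kernel's explanation for the failure:
//
//	fd, err := sys.BtfLoad(attr)                   // fast path, no log buffer
//	if err != nil {
//		attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
//		attr.BtfLogLevel = 1
//		_, _ = sys.BtfLoad(attr)               // repeated only for the log
//	}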

// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
	fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
		Id: uint32(id),
	})
	if err != nil {
		return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
	}

	info, err := newHandleInfoFromFD(fd)
	if err != nil {
		_ = fd.Close()
		return nil, err
	}

	return &Handle{fd, info.size}, nil
}

// Spec parses the kernel BTF into Go types.
//
// base is used to decode split BTF and may be nil.
func (h *Handle) Spec(base *Spec) (*Spec, error) {
	var btfInfo sys.BtfInfo
	btfBuffer := make([]byte, h.size)
	btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)

	if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
		return nil, err
	}

	var baseTypes types
	var baseStrings *stringTable
	if base != nil {
		baseTypes = base.types
		baseStrings = base.strings
	}

	return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
}

// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
	if h == nil {
		return nil
	}

	return h.fd.Close()
}

// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
	return h.fd.Int()
}

// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
	return newHandleInfoFromFD(h.fd)
}

func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
	const minHeaderLength = 24

	typesLen := uint32(binary.Size(types))
	header := btfHeader{
		Magic:     btfMagic,
		Version:   1,
		HdrLen:    minHeaderLength,
		TypeOff:   0,
		TypeLen:   typesLen,
		StringOff: typesLen,
		StringLen: uint32(len(strings)),
	}

	buf := new(bytes.Buffer)
	_ = binary.Write(buf, bo, &header)
	_ = binary.Write(buf, bo, types)
	buf.Write(strings)

	return buf.Bytes()
}

var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
	var (
		types struct {
			Integer btfType
			Var     btfType
			btfVar  struct{ Linkage uint32 }
		}
		strings = []byte{0, 'a', 0}
	)

	// We use a BTF_KIND_VAR here, to make sure that
	// the kernel understands BTF at least as well as we
	// do. BTF_KIND_VAR was introduced ~5.1.
	types.Integer.SetKind(kindPointer)
	types.Var.NameOff = 1
	types.Var.SetKind(kindVar)
	types.Var.SizeType = 1

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
		Btf:     sys.NewSlicePointer(btf),
		BtfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		// Treat both EINVAL and EPERM as not supported: loading the program
		// might still succeed without BTF.
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})

var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
	if err := haveBTF(); err != nil {
		return err
	}

	var (
		types struct {
			FuncProto btfType
			Func      btfType
		}
		strings = []byte{0, 'a', 0}
	)

	types.FuncProto.SetKind(kindFuncProto)
	types.Func.SetKind(kindFunc)
	types.Func.SizeType = 1 // aka FuncProto
	types.Func.NameOff = 1
	types.Func.SetLinkage(GlobalFunc)

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
		Btf:     sys.NewSlicePointer(btf),
		BtfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})
343 vendor/github.com/cilium/ebpf/btf/btf_types.go (generated, vendored, new file)
@@ -0,0 +1,343 @@
package btf

import (
	"encoding/binary"
	"fmt"
	"io"
)

//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage

// btfKind describes a Type.
type btfKind uint8

// Equivalents of the BTF_KIND_* constants.
const (
	kindUnknown btfKind = iota
	kindInt
	kindPointer
	kindArray
	kindStruct
	kindUnion
	kindEnum
	kindForward
	kindTypedef
	kindVolatile
	kindConst
	kindRestrict
	// Added ~4.20
	kindFunc
	kindFuncProto
	// Added ~5.1
	kindVar
	kindDatasec
	// Added ~5.13
	kindFloat
)

// FuncLinkage describes BTF function linkage metadata.
type FuncLinkage int

// Equivalent of enum btf_func_linkage.
const (
	StaticFunc FuncLinkage = iota // static
	GlobalFunc                    // global
	ExternFunc                    // extern
)

// VarLinkage describes BTF variable linkage metadata.
type VarLinkage int

const (
	StaticVar VarLinkage = iota // static
	GlobalVar                   // global
	ExternVar                   // extern
)

const (
	btfTypeKindShift     = 24
	btfTypeKindLen       = 5
	btfTypeVlenShift     = 0
	btfTypeVlenMask      = 16
	btfTypeKindFlagShift = 31
	btfTypeKindFlagMask  = 1
)

// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
	NameOff uint32
	/* "info" bits arrangement
	 * bits 0-15: vlen (e.g. # of struct's members), linkage
	 * bits 16-23: unused
	 * bits 24-28: kind (e.g. int, ptr, array...etc)
	 * bits 29-30: unused
	 * bit 31: kind_flag, currently used by
	 *         struct, union and fwd
	 */
	Info uint32
	/* "size" is used by INT, ENUM, STRUCT and UNION.
	 * "size" tells the size of the type it is describing.
	 *
	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
	 * FUNC and FUNC_PROTO.
	 * "type" is a type_id referring to another type.
	 */
	SizeType uint32
}

func (k btfKind) String() string {
	switch k {
	case kindUnknown:
		return "Unknown"
	case kindInt:
		return "Integer"
	case kindPointer:
		return "Pointer"
	case kindArray:
		return "Array"
	case kindStruct:
		return "Struct"
	case kindUnion:
		return "Union"
	case kindEnum:
		return "Enumeration"
	case kindForward:
		return "Forward"
	case kindTypedef:
		return "Typedef"
	case kindVolatile:
		return "Volatile"
	case kindConst:
		return "Const"
	case kindRestrict:
		return "Restrict"
	case kindFunc:
		return "Function"
	case kindFuncProto:
		return "Function Proto"
	case kindVar:
		return "Variable"
	case kindDatasec:
		return "Section"
	case kindFloat:
		return "Float"
	default:
		return fmt.Sprintf("Unknown (%d)", k)
	}
}

func mask(len uint32) uint32 {
	return (1 << len) - 1
}

func readBits(value, len, shift uint32) uint32 {
	return (value >> shift) & mask(len)
}

func writeBits(value, len, shift, new uint32) uint32 {
	value &^= mask(len) << shift
	value |= (new & mask(len)) << shift
	return value
}

func (bt *btfType) info(len, shift uint32) uint32 {
	return readBits(bt.Info, len, shift)
}

func (bt *btfType) setInfo(value, len, shift uint32) {
	bt.Info = writeBits(bt.Info, len, shift, value)
}

func (bt *btfType) Kind() btfKind {
	return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}

func (bt *btfType) SetKind(kind btfKind) {
	bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}

func (bt *btfType) Vlen() int {
	return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetVlen(vlen int) {
	bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) KindFlag() bool {
	return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}

func (bt *btfType) Linkage() FuncLinkage {
	return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetLinkage(linkage FuncLinkage) {
	bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) Type() TypeID {
	// TODO: Panic here if wrong kind?
	return TypeID(bt.SizeType)
}

func (bt *btfType) Size() uint32 {
	// TODO: Panic here if wrong kind?
	return bt.SizeType
}

func (bt *btfType) SetSize(size uint32) {
	bt.SizeType = size
}
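
// Editor's note: a worked example, not part of the vendored file, of the Info
// bit packing above. kindVar is 14, so SetKind stores 14 in bits 24-28:
//
//	var bt btfType
//	bt.SetKind(kindVar)      // Info == 14 << 24 == 0x0e000000
//	_ = bt.Kind() == kindVar // readBits(Info, 5, 24) == 14
//	bt.SetVlen(3)            // Info == 0x0e000003 (vlen lives in bits 0-15)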

type rawType struct {
	btfType
	data interface{}
}

func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
	if err := binary.Write(w, bo, &rt.btfType); err != nil {
		return err
	}

	if rt.data == nil {
		return nil
	}

	return binary.Write(w, bo, rt.data)
}

// btfInt encodes additional data for integers.
//
//	? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
//	? = undefined
//	e = encoding
//	o = offset (bitfields?)
//	b = bits (bitfields)
type btfInt struct {
	Raw uint32
}

const (
	btfIntEncodingLen   = 4
	btfIntEncodingShift = 24
	btfIntOffsetLen     = 8
	btfIntOffsetShift   = 16
	btfIntBitsLen       = 8
	btfIntBitsShift     = 0
)

func (bi btfInt) Encoding() IntEncoding {
	return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
}

func (bi *btfInt) SetEncoding(e IntEncoding) {
	bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e))
}

func (bi btfInt) Offset() Bits {
	return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
}

func (bi *btfInt) SetOffset(offset uint32) {
	bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
}

func (bi btfInt) Bits() Bits {
	return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
}

func (bi *btfInt) SetBits(bits byte) {
	bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
}

type btfArray struct {
	Type      TypeID
	IndexType TypeID
	Nelems    uint32
}

type btfMember struct {
	NameOff uint32
	Type    TypeID
	Offset  uint32
}

type btfVarSecinfo struct {
	Type   TypeID
	Offset uint32
	Size   uint32
}

type btfVariable struct {
	Linkage uint32
}

type btfEnum struct {
	NameOff uint32
	Val     int32
}

type btfParam struct {
	NameOff uint32
	Type    TypeID
}

func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
	var header btfType
	// Because of the interleaving between types and struct members it is
	// difficult to precompute the number of raw types this will parse, so
	// this "guess" is a good first estimation.
	sizeOfbtfType := uintptr(binary.Size(btfType{}))
	tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
	types := make([]rawType, 0, tyMaxCount)

	for id := TypeID(1); ; id++ {
		if err := binary.Read(r, bo, &header); err == io.EOF {
			return types, nil
		} else if err != nil {
			return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
		}

		var data interface{}
		switch header.Kind() {
		case kindInt:
			data = new(btfInt)
		case kindPointer:
		case kindArray:
			data = new(btfArray)
		case kindStruct:
			fallthrough
		case kindUnion:
			data = make([]btfMember, header.Vlen())
		case kindEnum:
			data = make([]btfEnum, header.Vlen())
		case kindForward:
		case kindTypedef:
		case kindVolatile:
		case kindConst:
		case kindRestrict:
		case kindFunc:
		case kindFuncProto:
			data = make([]btfParam, header.Vlen())
		case kindVar:
			data = new(btfVariable)
		case kindDatasec:
			data = make([]btfVarSecinfo, header.Vlen())
		case kindFloat:
		default:
			return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
		}

		if data == nil {
			types = append(types, rawType{header, nil})
			continue
		}

		if err := binary.Read(r, bo, data); err != nil {
			return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
		}

		types = append(types, rawType{header, data})
	}
}
44 vendor/github.com/cilium/ebpf/btf/btf_types_string.go (generated, vendored, new file)
@@ -0,0 +1,44 @@
// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.

package btf

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[StaticFunc-0]
	_ = x[GlobalFunc-1]
	_ = x[ExternFunc-2]
}

const _FuncLinkage_name = "staticglobalextern"

var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}

func (i FuncLinkage) String() string {
	if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
		return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[StaticVar-0]
	_ = x[GlobalVar-1]
	_ = x[ExternVar-2]
}

const _VarLinkage_name = "staticglobalextern"

var _VarLinkage_index = [...]uint8{0, 6, 12, 18}

func (i VarLinkage) String() string {
	if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
		return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
}
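
// Editor's note: an illustrative aside, not part of the generated file. The
// stringer layout concatenates all names into one string and keeps the cut
// points in the index array, so String() is a single slice expression:
//
//	_FuncLinkage_name[_FuncLinkage_index[1]:_FuncLinkage_index[2]]
//	// == "staticglobalextern"[6:12] == "global"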
972 vendor/github.com/cilium/ebpf/btf/core.go (generated, vendored, new file)
@@ -0,0 +1,972 @@
package btf

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"reflect"
	"strconv"
	"strings"

	"github.com/cilium/ebpf/asm"
)

// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.

// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
	kind   coreKind
	local  uint32
	target uint32
	// True if there is no valid fixup. The instruction is replaced with an
	// invalid dummy.
	poison bool
	// True if the validation of the local value should be skipped. Used by
	// some kinds of bitfield relocations.
	skipLocalValidation bool
}

func (f *COREFixup) equal(other COREFixup) bool {
	return f.local == other.local && f.target == other.target
}

func (f *COREFixup) String() string {
	if f.poison {
		return fmt.Sprintf("%s=poison", f.kind)
	}
	return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
}

func (f *COREFixup) Apply(ins *asm.Instruction) error {
	if f.poison {
		const badRelo = 0xbad2310

		*ins = asm.BuiltinFunc(badRelo).Call()
		return nil
	}

	switch class := ins.OpCode.Class(); class {
	case asm.LdXClass, asm.StClass, asm.StXClass:
		if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
		}

		if f.target > math.MaxInt16 {
			return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
		}

		ins.Offset = int16(f.target)

	case asm.LdClass:
		if !ins.IsConstantLoad(asm.DWord) {
			return fmt.Errorf("not a dword-sized immediate load")
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
		}

		ins.Constant = int64(f.target)

	case asm.ALUClass:
		if ins.OpCode.ALUOp() == asm.Swap {
			return fmt.Errorf("relocation against swap")
		}

		fallthrough

	case asm.ALU64Class:
		if src := ins.OpCode.Source(); src != asm.ImmSource {
			return fmt.Errorf("invalid source %s", src)
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
		}

		if f.target > math.MaxInt32 {
			return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
		}

		ins.Constant = int64(f.target)

	default:
		return fmt.Errorf("invalid class %s", class)
	}

	return nil
}

func (f COREFixup) isNonExistant() bool {
	return f.kind.checksForExistence() && f.target == 0
}

// coreKind is the type of CO-RE relocation as specified in BPF source code.
type coreKind uint32

const (
	reloFieldByteOffset coreKind = iota /* field byte offset */
	reloFieldByteSize                   /* field size in bytes */
	reloFieldExists                     /* field existence in target kernel */
	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
	reloTypeIDLocal                     /* type ID in local BPF object */
	reloTypeIDTarget                    /* type ID in target kernel */
	reloTypeExists                      /* type existence in target kernel */
	reloTypeSize                        /* type size in bytes */
	reloEnumvalExists                   /* enum value existence in target kernel */
	reloEnumvalValue                    /* enum value integer value */
)

func (k coreKind) checksForExistence() bool {
	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}

func (k coreKind) String() string {
	switch k {
	case reloFieldByteOffset:
		return "byte_off"
	case reloFieldByteSize:
		return "byte_sz"
	case reloFieldExists:
		return "field_exists"
	case reloFieldSigned:
		return "signed"
	case reloFieldLShiftU64:
		return "lshift_u64"
	case reloFieldRShiftU64:
		return "rshift_u64"
	case reloTypeIDLocal:
		return "local_type_id"
	case reloTypeIDTarget:
		return "target_type_id"
	case reloTypeExists:
		return "type_exists"
	case reloTypeSize:
		return "type_size"
	case reloEnumvalExists:
		return "enumval_exists"
	case reloEnumvalValue:
		return "enumval_value"
	default:
		return "unknown"
	}
}

// CORERelocate calculates the difference in types between local and target.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
	if local.byteOrder != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
	}

	type reloGroup struct {
		relos []*CORERelocation
		// Position of each relocation in relos.
		indices []int
	}

	// Split relocations into per Type lists.
	relosByType := make(map[Type]*reloGroup)
	result := make([]COREFixup, len(relos))
	for i, relo := range relos {
		if relo.kind == reloTypeIDLocal {
			// Filtering out reloTypeIDLocal here makes our lives a lot easier
			// down the line, since it doesn't have a target at all.
			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
			}

			id, err := local.TypeID(relo.typ)
			if err != nil {
				return nil, fmt.Errorf("%s: %w", relo.kind, err)
			}

			result[i] = COREFixup{
				kind:   relo.kind,
				local:  uint32(id),
				target: uint32(id),
			}
			continue
		}

		group, ok := relosByType[relo.typ]
		if !ok {
			group = &reloGroup{}
			relosByType[relo.typ] = group
		}
		group.relos = append(group.relos, relo)
		group.indices = append(group.indices, i)
	}

	for localType, group := range relosByType {
		localTypeName := localType.TypeName()
		if localTypeName == "" {
			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
		}

		targets := target.namedTypes[newEssentialName(localTypeName)]
		fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", localType, err)
		}

		for j, index := range group.indices {
			result[index] = fixups[j]
		}
	}

	return result, nil
}

var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")

// coreCalculateFixups calculates the fixups for the given relocations using
// the "best" target.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
	localID, err := localSpec.TypeID(local)
	if err != nil {
		return nil, fmt.Errorf("local type ID: %w", err)
	}
	local = Copy(local, UnderlyingType)

	bestScore := len(relos)
	var bestFixups []COREFixup
	for i := range targets {
		targetID, err := targetSpec.TypeID(targets[i])
		if err != nil {
			return nil, fmt.Errorf("target type ID: %w", err)
		}
		target := Copy(targets[i], UnderlyingType)

		score := 0 // lower is better
		fixups := make([]COREFixup, 0, len(relos))
		for _, relo := range relos {
			fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
			if err != nil {
				return nil, fmt.Errorf("target %s: %w", target, err)
			}
			if fixup.poison || fixup.isNonExistant() {
				score++
			}
			fixups = append(fixups, fixup)
		}

		if score > bestScore {
			// We have a better target already, ignore this one.
			continue
		}

		if score < bestScore {
			// This is the best target yet, use it.
			bestScore = score
			bestFixups = fixups
			continue
		}

		// Some other target has the same score as the current one. Make sure
		// the fixups agree with each other.
		for i, fixup := range bestFixups {
			if !fixup.equal(fixups[i]) {
				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
			}
		}
	}

	if bestFixups == nil {
		// Nothing at all matched, probably because there are no suitable
		// targets at all.
		//
		// Poison everything except checksForExistence.
		bestFixups = make([]COREFixup, len(relos))
		for i, relo := range relos {
			if relo.kind.checksForExistence() {
				bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
			} else {
				bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
			}
		}
	}

	return bestFixups, nil
}

// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
	fixup := func(local, target uint32) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target}, nil
	}
	fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
	}
	poison := func() (COREFixup, error) {
		if relo.kind.checksForExistence() {
			return fixup(1, 0)
		}
		return COREFixup{kind: relo.kind, poison: true}, nil
	}
	zero := COREFixup{}

	switch relo.kind {
	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
		}

		err := coreAreTypesCompatible(local, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
		}

		switch relo.kind {
		case reloTypeExists:
			return fixup(1, 1)

		case reloTypeIDTarget:
			return fixup(uint32(localID), uint32(targetID))

		case reloTypeSize:
			localSize, err := Sizeof(local)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(target)
			if err != nil {
				return zero, err
			}

			return fixup(uint32(localSize), uint32(targetSize))
		}

	case reloEnumvalValue, reloEnumvalExists:
		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
		}

		switch relo.kind {
		case reloEnumvalExists:
			return fixup(1, 1)

		case reloEnumvalValue:
			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
		}

	case reloFieldSigned:
		switch local.(type) {
		case *Enum:
			return fixup(1, 1)
		case *Int:
			return fixup(
				uint32(local.(*Int).Encoding&Signed),
				uint32(target.(*Int).Encoding&Signed),
			)
		default:
			return fixupWithoutValidation(0, 0)
		}

	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
		if _, ok := target.(*Fwd); ok {
			// We can't relocate fields using a forward declaration, so
			// skip it. If a non-forward declaration is present in the BTF
			// we'll find it in one of the other iterations.
			return poison()
		}

		localField, targetField, err := coreFindField(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, fmt.Errorf("target %s: %w", target, err)
		}

		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
			f.skipLocalValidation = localField.bitfieldSize > 0
			return f, err
		}

		switch relo.kind {
		case reloFieldExists:
			return fixup(1, 1)

		case reloFieldByteOffset:
			return maybeSkipValidation(fixup(localField.offset, targetField.offset))

		case reloFieldByteSize:
			localSize, err := Sizeof(localField.Type)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(targetField.Type)
			if err != nil {
				return zero, err
			}
			return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))

		case reloFieldLShiftU64:
			var target uint32
			if byteOrder == binary.LittleEndian {
				targetSize, err := targetField.sizeBits()
				if err != nil {
					return zero, err
				}

				target = uint32(64 - targetField.bitfieldOffset - targetSize)
			} else {
				loadWidth, err := Sizeof(targetField.Type)
				if err != nil {
					return zero, err
				}

				target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
			}
			return fixupWithoutValidation(0, target)

		case reloFieldRShiftU64:
			targetSize, err := targetField.sizeBits()
			if err != nil {
				return zero, err
			}

			return fixupWithoutValidation(0, uint32(64-targetSize))
		}
	}

	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
}

/* coreAccessor contains a path through a struct. It contains at least one index.
 *
 * The interpretation depends on the kind of the relocation. The following is
 * taken from struct bpf_core_relo in libbpf_internal.h:
 *
 * - for field-based relocations, string encodes an accessed field using
 *   a sequence of field and array indices, separated by colon (:). It's
 *   conceptually very close to LLVM's getelementptr ([0]) instruction's
 *   arguments for identifying offset to a field.
 * - for type-based relocations, strings is expected to be just "0";
 * - for enum value-based relocations, string contains an index of enum
 *   value within its enum type;
 *
 * Example to provide a better feel.
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 */
type coreAccessor []int

func parseCOREAccessor(accessor string) (coreAccessor, error) {
	if accessor == "" {
		return nil, fmt.Errorf("empty accessor")
	}

	parts := strings.Split(accessor, ":")
	result := make(coreAccessor, 0, len(parts))
	for _, part := range parts {
		// 31 bits to avoid overflowing int on 32 bit platforms.
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %s", part, err)
		}

		result = append(result, int(index))
	}

	return result, nil
}

func (ca coreAccessor) String() string {
	strs := make([]string, 0, len(ca))
	for _, i := range ca {
		strs = append(strs, strconv.Itoa(i))
	}
	return strings.Join(strs, ":")
}

func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
	e, ok := t.(*Enum)
	if !ok {
		return nil, fmt.Errorf("not an enum: %s", t)
	}

	if len(ca) > 1 {
		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
	}

	i := ca[0]
	if i >= len(e.Values) {
		return nil, fmt.Errorf("invalid index %d for %s", i, e)
	}

	return &e.Values[i], nil
}
// coreField represents the position of a "child" of a composite type from the
|
||||
// start of that type.
|
||||
//
|
||||
// /- start of composite
|
||||
// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
|
||||
// \- start of field end of field -/
|
||||
type coreField struct {
|
||||
Type Type
|
||||
|
||||
// The position of the field from the start of the composite type in bytes.
|
||||
offset uint32
|
||||
|
||||
// The offset of the bitfield in bits from the start of the field.
|
||||
bitfieldOffset Bits
|
||||
|
||||
// The size of the bitfield in bits.
|
||||
//
|
||||
// Zero if the field is not a bitfield.
|
||||
bitfieldSize Bits
|
||||
}
|
||||
|
||||
func (cf *coreField) adjustOffsetToNthElement(n int) error {
|
||||
size, err := Sizeof(cf.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cf.offset += uint32(n) * uint32(size)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cf *coreField) adjustOffsetBits(offset Bits) error {
|
||||
align, err := alignof(cf.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We can compute the load offset by:
|
||||
// 1) converting the bit offset to bytes with a flooring division.
|
||||
// 2) dividing and multiplying that offset by the alignment, yielding the
|
||||
// load size aligned offset.
|
||||
offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
|
||||
|
||||
// The number of bits remaining is the bit offset less the number of bits
|
||||
// we can "skip" with the aligned offset.
|
||||
cf.bitfieldOffset = offset - Bits(offsetBytes*8)
|
||||
|
||||
// We know that cf.offset is aligned at to at least align since we get it
|
||||
// from the compiler via BTF. Adding an aligned offsetBytes preserves the
|
||||
// alignment.
|
||||
cf.offset += offsetBytes
|
||||
return nil
|
||||
}
|
||||
|
||||
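// Worked example: for a bitfield at bit offset 17 inside a field whose type
// has 4-byte alignment, offsetBytes = (17/8)/4*4 = 0, so the load stays at
// the start of the field and bitfieldOffset becomes 17 - 0*8 = 17 bits.
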
func (cf *coreField) sizeBits() (Bits, error) {
	if cf.bitfieldSize > 0 {
		return cf.bitfieldSize, nil
	}

	// Someone is trying to access a non-bitfield via a bit shift relocation.
	// This happens when a field changes from a bitfield to a regular field
	// between kernel versions. Synthesise the size to make the shifts work.
	size, err := Sizeof(cf.Type)
	if err != nil {
		return 0, err
	}
	return Bits(size * 8), nil
}

// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
	local := coreField{Type: localT}
	target := coreField{Type: targetT}

	// The first index is used to offset a pointer of the base type like
	// when accessing an array.
	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
	}

	var localMaybeFlex, targetMaybeFlex bool
	for i, acc := range localAcc[1:] {
		switch localType := local.Type.(type) {
		case composite:
			// For composite types acc is used to find the field in the local type,
			// and then we try to find a field in target with the same name.
			localMembers := localType.members()
			if acc >= len(localMembers) {
				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
			}

			localMember := localMembers[acc]
			if localMember.Name == "" {
				_, ok := localMember.Type.(composite)
				if !ok {
					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
				}

				// This is an anonymous struct or union, ignore it.
				local = coreField{
					Type:   localMember.Type,
					offset: local.offset + localMember.Offset.Bytes(),
				}
				localMaybeFlex = false
				continue
			}

			targetType, ok := target.Type.(composite)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
			}

			targetMember, last, err := coreFindMember(targetType, localMember.Name)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			local = coreField{
				Type:         localMember.Type,
				offset:       local.offset,
				bitfieldSize: localMember.BitfieldSize,
			}
			localMaybeFlex = acc == len(localMembers)-1

			target = coreField{
				Type:         targetMember.Type,
				offset:       target.offset,
				bitfieldSize: targetMember.BitfieldSize,
			}
			targetMaybeFlex = last

			if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
				local.offset += localMember.Offset.Bytes()
				target.offset += targetMember.Offset.Bytes()
				break
			}

			// Either of the members is a bitfield. Make sure we're at the
			// end of the accessor.
			if next := i + 1; next < len(localAcc[1:]) {
				return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
			}

			if err := local.adjustOffsetBits(localMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

			if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

		case *Array:
			// For arrays, acc is the index in the target.
			targetType, ok := target.Type.(*Array)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
			}

			if localType.Nelems == 0 && !localMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
			}
			if targetType.Nelems == 0 && !targetMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
			}

			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
			}
			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
			}

			local = coreField{
				Type:   localType.Type,
				offset: local.offset,
			}
			localMaybeFlex = false

			if err := local.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

			target = coreField{
				Type:   targetType.Type,
				offset: target.offset,
			}
			targetMaybeFlex = false

			if err := target.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

		default:
			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
		}

		if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
			return coreField{}, coreField{}, err
		}
	}

	return local, target, nil
}

// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
func coreFindMember(typ composite, name string) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	type offsetTarget struct {
		composite
		offset Bits
	}

	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		if visited[target.composite] {
			continue
		}
		if len(visited) >= maxTypeDepth {
			// This check is different from libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}
		visited[target.composite] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := member.Type.(composite)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}

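// For example, searching struct { int a; struct { int b; }; } for "b" visits
// the outer struct first, queues the anonymous inner struct together with its
// offset, and returns b with the accumulated offset on the second pass.
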
// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
	localValue, err := localAcc.enumValue(local)
	if err != nil {
		return nil, nil, err
	}

	targetEnum, ok := target.(*Enum)
	if !ok {
		return nil, nil, errImpossibleRelocation
	}

	localName := newEssentialName(localValue.Name)
	for i, targetValue := range targetEnum.Values {
		if newEssentialName(targetValue.Name) != localName {
			continue
		}

		return localValue, &targetEnum.Values[i], nil
	}

	return nil, nil, errImpossibleRelocation
}

/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if types are not compatible.
 */
func coreAreTypesCompatible(localType Type, targetType Type) error {
	var (
		localTs, targetTs typeDeque
		l, t              = &localType, &targetType
		depth             = 0
	)

	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
		if depth >= maxTypeDepth {
			return errors.New("types are nested too deep")
		}

		localType = *l
		targetType = *t

		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
			// Nothing to do here

		case *Pointer, *Array:
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		case *FuncProto:
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
			}

			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		default:
			return fmt.Errorf("unsupported type %T", localType)
		}
	}

	if l != nil {
		return fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return fmt.Errorf("dangling target type %T", *t)
	}

	return nil
}

/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
 *
 * The comment below is from bpf_core_fields_are_compat in libbpf.c:
 *
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 * - any two STRUCTs/UNIONs are compatible and can be mixed;
 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
 * - any two PTRs are always compatible;
 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *   least one of enums should be anonymous;
 * - for ENUMs, check sizes, names are ignored;
 * - for INT, size and signedness are ignored;
 * - any two FLOATs are always compatible;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 *   [ NB: coreAreMembersCompatible doesn't recurse, this check is done
 *     by coreFindField. ]
 * - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if the members are not compatible.
 */
func coreAreMembersCompatible(localType Type, targetType Type) error {
	doNamesMatch := func(a, b string) error {
		if a == "" || b == "" {
			// allow anonymous and named type to match
			return nil
		}

		if newEssentialName(a) == newEssentialName(b) {
			return nil
		}

		return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
	}

	_, lok := localType.(composite)
	_, tok := targetType.(composite)
	if lok && tok {
		return nil
	}

	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
	}

	switch lv := localType.(type) {
	case *Array, *Pointer, *Float, *Int:
		return nil

	case *Enum:
		tv := targetType.(*Enum)
		return doNamesMatch(lv.Name, tv.Name)

	case *Fwd:
		tv := targetType.(*Fwd)
		return doNamesMatch(lv.Name, tv.Name)

	default:
		return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
	}
}
5
vendor/github.com/cilium/ebpf/btf/doc.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
// Package btf handles data encoded according to the BPF Type Format.
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
package btf
721
vendor/github.com/cilium/ebpf/btf/ext_info.go
generated
vendored
Normal file
@@ -0,0 +1,721 @@
package btf

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
	"sort"

	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/internal"
)

// ExtInfos contains ELF section metadata.
type ExtInfos struct {
	// The slices are sorted by offset in ascending order.
	funcInfos       map[string][]funcInfo
	lineInfos       map[string][]lineInfo
	relocationInfos map[string][]coreRelocationInfo
}

// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
//
// Returns an error wrapping ErrNotFound if no ext infos are present.
func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
	section := file.Section(".BTF.ext")
	if section == nil {
		return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
	}

	if section.ReaderAt == nil {
		return nil, fmt.Errorf("compressed ext_info is not supported")
	}

	return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
}

// loadExtInfos parses bare ext infos.
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
	// Open unbuffered section reader. binary.Read() calls io.ReadFull on
	// the header structs, resulting in one syscall per header.
	headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
	extHeader, err := parseBTFExtHeader(headerRd, bo)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF extension header: %w", err)
	}

	coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
	}

	buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
	btfFuncInfos, err := parseFuncInfos(buf, bo, strings)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF function info: %w", err)
	}

	funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
	for section, bfis := range btfFuncInfos {
		funcInfos[section], err = newFuncInfos(bfis, ts)
		if err != nil {
			return nil, fmt.Errorf("section %s: func infos: %w", section, err)
		}
	}

	buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
	btfLineInfos, err := parseLineInfos(buf, bo, strings)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF line info: %w", err)
	}

	lineInfos := make(map[string][]lineInfo, len(btfLineInfos))
	for section, blis := range btfLineInfos {
		lineInfos[section], err = newLineInfos(blis, strings)
		if err != nil {
			return nil, fmt.Errorf("section %s: line infos: %w", section, err)
		}
	}

	if coreHeader == nil || coreHeader.COREReloLen == 0 {
		return &ExtInfos{funcInfos, lineInfos, nil}, nil
	}

	var btfCORERelos map[string][]bpfCORERelo
	buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
	btfCORERelos, err = parseCORERelos(buf, bo, strings)
	if err != nil {
		return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
	}

	coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
	for section, brs := range btfCORERelos {
		coreRelos[section], err = newRelocationInfos(brs, ts, strings)
		if err != nil {
			return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
		}
	}

	return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
}

type funcInfoMeta struct{}
type coreRelocationMeta struct{}

// Assign per-section metadata from BTF to a section's instructions.
func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
	funcInfos := ei.funcInfos[section]
	lineInfos := ei.lineInfos[section]
	reloInfos := ei.relocationInfos[section]

	iter := insns.Iterate()
	for iter.Next() {
		if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
			iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
			funcInfos = funcInfos[1:]
		}

		if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset {
			*iter.Ins = iter.Ins.WithSource(lineInfos[0].line)
			lineInfos = lineInfos[1:]
		}

		if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset {
			iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo)
			reloInfos = reloInfos[1:]
		}
	}
}

// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
	iter := insns.Iterate()
	var fiBuf, liBuf bytes.Buffer
	for iter.Next() {
		if fn := FuncMetadata(iter.Ins); fn != nil {
			fi := &funcInfo{
				fn:     fn,
				offset: iter.Offset,
			}
			if err := fi.marshal(&fiBuf, typeID); err != nil {
				return nil, nil, fmt.Errorf("write func info: %w", err)
			}
		}

		if line, ok := iter.Ins.Source().(*Line); ok {
			li := &lineInfo{
				line:   line,
				offset: iter.Offset,
			}
			if err := li.marshal(&liBuf); err != nil {
				return nil, nil, fmt.Errorf("write line info: %w", err)
			}
		}
	}
	return fiBuf.Bytes(), liBuf.Bytes(), nil
}

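// Illustrative use (spec and insns are stand-ins for a loaded Spec and a
// program's instructions; Spec.TypeID resolves a Type to its ID):
//
//	fiBytes, liBytes, err := MarshalExtInfos(insns, spec.TypeID)
//
// The two byte slices can then be handed to the kernel alongside the program.
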
// btfExtHeader is found at the start of the .BTF.ext section.
type btfExtHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8

	// HdrLen is larger than the size of struct btfExtHeader when it is
	// immediately followed by a btfExtCOREHeader.
	HdrLen uint32

	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}

// parseBTFExtHeader parses the header of the .BTF.ext section.
func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
	var header btfExtHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	if int64(header.HdrLen) < int64(binary.Size(&header)) {
		return nil, fmt.Errorf("header length shorter than btfExtHeader size")
	}

	return &header, nil
}

// funcInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its func_info entries.
func (h *btfExtHeader) funcInfoStart() int64 {
	return int64(h.HdrLen + h.FuncInfoOff)
}

// lineInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its line_info entries.
func (h *btfExtHeader) lineInfoStart() int64 {
	return int64(h.HdrLen + h.LineInfoOff)
}

// coreReloStart returns the offset from the beginning of the .BTF.ext section
// to the start of its CO-RE relocation entries.
func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
	return int64(h.HdrLen + ch.COREReloOff)
}

// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
// field is larger than its size.
type btfExtCOREHeader struct {
	COREReloOff uint32
	COREReloLen uint32
}

// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
// header bytes are present, extHeader.HdrLen will be larger than the struct,
// indicating the presence of a CO-RE extension header.
func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
	extHdrSize := int64(binary.Size(extHeader))
	remainder := int64(extHeader.HdrLen) - extHdrSize

	if remainder == 0 {
		return nil, nil
	}

	var coreHeader btfExtCOREHeader
	if err := binary.Read(r, bo, &coreHeader); err != nil {
		return nil, fmt.Errorf("can't read header: %v", err)
	}

	return &coreHeader, nil
}

type btfExtInfoSec struct {
	SecNameOff uint32
	NumInfo    uint32
}

// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
// appearing within func_info and line_info sub-sections.
// These headers appear once for each program section in the ELF and are
// followed by one or more func/line_info records for the section.
func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
	var infoHeader btfExtInfoSec
	if err := binary.Read(r, bo, &infoHeader); err != nil {
		return "", nil, fmt.Errorf("read ext info header: %w", err)
	}

	secName, err := strings.Lookup(infoHeader.SecNameOff)
	if err != nil {
		return "", nil, fmt.Errorf("get section name: %w", err)
	}
	if secName == "" {
		return "", nil, fmt.Errorf("extinfo header refers to empty section name")
	}

	if infoHeader.NumInfo == 0 {
		return "", nil, fmt.Errorf("section %s has zero records", secName)
	}

	return secName, &infoHeader, nil
}

// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
// or line_infos segment that describes the length of all extInfoRecords in
// that segment.
func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
	const maxRecordSize = 256

	var recordSize uint32
	if err := binary.Read(r, bo, &recordSize); err != nil {
		return 0, fmt.Errorf("can't read record size: %v", err)
	}

	if recordSize < 4 {
		// Need at least InsnOff worth of bytes per record.
		return 0, errors.New("record size too short")
	}
	if recordSize > maxRecordSize {
		return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
	}

	return recordSize, nil
}

// The size of a FuncInfo in BTF wire format.
var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))

type funcInfo struct {
	fn     *Func
	offset asm.RawInstructionOffset
}

type bpfFuncInfo struct {
	// Instruction offset of the function within an ELF section.
	InsnOff uint32
	TypeID  TypeID
}

func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
	typ, err := ts.ByID(fi.TypeID)
	if err != nil {
		return nil, err
	}

	fn, ok := typ.(*Func)
	if !ok {
		return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
	}

	// C doesn't have anonymous functions, but check just in case.
	if fn.Name == "" {
		return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
	}

	return &funcInfo{
		fn,
		asm.RawInstructionOffset(fi.InsnOff),
	}, nil
}

func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
	fis := make([]funcInfo, 0, len(bfis))
	for _, bfi := range bfis {
		fi, err := newFuncInfo(bfi, ts)
		if err != nil {
			return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
		}
		fis = append(fis, *fi)
	}
	sort.Slice(fis, func(i, j int) bool {
		return fis[i].offset < fis[j].offset
	})
	return fis, nil
}

// marshal into the BTF wire format.
func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
	id, err := typeID(fi.fn)
	if err != nil {
		return err
	}
	bfi := bpfFuncInfo{
		InsnOff: uint32(fi.offset),
		TypeID:  id,
	}
	return binary.Write(w, internal.NativeEndian, &bfi)
}

// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}

	result := make(map[string][]bpfFuncInfo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			return result, nil
		}
		if err != nil {
			return nil, err
		}

		records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}

		result[secName] = records
	}
}

// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
// These records appear after a btf_ext_info_sec header in the func_info
// sub-section of .BTF.ext.
func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) {
	var out []bpfFuncInfo
	var fi bpfFuncInfo

	if exp, got := FuncInfoSize, recordSize; exp != got {
		// BTF blob's record size is longer than we know how to parse.
		return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
	}

	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &fi); err != nil {
			return nil, fmt.Errorf("can't read function info: %v", err)
		}

		if fi.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
		}

		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		fi.InsnOff /= asm.InstructionSize

		out = append(out, fi)
	}

	return out, nil
}

// The size of a LineInfo in BTF wire format.
var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))

// Line represents the location and contents of a single line of source
// code a BPF ELF was compiled from.
type Line struct {
	fileName   string
	line       string
	lineNumber uint32
	lineColumn uint32

	// TODO: We should get rid of the fields below, but for that we need to be
	// able to write BTF.

	fileNameOff uint32
	lineOff     uint32
}

func (li *Line) FileName() string {
	return li.fileName
}

func (li *Line) Line() string {
	return li.line
}

func (li *Line) LineNumber() uint32 {
	return li.lineNumber
}

func (li *Line) LineColumn() uint32 {
	return li.lineColumn
}

func (li *Line) String() string {
	return li.line
}

type lineInfo struct {
	line   *Line
	offset asm.RawInstructionOffset
}

// Constants for the format of bpfLineInfo.LineCol.
const (
	bpfLineShift = 10
	bpfLineMax   = (1 << (32 - bpfLineShift)) - 1
	bpfColumnMax = (1 << bpfLineShift) - 1
)

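// For example, line 42, column 3 packs into LineCol as 42<<bpfLineShift | 3,
// i.e. 43011; newLineInfo unpacks it with LineCol>>bpfLineShift and
// LineCol&bpfColumnMax.
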
type bpfLineInfo struct {
	// Instruction offset of the line within the whole instruction stream, in instructions.
	InsnOff     uint32
	FileNameOff uint32
	LineOff     uint32
	LineCol     uint32
}

func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
	line, err := strings.Lookup(li.LineOff)
	if err != nil {
		return nil, fmt.Errorf("lookup of line: %w", err)
	}

	fileName, err := strings.Lookup(li.FileNameOff)
	if err != nil {
		return nil, fmt.Errorf("lookup of filename: %w", err)
	}

	lineNumber := li.LineCol >> bpfLineShift
	lineColumn := li.LineCol & bpfColumnMax

	return &lineInfo{
		&Line{
			fileName,
			line,
			lineNumber,
			lineColumn,
			li.FileNameOff,
			li.LineOff,
		},
		asm.RawInstructionOffset(li.InsnOff),
	}, nil
}

func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) {
	lis := make([]lineInfo, 0, len(blis))
	for _, bli := range blis {
		li, err := newLineInfo(bli, strings)
		if err != nil {
			return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
		}
		lis = append(lis, *li)
	}
	sort.Slice(lis, func(i, j int) bool {
		return lis[i].offset < lis[j].offset
	})
	return lis, nil
}

// marshal writes the binary representation of the LineInfo to w.
func (li *lineInfo) marshal(w io.Writer) error {
	line := li.line
	if line.lineNumber > bpfLineMax {
		return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
	}

	if line.lineColumn > bpfColumnMax {
		return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
	}

	bli := bpfLineInfo{
		uint32(li.offset),
		line.fileNameOff,
		line.lineOff,
		(line.lineNumber << bpfLineShift) | line.lineColumn,
	}
	return binary.Write(w, internal.NativeEndian, &bli)
}

// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
// line infos indexed by section name.
func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}

	result := make(map[string][]bpfLineInfo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			return result, nil
		}
		if err != nil {
			return nil, err
		}

		records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}

		result[secName] = records
	}
}

// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
// These records appear after a btf_ext_info_sec header in the line_info
// sub-section of .BTF.ext.
func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) {
	var out []bpfLineInfo
	var li bpfLineInfo

	if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
		// BTF blob's record size is longer than we know how to parse.
		return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
	}

	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &li); err != nil {
			return nil, fmt.Errorf("can't read line info: %v", err)
		}

		if li.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
		}

		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		li.InsnOff /= asm.InstructionSize

		out = append(out, li)
	}

	return out, nil
}

// bpfCORERelo matches the kernel's struct bpf_core_relo.
type bpfCORERelo struct {
	InsnOff      uint32
	TypeID       TypeID
	AccessStrOff uint32
	Kind         coreKind
}

// CORERelocation is a parsed CO-RE relocation: the local type it applies to,
// the accessor path into that type and the relocation kind.
type CORERelocation struct {
	typ      Type
	accessor coreAccessor
	kind     coreKind
}

// CORERelocationMetadata returns the CO-RE relocation attached to ins, or nil
// if there is none.
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
	relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
	return relo
}

type coreRelocationInfo struct {
	relo   *CORERelocation
	offset asm.RawInstructionOffset
}

func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
	typ, err := ts.ByID(relo.TypeID)
	if err != nil {
		return nil, err
	}

	accessorStr, err := strings.Lookup(relo.AccessStrOff)
	if err != nil {
		return nil, err
	}

	accessor, err := parseCOREAccessor(accessorStr)
	if err != nil {
		return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
	}

	return &coreRelocationInfo{
		&CORERelocation{
			typ,
			accessor,
			relo.Kind,
		},
		asm.RawInstructionOffset(relo.InsnOff),
	}, nil
}

func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
	rs := make([]coreRelocationInfo, 0, len(brs))
	for _, br := range brs {
		relo, err := newRelocationInfo(br, ts, strings)
		if err != nil {
			return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
		}
		rs = append(rs, *relo)
	}
	sort.Slice(rs, func(i, j int) bool {
		return rs[i].offset < rs[j].offset
	})
	return rs, nil
}

var extInfoReloSize = binary.Size(bpfCORERelo{})

// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
// CO-RE relocations indexed by section name.
func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}

	if recordSize != uint32(extInfoReloSize) {
		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
	}

	result := make(map[string][]bpfCORERelo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			return result, nil
		}
		if err != nil {
			return nil, err
		}

		records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}

		result[secName] = records
	}
}

// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
// coreRelos. These records appear after a btf_ext_info_sec header in the
// core_relos sub-section of .BTF.ext.
func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
	var out []bpfCORERelo

	var relo bpfCORERelo
	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &relo); err != nil {
			return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
		}

		if relo.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
		}

		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		relo.InsnOff /= asm.InstructionSize

		out = append(out, relo)
	}

	return out, nil
}
319
vendor/github.com/cilium/ebpf/btf/format.go
generated
vendored
Normal file
@@ -0,0 +1,319 @@
package btf

import (
	"errors"
	"fmt"
	"strings"
)

var errNestedTooDeep = errors.New("nested too deep")

// GoFormatter converts a Type to Go syntax.
//
// A zero GoFormatter is valid to use.
type GoFormatter struct {
	w strings.Builder

	// Types present in this map are referred to using the given name if they
	// are encountered when outputting another type.
	Names map[Type]string

	// Identifier is called for each field of struct-like types. By default the
	// field name is used as is.
	Identifier func(string) string

	// EnumIdentifier is called for each element of an enum. By default the
	// name of the enum type is concatenated with Identifier(element).
	EnumIdentifier func(name, element string) string
}

// TypeDeclaration generates a Go type declaration for a BTF type.
func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) {
	gf.w.Reset()
	if err := gf.writeTypeDecl(name, typ); err != nil {
		return "", err
	}
	return gf.w.String(), nil
}

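// Illustrative use (typ stands for an *Int of size 4 with signed encoding):
//
//	decl, _ := (&GoFormatter{}).TypeDeclaration("myInt", typ)
//	// decl == "type myInt int32"
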
func (gf *GoFormatter) identifier(s string) string {
	if gf.Identifier != nil {
		return gf.Identifier(s)
	}

	return s
}

func (gf *GoFormatter) enumIdentifier(name, element string) string {
	if gf.EnumIdentifier != nil {
		return gf.EnumIdentifier(name, element)
	}

	return name + gf.identifier(element)
}

// writeTypeDecl outputs a declaration of the given type.
//
// It encodes https://golang.org/ref/spec#Type_declarations:
//
//	type foo struct { bar uint32; }
//	type bar int32
func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
	if name == "" {
		return fmt.Errorf("need a name for type %s", typ)
	}

	switch v := skipQualifiers(typ).(type) {
	case *Enum:
		fmt.Fprintf(&gf.w, "type %s ", name)
		switch v.Size {
		case 1:
			gf.w.WriteString("int8")
		case 2:
			gf.w.WriteString("int16")
		case 4:
			gf.w.WriteString("int32")
		case 8:
			gf.w.WriteString("int64")
		default:
			return fmt.Errorf("%s: invalid enum size %d", typ, v.Size)
		}

		if len(v.Values) == 0 {
			return nil
		}

		gf.w.WriteString("; const ( ")
		for _, ev := range v.Values {
			id := gf.enumIdentifier(name, ev.Name)
			fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
		}
		gf.w.WriteString(")")

		return nil

	default:
		fmt.Fprintf(&gf.w, "type %s ", name)
		return gf.writeTypeLit(v, 0)
	}
}

// writeType outputs the name of a named type or a literal describing the type.
//
// It encodes https://golang.org/ref/spec#Types.
//
//	foo    (if foo is a named type)
//	uint32
func (gf *GoFormatter) writeType(typ Type, depth int) error {
	typ = skipQualifiers(typ)

	name := gf.Names[typ]
	if name != "" {
		gf.w.WriteString(name)
		return nil
	}

	return gf.writeTypeLit(typ, depth)
}

// writeTypeLit outputs a literal describing the type.
//
// The function ignores named types.
//
// It encodes https://golang.org/ref/spec#TypeLit.
//
//	struct { bar uint32; }
//	uint32
func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
	depth++
	if depth > maxTypeDepth {
		return errNestedTooDeep
	}

	var err error
	switch v := skipQualifiers(typ).(type) {
	case *Int:
		gf.writeIntLit(v)

	case *Enum:
		gf.w.WriteString("int32")

	case *Typedef:
		err = gf.writeType(v.Type, depth)

	case *Array:
		fmt.Fprintf(&gf.w, "[%d]", v.Nelems)
		err = gf.writeType(v.Type, depth)

	case *Struct:
		err = gf.writeStructLit(v.Size, v.Members, depth)

	case *Union:
		// Always choose the first member to represent the union in Go.
		err = gf.writeStructLit(v.Size, v.Members[:1], depth)

	case *Datasec:
		err = gf.writeDatasecLit(v, depth)

	default:
		return fmt.Errorf("type %T: %w", v, ErrNotSupported)
	}

	if err != nil {
		return fmt.Errorf("%s: %w", typ, err)
	}

	return nil
}

func (gf *GoFormatter) writeIntLit(i *Int) {
	// NB: Encoding.IsChar is ignored.
	if i.Encoding.IsBool() && i.Size == 1 {
		gf.w.WriteString("bool")
		return
	}

	bits := i.Size * 8
	if i.Encoding.IsSigned() {
		fmt.Fprintf(&gf.w, "int%d", bits)
	} else {
		fmt.Fprintf(&gf.w, "uint%d", bits)
	}
}

func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
	gf.w.WriteString("struct { ")

	prevOffset := uint32(0)
	skippedBitfield := false
	for i, m := range members {
		if m.BitfieldSize > 0 {
			skippedBitfield = true
			continue
		}

		offset := m.Offset.Bytes()
		if n := offset - prevOffset; skippedBitfield && n > 0 {
			fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
		} else {
			gf.writePadding(n)
		}

		size, err := Sizeof(m.Type)
		if err != nil {
			return fmt.Errorf("field %d: %w", i, err)
		}
		prevOffset = offset + uint32(size)

		if err := gf.writeStructField(m, depth); err != nil {
			return fmt.Errorf("field %d: %w", i, err)
		}
	}

	gf.writePadding(size - prevOffset)
	gf.w.WriteString("}")
	return nil
}

func (gf *GoFormatter) writeStructField(m Member, depth int) error {
	if m.BitfieldSize > 0 {
		return fmt.Errorf("bitfields are not supported")
	}
	if m.Offset%8 != 0 {
		return fmt.Errorf("unsupported offset %d", m.Offset)
	}

	if m.Name == "" {
		// Special case a nested anonymous union like
		//     struct foo { union { int bar; int baz }; }
		// by replacing the whole union with its first member.
		union, ok := m.Type.(*Union)
		if !ok {
			return fmt.Errorf("anonymous fields are not supported")
		}

		if len(union.Members) == 0 {
			return errors.New("empty anonymous union")
		}

		depth++
		if depth > maxTypeDepth {
			return errNestedTooDeep
		}

		m := union.Members[0]
		size, err := Sizeof(m.Type)
		if err != nil {
			return err
		}

		if err := gf.writeStructField(m, depth); err != nil {
			return err
		}

		gf.writePadding(union.Size - uint32(size))
		return nil
	}

	fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name))

	if err := gf.writeType(m.Type, depth); err != nil {
		return err
	}

	gf.w.WriteString("; ")
	return nil
}

func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
	gf.w.WriteString("struct { ")

	prevOffset := uint32(0)
	for i, vsi := range ds.Vars {
		v := vsi.Type.(*Var)
		if v.Linkage != GlobalVar {
			// Ignore static, extern, etc. for now.
			continue
		}

		if v.Name == "" {
			return fmt.Errorf("variable %d: empty name", i)
		}

		gf.writePadding(vsi.Offset - prevOffset)
		prevOffset = vsi.Offset + vsi.Size

		fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))

		if err := gf.writeType(v.Type, depth); err != nil {
			return fmt.Errorf("variable %d: %w", i, err)
		}

		gf.w.WriteString("; ")
	}

	gf.writePadding(ds.Size - prevOffset)
	gf.w.WriteString("}")
	return nil
}

func (gf *GoFormatter) writePadding(bytes uint32) {
	if bytes > 0 {
		fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
	}
}

func skipQualifiers(typ Type) Type {
	result := typ
	for depth := 0; depth <= maxTypeDepth; depth++ {
		switch v := (result).(type) {
		case qualifier:
			result = v.qualify()
		default:
			return result
		}
	}
	return &cycle{typ}
}
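
// For example, a *Const wrapping a *Volatile wrapping an *Int resolves to the
// underlying *Int; a cyclic qualifier chain is cut off after maxTypeDepth
// steps and reported as a &cycle{...} marker.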
121
vendor/github.com/cilium/ebpf/btf/handle.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
package btf

import (
	"errors"
	"fmt"
	"os"

	"github.com/cilium/ebpf/internal/sys"
	"github.com/cilium/ebpf/internal/unix"
)

// HandleInfo describes a Handle.
type HandleInfo struct {
	// ID of this handle in the kernel. The ID is only valid as long as the
	// associated handle is kept alive.
	ID ID

	// Name is an identifying name for the BTF, currently only used by the
	// kernel.
	Name string

	// IsKernel is true if the BTF originated with the kernel and not
	// userspace.
	IsKernel bool

	// Size of the raw BTF in bytes.
	size uint32
}

func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
	// We invoke the syscall once with empty BTF and name buffers to get size
	// information to allocate buffers. Then we invoke it a second time with
	// buffers to receive the data.
	var btfInfo sys.BtfInfo
	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
		return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
	}

	if btfInfo.NameLen > 0 {
		// NameLen doesn't account for the terminating NUL.
		btfInfo.NameLen++
	}

	// Don't pull raw BTF by default, since it may be quite large.
	btfSize := btfInfo.BtfSize
	btfInfo.BtfSize = 0

	nameBuffer := make([]byte, btfInfo.NameLen)
	btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
		return nil, err
	}

	return &HandleInfo{
		ID:       ID(btfInfo.Id),
		Name:     unix.ByteSliceToString(nameBuffer),
		IsKernel: btfInfo.KernelBtf != 0,
		size:     btfSize,
	}, nil
}

// IsVmlinux returns true if the BTF is for the kernel itself.
func (i *HandleInfo) IsVmlinux() bool {
	return i.IsKernel && i.Name == "vmlinux"
}

// IsModule returns true if the BTF is for a kernel module.
func (i *HandleInfo) IsModule() bool {
	return i.IsKernel && i.Name != "vmlinux"
}

// HandleIterator allows enumerating BTF blobs loaded into the kernel.
type HandleIterator struct {
	// The ID of the last retrieved handle. Only valid after a call to Next.
	ID  ID
	err error
}

// Next retrieves a handle for the next BTF blob.
//
// [Handle.Close] is called if *handle is non-nil to avoid leaking fds.
//
// Returns true if another BTF blob was found. Call [HandleIterator.Err] after
// the function returns false.
func (it *HandleIterator) Next(handle **Handle) bool {
	if *handle != nil {
		(*handle).Close()
		*handle = nil
	}

	id := it.ID
	for {
		attr := &sys.BtfGetNextIdAttr{Id: id}
		err := sys.BtfGetNextId(attr)
		if errors.Is(err, os.ErrNotExist) {
			// There are no more BTF objects.
			return false
		} else if err != nil {
			it.err = fmt.Errorf("get next BTF ID: %w", err)
			return false
		}

		id = attr.NextId
		*handle, err = NewHandleFromID(id)
		if errors.Is(err, os.ErrNotExist) {
			// Try again with the next ID.
			continue
		} else if err != nil {
			it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
			return false
		}

		it.ID = id
		return true
	}
}

// Err returns an error if iteration failed for some reason.
func (it *HandleIterator) Err() error {
	return it.err
}
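
// Illustrative iteration over all BTF objects in the kernel (the final
// handle is closed by the caller):
//
//	var handle *Handle
//	it := new(HandleIterator)
//	for it.Next(&handle) {
//		fmt.Println("found BTF object with ID", it.ID)
//	}
//	if handle != nil {
//		handle.Close()
//	}
//	if err := it.Err(); err != nil {
//		// iteration failed
//	}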
128
vendor/github.com/cilium/ebpf/btf/strings.go
generated
vendored
Normal file
@@ -0,0 +1,128 @@
package btf

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

type stringTable struct {
	base    *stringTable
	offsets []uint32
	strings []string
}

// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
type sizedReader interface {
	io.Reader
	Size() int64
}

func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) {
	// When parsing split BTF's string table, the first entry offset is derived
	// from the last entry offset of the base BTF.
	firstStringOffset := uint32(0)
	if base != nil {
		idx := len(base.offsets) - 1
		firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1
	}

	// Derived from vmlinux BTF.
	const averageStringLength = 16

	n := int(r.Size() / averageStringLength)
	offsets := make([]uint32, 0, n)
	strings := make([]string, 0, n)

	offset := firstStringOffset
	scanner := bufio.NewScanner(r)
	scanner.Split(splitNull)
	for scanner.Scan() {
		str := scanner.Text()
		offsets = append(offsets, offset)
		strings = append(strings, str)
		offset += uint32(len(str)) + 1
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}

	if len(strings) == 0 {
		return nil, errors.New("string table is empty")
	}

	if firstStringOffset == 0 && strings[0] != "" {
		return nil, errors.New("first item in string table is non-empty")
	}

	return &stringTable{base, offsets, strings}, nil
}

func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
	i := bytes.IndexByte(data, 0)
	if i == -1 {
		if atEOF && len(data) > 0 {
			return 0, nil, errors.New("string table isn't null terminated")
		}
		return 0, nil, nil
	}

	return i + 1, data[:i], nil
}

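// For example, scanning the table "\x00foo\x00bar\x00" yields the tokens "",
// "foo" and "bar"; readStringTable records their offsets as 0, 1 and 5.
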
func (st *stringTable) Lookup(offset uint32) (string, error) {
	if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] {
		return st.base.lookup(offset)
	}
	return st.lookup(offset)
}

func (st *stringTable) lookup(offset uint32) (string, error) {
	i := search(st.offsets, offset)
	if i == len(st.offsets) || st.offsets[i] != offset {
		return "", fmt.Errorf("offset %d isn't start of a string", offset)
	}

	return st.strings[i], nil
}

func (st *stringTable) Length() int {
	last := len(st.offsets) - 1
	return int(st.offsets[last]) + len(st.strings[last]) + 1
}

func (st *stringTable) Marshal(w io.Writer) error {
	for _, str := range st.strings {
		_, err := io.WriteString(w, str)
		if err != nil {
			return err
		}
		_, err = w.Write([]byte{0})
		if err != nil {
			return err
		}
	}
	return nil
}

// search is a copy of sort.Search specialised for uint32.
//
// Licensed under https://go.dev/LICENSE
func search(ints []uint32, needle uint32) int {
	// Define f(-1) == false and f(n) == true.
	// Invariant: f(i-1) == false, f(j) == true.
	i, j := 0, len(ints)
	for i < j {
		h := int(uint(i+j) >> 1) // avoid overflow when computing h
		// i ≤ h < j
		if !(ints[h] >= needle) {
			i = h + 1 // preserves f(i-1) == false
		} else {
			j = h // preserves f(j) == true
		}
	}
	// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
	return i
}
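
// For example, search([]uint32{0, 1, 5}, 5) returns 2; for a needle that is
// absent, such as 3, it returns the insertion index (here also 2), which
// lookup then rejects because offsets[2] != 3.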
1212
vendor/github.com/cilium/ebpf/btf/types.go
generated
vendored
Normal file
File diff suppressed because it is too large