vendor: bump cilium/ebpf to v0.9.0

Also, change the deprecated Sym to WithSymbol.

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
Author: Kir Kolyshkin
Date: 2022-05-26 13:33:16 -07:00
parent 016a0d29d1
commit e0406b4ba6
51 changed files with 3380 additions and 2074 deletions

go.mod | 2

@@ -4,7 +4,7 @@ go 1.17
require (
github.com/checkpoint-restore/go-criu/v5 v5.3.0
github.com/cilium/ebpf v0.8.1
github.com/cilium/ebpf v0.9.0
github.com/containerd/console v1.0.3
github.com/coreos/go-systemd/v22 v22.3.2
github.com/cyphar/filepath-securejoin v0.2.3

go.sum | 4

@@ -1,8 +1,8 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/cilium/ebpf v0.9.0 h1:ldiV+FscPCQ/p3mNEV4O02EPbUZJFsoEtHvIr9xLTvk=
github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=


@@ -174,7 +174,7 @@ func (p *program) appendRule(rule *devices.Rule) error {
}
p.insts = append(p.insts, acceptBlock(rule.Allow)...)
// set blockSym to the first instruction we added in this iteration
p.insts[prevBlockLastIdx+1] = p.insts[prevBlockLastIdx+1].Sym(blockSym)
p.insts[prevBlockLastIdx+1] = p.insts[prevBlockLastIdx+1].WithSymbol(blockSym)
p.blockID++
return nil
}
@@ -187,7 +187,7 @@ func (p *program) finalize() asm.Instructions {
blockSym := "block-" + strconv.Itoa(p.blockID)
p.insts = append(p.insts,
// R0 <- v
asm.Mov.Imm32(asm.R0, v).Sym(blockSym),
asm.Mov.Imm32(asm.R0, v).WithSymbol(blockSym),
asm.Return(),
)
p.blockID = -1
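
For reference, a minimal sketch of the consumer-side change this diff makes, assuming cilium/ebpf v0.9.0 (the "block-0" label is illustrative). WithSymbol stores the name in the instruction's new Metadata field rather than the removed Symbol struct field:

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/asm"
    )

    func main() {
        // v0.8.x: asm.Mov.Imm32(asm.R0, 0).Sym("block-0")
        // v0.9.0: Sym is deprecated in favor of WithSymbol.
        ins := asm.Mov.Imm32(asm.R0, 0).WithSymbol("block-0")
        fmt.Println(ins.Symbol()) // prints "block-0"
    }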

vendor/github.com/cilium/ebpf/MAINTAINERS.md | 8 (generated, vendored, new file)

@@ -0,0 +1,8 @@
# Maintainers
* [Lorenz Bauer]
* [Timo Beckers] (Isovalent)
[Lorenz Bauer]: https://github.com/lmb
[Timo Beckers]: https://github.com/ti-mo


@@ -1,8 +1,8 @@
# The development version of clang is distributed as the 'clang' binary,
# while stable/released versions have a version number attached.
# Pin the default clang to a stable version.
CLANG ?= clang-13
STRIP ?= llvm-strip-13
CLANG ?= clang-14
STRIP ?= llvm-strip-14
CFLAGS := -O2 -g -Wall -Werror $(CFLAGS)
# Obtain an absolute path to the directory of the Makefile.
@@ -13,7 +13,7 @@ UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
# Prefer podman if installed, otherwise use docker.
# Note: Setting the var at runtime will always override.
CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker)
CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman),, --user "${UIDGID}")
CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}")
IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
@@ -35,7 +35,9 @@ TARGETS := \
testdata/map_spin_lock \
testdata/subprog_reloc \
testdata/fwd_decl \
internal/btf/testdata/relocs
btf/testdata/relocs \
btf/testdata/relocs_read \
btf/testdata/relocs_read_tgt
.PHONY: all clean container-all container-shell generate
@@ -58,9 +60,12 @@ container-shell:
clean:
-$(RM) testdata/*.elf
-$(RM) internal/btf/testdata/*.elf
-$(RM) btf/testdata/*.elf
all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate
format:
find . -type f -name "*.c" | xargs clang-format -i
all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate
ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
@@ -69,6 +74,7 @@ generate: export BPF_CLANG := $(CLANG)
generate: export BPF_CFLAGS := $(CFLAGS)
generate:
go generate ./cmd/bpf2go/test
go generate ./internal/sys
cd examples/ && go generate ./...
testdata/loader-%-el.elf: testdata/loader.c
@@ -89,6 +95,6 @@ testdata/loader-%-eb.elf: testdata/loader.c
# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
.PHONY: vmlinux-btf
vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz
internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX)
vmlinux-btf: btf/testdata/vmlinux-btf.gz
btf/testdata/vmlinux-btf.gz: $(VMLINUX)
objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@"


@@ -5,6 +5,10 @@ package asm
// BuiltinFunc is a built-in eBPF function.
type BuiltinFunc int32
func (_ BuiltinFunc) Max() BuiltinFunc {
return maxBuiltinFunc - 1
}
// eBPF built-in functions
//
// You can regenerate this list using the following gawk script:
@@ -197,6 +201,8 @@ const (
FnGetFuncIp
FnGetAttachCookie
FnTaskPtRegs
maxBuiltinFunc
)
// Call emits a function call.


@@ -10,6 +10,7 @@ import (
"math"
"strings"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -19,6 +20,10 @@ const InstructionSize = 8
// RawInstructionOffset is an offset in units of raw BPF instructions.
type RawInstructionOffset uint64
var ErrUnreferencedSymbol = errors.New("unreferenced symbol")
var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference")
var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference")
// Bytes returns the offset of an instruction in bytes.
func (rio RawInstructionOffset) Bytes() uint64 {
return uint64(rio) * InstructionSize
@@ -32,17 +37,8 @@ type Instruction struct {
Offset int16
Constant int64
// Reference denotes a reference (e.g. a jump) to another symbol.
Reference string
// Symbol denotes an instruction at the start of a function body.
Symbol string
}
// Sym creates a symbol.
func (ins Instruction) Sym(name string) Instruction {
ins.Symbol = name
return ins
// Metadata contains optional metadata about this instruction.
Metadata Metadata
}
// Unmarshal decodes a BPF instruction.
@@ -133,31 +129,65 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
return 2 * InstructionSize, nil
}
// RewriteMapPtr changes an instruction to use a new map fd.
// AssociateMap associates a Map with this Instruction.
//
// Returns an error if the instruction doesn't load a map.
func (ins *Instruction) RewriteMapPtr(fd int) error {
if !ins.OpCode.IsDWordLoad() {
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
// Implicitly clears the Instruction's Reference field.
//
// Returns an error if the Instruction is not a map load.
func (ins *Instruction) AssociateMap(m FDer) error {
if !ins.IsLoadFromMap() {
return errors.New("not a load from a map")
}
ins.Metadata.Set(referenceMeta{}, nil)
ins.Metadata.Set(mapMeta{}, m)
return nil
}
// RewriteMapPtr changes an instruction to use a new map fd.
//
// Returns an error if the instruction doesn't load a map.
//
// Deprecated: use AssociateMap instead. If you cannot provide a Map,
// wrap an fd in a type implementing FDer.
func (ins *Instruction) RewriteMapPtr(fd int) error {
if !ins.IsLoadFromMap() {
return errors.New("not a load from a map")
}
ins.encodeMapFD(fd)
return nil
}
func (ins *Instruction) encodeMapFD(fd int) {
// Preserve the offset value for direct map loads.
offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
rawFd := uint64(uint32(fd))
ins.Constant = int64(offset | rawFd)
return nil
}
// MapPtr returns the map fd for this instruction.
//
// The result is undefined if the instruction is not a load from a map,
// see IsLoadFromMap.
//
// Deprecated: use Map() instead.
func (ins *Instruction) MapPtr() int {
return int(int32(uint64(ins.Constant) & math.MaxUint32))
// If there is a map associated with the instruction, return its FD.
if fd := ins.Metadata.Get(mapMeta{}); fd != nil {
return fd.(FDer).FD()
}
// Fall back to the fd stored in the Constant field
return ins.mapFd()
}
// mapFd returns the map file descriptor stored in the 32 least significant
// bits of ins' Constant field.
func (ins *Instruction) mapFd() int {
return int(int32(ins.Constant))
}
// RewriteMapOffset changes the offset of a direct load from a map.
@@ -239,14 +269,23 @@ func (ins Instruction) Format(f fmt.State, c rune) {
}
if ins.IsLoadFromMap() {
fd := ins.MapPtr()
fd := ins.mapFd()
m := ins.Map()
switch ins.Src {
case PseudoMapFD:
if m != nil {
fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m)
} else {
fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
}
case PseudoMapValue:
if m != nil {
fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset())
} else {
fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
}
}
goto ref
}
@@ -296,16 +335,103 @@ func (ins Instruction) Format(f fmt.State, c rune) {
}
ref:
if ins.Reference != "" {
fmt.Fprintf(f, " <%s>", ins.Reference)
if ins.Reference() != "" {
fmt.Fprintf(f, " <%s>", ins.Reference())
}
}
func (ins Instruction) equal(other Instruction) bool {
return ins.OpCode == other.OpCode &&
ins.Dst == other.Dst &&
ins.Src == other.Src &&
ins.Offset == other.Offset &&
ins.Constant == other.Constant
}
// Size returns the amount of bytes ins would occupy in binary form.
func (ins Instruction) Size() uint64 {
return uint64(InstructionSize * ins.OpCode.rawInstructions())
}
type symbolMeta struct{}
// WithSymbol marks the Instruction as a Symbol, which other Instructions
// can point to using corresponding calls to WithReference.
func (ins Instruction) WithSymbol(name string) Instruction {
ins.Metadata.Set(symbolMeta{}, name)
return ins
}
// Sym creates a symbol.
//
// Deprecated: use WithSymbol instead.
func (ins Instruction) Sym(name string) Instruction {
return ins.WithSymbol(name)
}
// Symbol returns the value ins has been marked with using WithSymbol,
// otherwise returns an empty string. A symbol is often an Instruction
// at the start of a function body.
func (ins Instruction) Symbol() string {
sym, _ := ins.Metadata.Get(symbolMeta{}).(string)
return sym
}
type referenceMeta struct{}
// WithReference makes ins reference another Symbol or map by name.
func (ins Instruction) WithReference(ref string) Instruction {
ins.Metadata.Set(referenceMeta{}, ref)
return ins
}
// Reference returns the Symbol or map name referenced by ins, if any.
func (ins Instruction) Reference() string {
ref, _ := ins.Metadata.Get(referenceMeta{}).(string)
return ref
}
type mapMeta struct{}
// Map returns the Map referenced by ins, if any.
// An Instruction will contain a Map if e.g. it references an existing,
// pinned map that was opened during ELF loading.
func (ins Instruction) Map() FDer {
fd, _ := ins.Metadata.Get(mapMeta{}).(FDer)
return fd
}
type sourceMeta struct{}
// WithSource adds source information about the Instruction.
func (ins Instruction) WithSource(src fmt.Stringer) Instruction {
ins.Metadata.Set(sourceMeta{}, src)
return ins
}
// Source returns source information about the Instruction. The field is
// present when the compiler emits BTF line info about the Instruction and
// usually contains the line of source code responsible for it.
func (ins Instruction) Source() fmt.Stringer {
str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer)
return str
}
// A Comment can be passed to Instruction.WithSource to add a comment
// to an instruction.
type Comment string
func (s Comment) String() string {
return string(s)
}
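
A quick sketch of attaching source information via the new Comment type (the comment text is arbitrary):

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/asm"
    )

    func main() {
        ins := asm.Mov.Imm32(asm.R0, 0).WithSource(asm.Comment("r0 = 0"))
        fmt.Println(ins.Source()) // prints "r0 = 0"
    }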
// FDer represents a resource tied to an underlying file descriptor.
// Used as a stand-in for e.g. ebpf.Map since that type cannot be
// imported here and FD() is the only method we rely on.
type FDer interface {
FD() int
}
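
Since ebpf.Map cannot be imported here, a caller holding only a raw fd can wrap it to satisfy FDer, as the RewriteMapPtr deprecation note suggests. A hedged sketch (rawFD and the fd value 3 are made up for illustration):

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/asm"
    )

    // rawFD wraps a bare file descriptor so it satisfies asm.FDer.
    type rawFD int

    func (fd rawFD) FD() int { return int(fd) }

    func main() {
        ins := asm.LoadMapPtr(asm.R1, 0) // a map load, as AssociateMap requires
        if err := ins.AssociateMap(rawFD(3)); err != nil {
            panic(err)
        }
        fmt.Println(ins.Map().FD()) // prints 3
    }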
// Instructions is an eBPF program.
type Instructions []Instruction
@@ -339,7 +465,7 @@ func (insns Instructions) Name() string {
if len(insns) == 0 {
return ""
}
return insns[0].Symbol
return insns[0].Symbol()
}
func (insns Instructions) String() string {
@@ -355,22 +481,25 @@ func (insns Instructions) Size() uint64 {
return sum
}
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
// AssociateMap updates all Instructions that Reference the given symbol
// to point to an existing Map m instead.
//
// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
// Returns ErrUnreferencedSymbol error if no references to symbol are found
// in insns. If symbol is anything else than the symbol name of map (e.g.
// a bpf2bpf subprogram), an error is returned.
func (insns Instructions) AssociateMap(symbol string, m FDer) error {
if symbol == "" {
return errors.New("empty symbol")
}
found := false
var found bool
for i := range insns {
ins := &insns[i]
if ins.Reference != symbol {
if ins.Reference() != symbol {
continue
}
if err := ins.RewriteMapPtr(fd); err != nil {
if err := ins.AssociateMap(m); err != nil {
return err
}
@@ -378,7 +507,40 @@ func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
}
if !found {
return &unreferencedSymbolError{symbol}
return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
}
return nil
}
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
//
// Returns ErrUnreferencedSymbol if the symbol isn't used.
//
// Deprecated: use AssociateMap instead.
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
if symbol == "" {
return errors.New("empty symbol")
}
var found bool
for i := range insns {
ins := &insns[i]
if ins.Reference() != symbol {
continue
}
if !ins.IsLoadFromMap() {
return errors.New("not a load from a map")
}
ins.encodeMapFD(fd)
found = true
}
if !found {
return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
}
return nil
@@ -390,15 +552,15 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) {
offsets := make(map[string]int)
for i, ins := range insns {
if ins.Symbol == "" {
if ins.Symbol() == "" {
continue
}
if _, ok := offsets[ins.Symbol]; ok {
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
if _, ok := offsets[ins.Symbol()]; ok {
return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol())
}
offsets[ins.Symbol] = i
offsets[ins.Symbol()] = i
}
return offsets, nil
@@ -415,7 +577,7 @@ func (insns Instructions) FunctionReferences() map[string]bool {
continue
}
if ins.Reference == "" {
if ins.Reference() == "" {
continue
}
@@ -423,7 +585,7 @@ func (insns Instructions) FunctionReferences() map[string]bool {
continue
}
calls[ins.Reference] = true
calls[ins.Reference()] = true
}
return calls
@@ -435,11 +597,11 @@ func (insns Instructions) ReferenceOffsets() map[string][]int {
offsets := make(map[string][]int)
for i, ins := range insns {
if ins.Reference == "" {
if ins.Reference() == "" {
continue
}
offsets[ins.Reference] = append(offsets[ins.Reference], i)
offsets[ins.Reference()] = append(offsets[ins.Reference()], i)
}
return offsets
@@ -490,18 +652,34 @@ func (insns Instructions) Format(f fmt.State, c rune) {
iter := insns.Iterate()
for iter.Next() {
if iter.Ins.Symbol != "" {
fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol)
if iter.Ins.Symbol() != "" {
fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol())
}
if src := iter.Ins.Source(); src != nil {
line := strings.TrimSpace(src.String())
if line != "" {
fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line)
}
}
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
}
}
// Marshal encodes a BPF program into the kernel format.
//
// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
// without a matching Symbol Instruction within insns.
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
if err := insns.encodeFunctionReferences(); err != nil {
return err
}
if err := insns.encodeMapPointers(); err != nil {
return err
}
for i, ins := range insns {
_, err := ins.Marshal(w, bo)
if err != nil {
if _, err := ins.Marshal(w, bo); err != nil {
return fmt.Errorf("instruction %d: %w", i, err)
}
}
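
To illustrate, a small sketch where Marshal resolves a jump's Reference against a WithSymbol target; a dangling label would instead surface ErrUnsatisfiedProgramReference (the "exit" label is arbitrary):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"

        "github.com/cilium/ebpf/asm"
    )

    func main() {
        insns := asm.Instructions{
            // Offset stays -1 until encodeFunctionReferences fills it in.
            asm.JEq.Imm(asm.R0, 0, "exit"),
            asm.Mov.Imm32(asm.R0, 1),
            asm.Return().WithSymbol("exit"),
        }

        var buf bytes.Buffer
        if err := insns.Marshal(&buf, binary.LittleEndian); err != nil {
            panic(err)
        }
        fmt.Println(buf.Len()) // 24: three 8-byte instructions
    }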
@@ -527,6 +705,95 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
}
// encodeFunctionReferences populates the Offset (or Constant, depending on
// the instruction type) field of instructions with a Reference field to point
// to the offset of the corresponding instruction with a matching Symbol field.
//
// Only Reference Instructions that are either jumps or BPF function references
// (calls or function pointer loads) are populated.
//
// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
// without at least one corresponding Symbol Instruction within insns.
func (insns Instructions) encodeFunctionReferences() error {
// Index the offsets of instructions tagged as a symbol.
symbolOffsets := make(map[string]RawInstructionOffset)
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
if ins.Symbol() == "" {
continue
}
if _, ok := symbolOffsets[ins.Symbol()]; ok {
return fmt.Errorf("duplicate symbol %s", ins.Symbol())
}
symbolOffsets[ins.Symbol()] = iter.Offset
}
// Find all instructions tagged as references to other symbols.
// Depending on the instruction type, populate their constant or offset
// fields to point to the symbol they refer to within the insn stream.
iter = insns.Iterate()
for iter.Next() {
i := iter.Index
offset := iter.Offset
ins := iter.Ins
if ins.Reference() == "" {
continue
}
switch {
case ins.IsFunctionReference() && ins.Constant == -1:
symOffset, ok := symbolOffsets[ins.Reference()]
if !ok {
return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
}
ins.Constant = int64(symOffset - offset - 1)
case ins.OpCode.Class().IsJump() && ins.Offset == -1:
symOffset, ok := symbolOffsets[ins.Reference()]
if !ok {
return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
}
ins.Offset = int16(symOffset - offset - 1)
}
}
return nil
}
// encodeMapPointers finds all Map Instructions and encodes their FDs
// into their Constant fields.
func (insns Instructions) encodeMapPointers() error {
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
if !ins.IsLoadFromMap() {
continue
}
m := ins.Map()
if m == nil {
continue
}
fd := m.FD()
if fd < 0 {
return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd)
}
ins.encodeMapFD(m.FD())
}
return nil
}
// Iterate allows iterating a BPF program while keeping track of
// various offsets.
//
@@ -575,17 +842,10 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro
}
}
type unreferencedSymbolError struct {
symbol string
}
func (use *unreferencedSymbolError) Error() string {
return fmt.Sprintf("unreferenced symbol %s", use.symbol)
}
// IsUnreferencedSymbol returns true if err was caused by
// an unreferenced symbol.
//
// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol).
func IsUnreferencedSymbol(err error) bool {
_, ok := err.(*unreferencedSymbolError)
return ok
return errors.Is(err, ErrUnreferencedSymbol)
}
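
With the unexported error type gone, callers check the sentinel directly; a brief sketch (the symbol name is illustrative):

    package main

    import (
        "errors"
        "fmt"

        "github.com/cilium/ebpf/asm"
    )

    func main() {
        insns := asm.Instructions{asm.Return()}
        err := insns.RewriteMapPtr("no-such-map", 3)
        fmt.Println(errors.Is(err, asm.ErrUnreferencedSymbol)) // true
        fmt.Println(asm.IsUnreferencedSymbol(err))             // true, deprecated form
    }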


@@ -67,8 +67,7 @@ func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
Dst: dst,
Offset: -1,
Constant: int64(value),
Reference: label,
}
}.WithReference(label)
}
// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled.
@@ -79,8 +78,7 @@ func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction {
Dst: dst,
Offset: -1,
Constant: int64(value),
Reference: label,
}
}.WithReference(label)
}
// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled.
@@ -90,8 +88,7 @@ func (op JumpOp) Reg(dst, src Register, label string) Instruction {
Dst: dst,
Src: src,
Offset: -1,
Reference: label,
}
}.WithReference(label)
}
// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled.
@@ -102,8 +99,7 @@ func (op JumpOp) Reg32(dst, src Register, label string) Instruction {
Dst: dst,
Src: src,
Offset: -1,
Reference: label,
}
}.WithReference(label)
}
func (op JumpOp) opCode(class Class, source Source) OpCode {
@@ -121,13 +117,11 @@ func (op JumpOp) Label(label string) Instruction {
OpCode: OpCode(JumpClass).SetJumpOp(Call),
Src: PseudoCall,
Constant: -1,
Reference: label,
}
}.WithReference(label)
}
return Instruction{
OpCode: OpCode(JumpClass).SetJumpOp(op),
Offset: -1,
Reference: label,
}
}.WithReference(label)
}

vendor/github.com/cilium/ebpf/asm/metadata.go | 80 (generated, vendored, new file)

@@ -0,0 +1,80 @@
package asm
// Metadata contains metadata about an instruction.
type Metadata struct {
head *metaElement
}
type metaElement struct {
next *metaElement
key, value interface{}
}
// Find the element containing key.
//
// Returns nil if there is no such element.
func (m *Metadata) find(key interface{}) *metaElement {
for e := m.head; e != nil; e = e.next {
if e.key == key {
return e
}
}
return nil
}
// Remove an element from the linked list.
//
// Copies as many elements of the list as necessary to remove r, but doesn't
// perform a full copy.
func (m *Metadata) remove(r *metaElement) {
current := &m.head
for e := m.head; e != nil; e = e.next {
if e == r {
// We've found the element we want to remove.
*current = e.next
// No need to copy the tail.
return
}
// There is another element in front of the one we want to remove.
// We have to copy it to be able to change metaElement.next.
cpy := &metaElement{key: e.key, value: e.value}
*current = cpy
current = &cpy.next
}
}
// Set a key to a value.
//
// If value is nil, the key is removed. Avoids modifying old metadata by
// copying if necessary.
func (m *Metadata) Set(key, value interface{}) {
if e := m.find(key); e != nil {
if e.value == value {
// Key is present and the value is the same. Nothing to do.
return
}
// Key is present with a different value. Create a copy of the list
// which doesn't have the element in it.
m.remove(e)
}
// m.head is now a linked list that doesn't contain key.
if value == nil {
return
}
m.head = &metaElement{key: key, value: value, next: m.head}
}
// Get the value of a key.
//
// Returns nil if no value with the given key is present.
func (m *Metadata) Get(key interface{}) interface{} {
if e := m.find(key); e != nil {
return e.value
}
return nil
}
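
A short sketch of the copy-on-write behaviour this list provides; myKey is a hypothetical key type (real keys such as symbolMeta stay unexported inside asm):

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/asm"
    )

    type myKey struct{}

    func main() {
        var a asm.Metadata
        a.Set(myKey{}, "old")

        b := a                // the copy shares the underlying list
        b.Set(myKey{}, "new") // Set copies just enough to leave a intact

        fmt.Println(a.Get(myKey{})) // "old"
        fmt.Println(b.Get(myKey{})) // "new"
    }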


@@ -18,22 +18,23 @@ type Class uint8
const classMask OpCode = 0x07
const (
// LdClass load memory
// LdClass loads immediate values into registers.
// Also used for non-standard load operations from cBPF.
LdClass Class = 0x00
// LdXClass load memory from constant
// LdXClass loads memory into registers.
LdXClass Class = 0x01
// StClass load register from memory
// StClass stores immediate values to memory.
StClass Class = 0x02
// StXClass load register from constant
// StXClass stores registers to memory.
StXClass Class = 0x03
// ALUClass arithmetic operators
// ALUClass describes arithmetic operators.
ALUClass Class = 0x04
// JumpClass jump operators
// JumpClass describes jump operators.
JumpClass Class = 0x05
// Jump32Class jump operators with 32 bit comparaisons
// Requires kernel 5.1
// Jump32Class describes jump operators with 32-bit comparisons.
// Requires kernel 5.1.
Jump32Class Class = 0x06
// ALU64Class arithmetic in 64 bit mode
// ALU64Class describes arithmetic operators in 64-bit mode.
ALU64Class Class = 0x07
)


@@ -1,6 +1,7 @@
package btf
import (
"bufio"
"bytes"
"debug/elf"
"encoding/binary"
@@ -10,7 +11,6 @@ import (
"math"
"os"
"reflect"
"sync"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
@@ -33,20 +33,19 @@ type ID uint32
type Spec struct {
// Data from .BTF.
rawTypes []rawType
strings stringTable
strings *stringTable
// Inflated Types.
types []Type
// All types contained by the spec, the position of a type in the slice
// is its ID.
types types
// Type IDs indexed by type.
typeIDs map[Type]TypeID
// Types indexed by essential name.
// Includes all struct flavors and types with the same name.
namedTypes map[essentialName][]Type
// Data from .BTF.ext.
funcInfos map[string]FuncInfo
lineInfos map[string]LineInfos
coreRelos map[string]CoreRelos
byteOrder binary.ByteOrder
}
@@ -74,25 +73,60 @@ func (h *btfHeader) stringStart() int64 {
return int64(h.HdrLen + h.StringOff)
}
// LoadSpec opens file and calls LoadSpecFromReader on it.
func LoadSpec(file string) (*Spec, error) {
fh, err := os.Open(file)
if err != nil {
return nil, err
}
defer fh.Close()
return LoadSpecFromReader(fh)
}
// LoadSpecFromReader reads from an ELF or a raw BTF blob.
//
// Returns ErrNotFound if reading from an ELF which contains no BTF.
// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos
// may be nil.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
if bo := guessRawBTFByteOrder(rd); bo != nil {
// Try to parse a naked BTF blob. This will return an error if
// we encounter a Datasec, since we can't fix it up.
return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
return spec, err
}
return nil, err
}
defer file.Close()
return loadSpecFromELF(file)
}
// LoadSpecAndExtInfosFromReader reads from an ELF.
//
// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
return nil, nil, err
}
spec, err := loadSpecFromELF(file)
if err != nil {
return nil, nil, err
}
extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
if err != nil && !errors.Is(err, ErrNotFound) {
return nil, nil, err
}
return spec, extInfos, nil
}
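
A hedged usage sketch of the new entry point; prog.o stands in for any ELF carrying .BTF and .BTF.ext sections:

    package main

    import (
        "fmt"
        "os"

        "github.com/cilium/ebpf/btf"
    )

    func main() {
        f, err := os.Open("prog.o") // hypothetical object file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        spec, extInfos, err := btf.LoadSpecAndExtInfosFromReader(f)
        if err != nil {
            panic(err)
        }
        fmt.Println(spec != nil, extInfos != nil)
    }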
// variableOffsets extracts all symbols offsets from an ELF and indexes them by
// section and variable name.
//
@@ -132,7 +166,6 @@ func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
var (
btfSection *elf.Section
btfExtSection *elf.Section
sectionSizes = make(map[string]uint32)
)
@@ -140,8 +173,6 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
switch sec.Name {
case ".BTF":
btfSection = sec
case ".BTF.ext":
btfExtSection = sec
default:
if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
break
@@ -164,108 +195,14 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, err
}
spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, vars)
if err != nil {
return nil, err
if btfSection.ReaderAt == nil {
return nil, fmt.Errorf("compressed BTF is not supported")
}
if btfExtSection == nil {
return spec, nil
return loadRawSpec(btfSection.ReaderAt, file.ByteOrder, sectionSizes, vars)
}
if btfExtSection.ReaderAt == nil {
return nil, fmt.Errorf("compressed ext_info is not supported")
}
extInfo, err := loadExtInfos(btfExtSection, file.ByteOrder, spec.strings)
if err != nil {
return nil, fmt.Errorf("can't parse ext info: %w", err)
}
if err := spec.splitExtInfos(extInfo); err != nil {
return nil, fmt.Errorf("linking funcInfos and lineInfos: %w", err)
}
return spec, nil
}
// splitExtInfos takes FuncInfos, LineInfos and CoreRelos indexed by section and
// transforms them to be indexed by function. Retrieves function names from
// the BTF spec.
func (spec *Spec) splitExtInfos(info *extInfo) error {
ofi := make(map[string]FuncInfo)
oli := make(map[string]LineInfos)
ocr := make(map[string]CoreRelos)
for secName, secFuncs := range info.funcInfos {
// Collect functions from each section and organize them by name.
for _, fi := range secFuncs {
name, err := fi.Name(spec)
if err != nil {
return fmt.Errorf("looking up function name: %w", err)
}
// FuncInfo offsets are scoped to the ELF section. Zero them out
// since they are meaningless outside of that context. The linker
// will determine the offset of the function within the final
// instruction stream before handing it off to the kernel.
fi.InsnOff = 0
ofi[name] = fi
}
// Attribute LineInfo records to their respective functions, if any.
if lines := info.lineInfos[secName]; lines != nil {
for _, li := range lines {
fi := secFuncs.funcForOffset(li.InsnOff)
if fi == nil {
return fmt.Errorf("section %s: error looking up FuncInfo for LineInfo %v", secName, li)
}
// Offsets are ELF section-scoped, make them function-scoped by
// subtracting the function's start offset.
li.InsnOff -= fi.InsnOff
name, err := fi.Name(spec)
if err != nil {
return fmt.Errorf("looking up function name: %w", err)
}
oli[name] = append(oli[name], li)
}
}
// Attribute CO-RE relocations to their respective functions, if any.
if relos := info.relos[secName]; relos != nil {
for _, r := range relos {
fi := secFuncs.funcForOffset(r.insnOff)
if fi == nil {
return fmt.Errorf("section %s: error looking up FuncInfo for CO-RE relocation %v", secName, r)
}
// Offsets are ELF section-scoped, make them function-scoped by
// subtracting the function's start offset.
r.insnOff -= fi.InsnOff
name, err := fi.Name(spec)
if err != nil {
return fmt.Errorf("looking up function name: %w", err)
}
ocr[name] = append(ocr[name], r)
}
}
}
spec.funcInfos = ofi
spec.lineInfos = oli
spec.coreRelos = ocr
return nil
}
func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
rawTypes, rawStrings, err := parseBTF(btf, bo)
if err != nil {
return nil, err
@@ -276,46 +213,52 @@ func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uin
return nil, err
}
types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
types, err := inflateRawTypes(rawTypes, rawStrings)
if err != nil {
return nil, err
}
typeIDs, typesByName := indexTypes(types)
return &Spec{
rawTypes: rawTypes,
namedTypes: typesByName,
typeIDs: typeIDs,
types: types,
strings: rawStrings,
byteOrder: bo,
}, nil
}
var kernelBTF struct {
sync.Mutex
*Spec
func indexTypes(types []Type) (map[Type]TypeID, map[essentialName][]Type) {
namedTypes := 0
for _, typ := range types {
if typ.TypeName() != "" {
// Do a pre-pass to figure out how big types by name has to be.
// Most types have unique names, so it's OK to ignore essentialName
// here.
namedTypes++
}
}
typeIDs := make(map[Type]TypeID, len(types))
typesByName := make(map[essentialName][]Type, namedTypes)
for i, typ := range types {
if name := newEssentialName(typ.TypeName()); name != "" {
typesByName[name] = append(typesByName[name], typ)
}
typeIDs[typ] = TypeID(i)
}
return typeIDs, typesByName
}
// LoadKernelSpec returns the current kernel's BTF information.
//
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
// ErrNotSupported if BTF is not enabled.
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
kernelBTF.Lock()
defer kernelBTF.Unlock()
if kernelBTF.Spec != nil {
return kernelBTF.Spec, nil
}
var err error
kernelBTF.Spec, err = loadKernelSpec()
return kernelBTF.Spec, err
}
// loadKernelSpec attempts to load the raw vmlinux BTF blob at
// /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs.
func loadKernelSpec() (*Spec, error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
@@ -352,11 +295,11 @@ func findVMLinux() (*internal.SafeELFFile, error) {
}
for _, loc := range locations {
fh, err := os.Open(fmt.Sprintf(loc, release))
if err != nil {
file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release))
if errors.Is(err, os.ErrNotExist) {
continue
}
return internal.NewSafeELFFile(fh)
return file, err
}
return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
@@ -394,11 +337,13 @@ func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
}
func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
buf := new(bufio.Reader)
for _, bo := range []binary.ByteOrder{
binary.LittleEndian,
binary.BigEndian,
} {
if _, err := parseBTFHeader(io.NewSectionReader(r, 0, math.MaxInt64), bo); err == nil {
buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
if _, err := parseBTFHeader(buf, bo); err == nil {
return bo
}
}
@@ -408,26 +353,20 @@ func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
// parseBTF reads a .BTF section into memory and parses it into a list of
// raw types and a string table.
func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) {
rawBTF, err := io.ReadAll(btf)
if err != nil {
return nil, nil, fmt.Errorf("can't read BTF: %v", err)
}
rd := bytes.NewReader(rawBTF)
header, err := parseBTFHeader(rd, bo)
func parseBTF(btf io.ReaderAt, bo binary.ByteOrder) ([]rawType, *stringTable, error) {
buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
header, err := parseBTFHeader(buf, bo)
if err != nil {
return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
}
buf := io.NewSectionReader(rd, header.stringStart(), int64(header.StringLen))
rawStrings, err := readStringTable(buf)
rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)))
if err != nil {
return nil, nil, fmt.Errorf("can't read type names: %w", err)
}
buf = io.NewSectionReader(rd, header.typeStart(), int64(header.TypeLen))
rawTypes, err := readTypes(buf, bo)
buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
rawTypes, err := readTypes(buf, bo, header.TypeLen)
if err != nil {
return nil, nil, fmt.Errorf("can't read types: %w", err)
}
@@ -440,7 +379,7 @@ type variable struct {
name string
}
func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
for i, rawType := range rawTypes {
if rawType.Kind() != kindDatasec {
continue
@@ -492,25 +431,17 @@ func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[s
// Copy creates a copy of Spec.
func (s *Spec) Copy() *Spec {
types, _ := copyTypes(s.types, nil)
types := copyTypes(s.types, nil)
namedTypes := make(map[essentialName][]Type)
for _, typ := range types {
if name := typ.TypeName(); name != "" {
en := newEssentialName(name)
namedTypes[en] = append(namedTypes[en], typ)
}
}
typeIDs, typesByName := indexTypes(types)
// NB: Other parts of spec are not copied since they are immutable.
return &Spec{
s.rawTypes,
s.strings,
types,
namedTypes,
s.funcInfos,
s.lineInfos,
s.coreRelos,
typeIDs,
typesByName,
s.byteOrder,
}
}
@@ -546,7 +477,11 @@ func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
typeLen := uint32(buf.Len() - headerLen)
// Write string section after type section.
_, _ = buf.Write(s.strings)
stringsLen := s.strings.Length()
buf.Grow(stringsLen)
if err := s.strings.Marshal(&buf); err != nil {
return nil, err
}
// Fill out the header, and write it out.
header = &btfHeader{
@@ -557,7 +492,7 @@ func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(len(s.strings)),
StringLen: uint32(stringsLen),
}
raw := buf.Bytes()
@@ -579,35 +514,29 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
return copy(sw, p), nil
}
// Program finds the BTF for a specific function.
//
// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
// contain extended BTF info.
func (s *Spec) Program(name string) (*Program, error) {
if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
return nil, fmt.Errorf("BTF for function %s: %w", name, ErrNoExtendedInfo)
}
funcInfos, funcOK := s.funcInfos[name]
lineInfos, lineOK := s.lineInfos[name]
relos, coreOK := s.coreRelos[name]
if !funcOK && !lineOK && !coreOK {
return nil, fmt.Errorf("no extended BTF info for function %s", name)
}
return &Program{s, funcInfos, lineInfos, relos}, nil
}
// TypeByID returns the BTF Type with the given type ID.
//
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
if int(id) > len(s.types) {
return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
}
return s.types[id], nil
return s.types.ByID(id)
// TypeID returns the ID for a given Type.
//
// Returns an error wrapping ErrNotFound if the type isn't part of the Spec.
func (s *Spec) TypeID(typ Type) (TypeID, error) {
if _, ok := typ.(*Void); ok {
// Equality is weird for void, since it is a zero sized type.
return 0, nil
}
id, ok := s.typeIDs[typ]
if !ok {
return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
}
return id, nil
}
// AnyTypesByName returns a list of BTF Types with the given name.
@@ -635,6 +564,22 @@ func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
return result, nil
}
// AnyTypeByName returns a Type with the given name.
//
// Returns an error if multiple types of that name exist.
func (s *Spec) AnyTypeByName(name string) (Type, error) {
types, err := s.AnyTypesByName(name)
if err != nil {
return nil, err
}
if len(types) > 1 {
return nil, fmt.Errorf("found multiple types: %v", types)
}
return types[0], nil
}
// TypeByName searches for a Type with a specific name. Since multiple
// Types with the same name can exist, the parameter typ is taken to
// narrow down the search in case of a clash.
@@ -688,6 +633,30 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
return nil
}
// TypesIterator iterates over types of a given spec.
type TypesIterator struct {
spec *Spec
index int
// The last visited type in the spec.
Type Type
}
// Iterate returns the types iterator.
func (s *Spec) Iterate() *TypesIterator {
return &TypesIterator{spec: s, index: 0}
}
// Next returns true as long as there are any remaining types.
func (iter *TypesIterator) Next() bool {
if len(iter.spec.types) <= iter.index {
return false
}
iter.Type = iter.spec.types[iter.index]
iter.index++
return true
}
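
A combined sketch of the new AnyTypeByName and TypesIterator APIs, assuming a BTF-enabled kernel (the type name is illustrative):

    package main

    import (
        "fmt"

        "github.com/cilium/ebpf/btf"
    )

    func main() {
        spec, err := btf.LoadKernelSpec() // needs /sys/kernel/btf/vmlinux or a fallback
        if err != nil {
            panic(err)
        }

        // Errors out if several types share the name (e.g. struct flavors).
        if typ, err := spec.AnyTypeByName("task_struct"); err == nil {
            fmt.Println("found:", typ.TypeName())
        }

        // The iterator visits every type in the spec, in ID order.
        count := 0
        for iter := spec.Iterate(); iter.Next(); {
            count++
        }
        fmt.Println(count, "types")
    }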
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
spec *Spec
@@ -730,6 +699,7 @@ func NewHandle(spec *Spec) (*Handle, error) {
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1
_, logErr := sys.BtfLoad(attr)
// NB: The syscall will never return ENOSPC as of 5.18-rc4.
return nil, internal.ErrorWithLog(err, logBuf, logErr)
}
@@ -775,44 +745,6 @@ func (h *Handle) FD() int {
return h.fd.Int()
}
// Map is the BTF for a map.
type Map struct {
Spec *Spec
Key, Value Type
}
// Program is the BTF information for a stream of instructions.
type Program struct {
spec *Spec
FuncInfo FuncInfo
LineInfos LineInfos
CoreRelos CoreRelos
}
// Spec returns the BTF spec of this program.
func (p *Program) Spec() *Spec {
return p.spec
}
// Fixups returns the changes required to adjust the program to the target.
//
// Passing a nil target will relocate against the running kernel.
func (p *Program) Fixups(target *Spec) (COREFixups, error) {
if len(p.CoreRelos) == 0 {
return nil, nil
}
if target == nil {
var err error
target, err = LoadKernelSpec()
if err != nil {
return nil, err
}
}
return coreRelocate(p.spec, target, p.CoreRelos)
}
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
const minHeaderLength = 24


@@ -177,6 +177,10 @@ func (bt *btfType) Size() uint32 {
return bt.SizeType
}
func (bt *btfType) SetSize(size uint32) {
bt.SizeType = size
}
type rawType struct {
btfType
data interface{}
@@ -226,11 +230,14 @@ type btfParam struct {
Type TypeID
}
func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
var (
header btfType
types []rawType
)
func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
var header btfType
// Because of the interleaving between types and struct members it is
// difficult to precompute the number of raw types this will parse,
// so this "guess" is a good first estimate.
sizeOfbtfType := uintptr(binary.Size(btfType{}))
tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
types := make([]rawType, 0, tyMaxCount)
for id := TypeID(1); ; id++ {
if err := binary.Read(r, bo, &header); err == io.EOF {
@@ -282,6 +289,6 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
}
}
func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
func intEncoding(raw uint32) (IntEncoding, Bits, Bits) {
return IntEncoding((raw & 0x0f000000) >> 24), Bits(raw&0x00ff0000) >> 16, Bits(raw & 0x000000ff)
}


@@ -1,11 +1,11 @@
package btf
import (
"encoding/binary"
"errors"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"strings"
@@ -17,50 +17,58 @@ import (
// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
Kind COREKind
Local uint32
Target uint32
Poison bool
kind coreKind
local uint32
target uint32
// True if there is no valid fixup. The instruction is replaced with an
// invalid dummy.
poison bool
// True if the validation of the local value should be skipped. Used by
// some kinds of bitfield relocations.
skipLocalValidation bool
}
func (f COREFixup) equal(other COREFixup) bool {
return f.Local == other.Local && f.Target == other.Target
func (f *COREFixup) equal(other COREFixup) bool {
return f.local == other.local && f.target == other.target
}
func (f COREFixup) String() string {
if f.Poison {
return fmt.Sprintf("%s=poison", f.Kind)
func (f *COREFixup) String() string {
if f.poison {
return fmt.Sprintf("%s=poison", f.kind)
}
return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target)
return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
}
func (f COREFixup) apply(ins *asm.Instruction) error {
if f.Poison {
return errors.New("can't poison individual instruction")
func (f *COREFixup) Apply(ins *asm.Instruction) error {
if f.poison {
const badRelo = 0xbad2310
*ins = asm.BuiltinFunc(badRelo).Call()
return nil
}
switch class := ins.OpCode.Class(); class {
case asm.LdXClass, asm.StClass, asm.StXClass:
if want := int16(f.Local); want != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
}
if f.Target > math.MaxInt16 {
return fmt.Errorf("offset %d exceeds MaxInt16", f.Target)
if f.target > math.MaxInt16 {
return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
}
ins.Offset = int16(f.Target)
ins.Offset = int16(f.target)
case asm.LdClass:
if !ins.IsConstantLoad(asm.DWord) {
return fmt.Errorf("not a dword-sized immediate load")
}
if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
}
ins.Constant = int64(f.Target)
ins.Constant = int64(f.target)
case asm.ALUClass:
if ins.OpCode.ALUOp() == asm.Swap {
@@ -74,15 +82,15 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
return fmt.Errorf("invalid source %s", src)
}
if want := int64(f.Local); want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
}
if f.Target > math.MaxInt32 {
return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target)
if f.target > math.MaxInt32 {
return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
}
ins.Constant = int64(f.Target)
ins.Constant = int64(f.target)
default:
return fmt.Errorf("invalid class %s", class)
@@ -92,57 +100,14 @@ func (f COREFixup) apply(ins *asm.Instruction) error {
}
func (f COREFixup) isNonExistant() bool {
return f.Kind.checksForExistence() && f.Target == 0
return f.kind.checksForExistence() && f.target == 0
}
type COREFixups map[uint64]COREFixup
// Apply returns a copy of insns with CO-RE relocations applied.
func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
if len(fs) == 0 {
cpy := make(asm.Instructions, len(insns))
copy(cpy, insns)
return insns, nil
}
cpy := make(asm.Instructions, 0, len(insns))
iter := insns.Iterate()
for iter.Next() {
fixup, ok := fs[iter.Offset.Bytes()]
if !ok {
cpy = append(cpy, *iter.Ins)
continue
}
ins := *iter.Ins
if fixup.Poison {
const badRelo = asm.BuiltinFunc(0xbad2310)
cpy = append(cpy, badRelo.Call())
if ins.OpCode.IsDWordLoad() {
// 64 bit constant loads occupy two raw bpf instructions, so
// we need to add another instruction as padding.
cpy = append(cpy, badRelo.Call())
}
continue
}
if err := fixup.apply(&ins); err != nil {
return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
}
cpy = append(cpy, ins)
}
return cpy, nil
}
// COREKind is the type of CO-RE relocation
type COREKind uint32
// coreKind is the type of CO-RE relocation as specified in BPF source code.
type coreKind uint32
const (
reloFieldByteOffset COREKind = iota /* field byte offset */
reloFieldByteOffset coreKind = iota /* field byte offset */
reloFieldByteSize /* field size in bytes */
reloFieldExists /* field existence in target kernel */
reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
@@ -156,7 +121,11 @@ const (
reloEnumvalValue /* enum value integer value */
)
func (k COREKind) String() string {
func (k coreKind) checksForExistence() bool {
return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}
func (k coreKind) String() string {
switch k {
case reloFieldByteOffset:
return "byte_off"
@@ -187,19 +156,28 @@ func (k COREKind) String() string {
}
}
func (k COREKind) checksForExistence() bool {
return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}
func coreRelocate(local, target *Spec, relos CoreRelos) (COREFixups, error) {
// CORERelocate calculates the difference in types between local and target.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
if local.byteOrder != target.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
}
var ids []TypeID
relosByID := make(map[TypeID]CoreRelos)
result := make(COREFixups, len(relos))
for _, relo := range relos {
type reloGroup struct {
relos []*CORERelocation
// Position of each relocation in relos.
indices []int
}
// Split relocations into per Type lists.
relosByType := make(map[Type]*reloGroup)
result := make([]COREFixup, len(relos))
for i, relo := range relos {
if relo.kind == reloTypeIDLocal {
// Filtering out reloTypeIDLocal here makes our lives a lot easier
// down the line, since it doesn't have a target at all.
@@ -207,47 +185,42 @@ func coreRelocate(local, target *Spec, relos CoreRelos) (COREFixups, error) {
return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
}
result[uint64(relo.insnOff)] = COREFixup{
relo.kind,
uint32(relo.typeID),
uint32(relo.typeID),
false,
id, err := local.TypeID(relo.typ)
if err != nil {
return nil, fmt.Errorf("%s: %w", relo.kind, err)
}
result[i] = COREFixup{
kind: relo.kind,
local: uint32(id),
target: uint32(id),
}
continue
}
relos, ok := relosByID[relo.typeID]
group, ok := relosByType[relo.typ]
if !ok {
ids = append(ids, relo.typeID)
group = &reloGroup{}
relosByType[relo.typ] = group
}
relosByID[relo.typeID] = append(relos, relo)
group.relos = append(group.relos, relo)
group.indices = append(group.indices, i)
}
// Ensure we work on relocations in a deterministic order.
sort.Slice(ids, func(i, j int) bool {
return ids[i] < ids[j]
})
for _, id := range ids {
if int(id) >= len(local.types) {
return nil, fmt.Errorf("invalid type id %d", id)
}
localType := local.types[id]
for localType, group := range relosByType {
localTypeName := localType.TypeName()
if localTypeName == "" {
return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
}
relos := relosByID[id]
targets := target.namedTypes[newEssentialName(localTypeName)]
fixups, err := coreCalculateFixups(localType, targets, relos)
fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
for i, relo := range relos {
result[uint64(relo.insnOff)] = fixups[i]
for j, index := range group.indices {
result[index] = fixups[j]
}
}
@@ -262,30 +235,30 @@ var errImpossibleRelocation = errors.New("impossible relocation")
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFixup, error) {
localID := local.ID()
local, err := copyType(local, skipQualifiersAndTypedefs)
func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
localID, err := localSpec.TypeID(local)
if err != nil {
return nil, err
return nil, fmt.Errorf("local type ID: %w", err)
}
local = Copy(local, UnderlyingType)
bestScore := len(relos)
var bestFixups []COREFixup
for i := range targets {
targetID := targets[i].ID()
target, err := copyType(targets[i], skipQualifiersAndTypedefs)
targetID, err := targetSpec.TypeID(targets[i])
if err != nil {
return nil, err
return nil, fmt.Errorf("target type ID: %w", err)
}
target := Copy(targets[i], UnderlyingType)
score := 0 // lower is better
fixups := make([]COREFixup, 0, len(relos))
for _, relo := range relos {
fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
}
if fixup.Poison || fixup.isNonExistant() {
if fixup.poison || fixup.isNonExistant() {
score++
}
fixups = append(fixups, fixup)
@@ -307,17 +280,23 @@ func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFix
// the fixups agree with each other.
for i, fixup := range bestFixups {
if !fixup.equal(fixups[i]) {
return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation)
return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
}
}
}
if bestFixups == nil {
// Nothing at all matched, probably because there are no suitable
// targets at all. Poison everything!
// targets at all.
//
// Poison everything except checksForExistence.
bestFixups = make([]COREFixup, len(relos))
for i, relo := range relos {
bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
if relo.kind.checksForExistence() {
bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
} else {
bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
}
}
}
@@ -326,15 +305,18 @@ func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFix
// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo CoreRelo) (COREFixup, error) {
func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
fixup := func(local, target uint32) (COREFixup, error) {
return COREFixup{relo.kind, local, target, false}, nil
return COREFixup{kind: relo.kind, local: local, target: target}, nil
}
fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
}
poison := func() (COREFixup, error) {
if relo.kind.checksForExistence() {
return fixup(1, 0)
}
return COREFixup{relo.kind, 0, 0, true}, nil
return COREFixup{kind: relo.kind, poison: true}, nil
}
zero := COREFixup{}
@@ -390,7 +372,20 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
return fixup(uint32(localValue.Value), uint32(targetValue.Value))
}
case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
case reloFieldSigned:
switch local.(type) {
case *Enum:
return fixup(1, 1)
case *Int:
return fixup(
uint32(local.(*Int).Encoding&Signed),
uint32(target.(*Int).Encoding&Signed),
)
default:
return fixupWithoutValidation(0, 0)
}
case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
if _, ok := target.(*Fwd); ok {
// We can't relocate fields using a forward declaration, so
// skip it. If a non-forward declaration is present in the BTF
@@ -406,12 +401,17 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
return zero, fmt.Errorf("target %s: %w", target, err)
}
maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
f.skipLocalValidation = localField.bitfieldSize > 0
return f, err
}
switch relo.kind {
case reloFieldExists:
return fixup(1, 1)
case reloFieldByteOffset:
return fixup(localField.offset/8, targetField.offset/8)
return maybeSkipValidation(fixup(localField.offset, targetField.offset))
case reloFieldByteSize:
localSize, err := Sizeof(localField.Type)
@@ -423,9 +423,34 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
if err != nil {
return zero, err
}
return fixup(uint32(localSize), uint32(targetSize))
return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))
case reloFieldLShiftU64:
var target uint32
if byteOrder == binary.LittleEndian {
targetSize, err := targetField.sizeBits()
if err != nil {
return zero, err
}
target = uint32(64 - targetField.bitfieldOffset - targetSize)
} else {
loadWidth, err := Sizeof(targetField.Type)
if err != nil {
return zero, err
}
target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
}
return fixupWithoutValidation(0, target)
case reloFieldRShiftU64:
targetSize, err := targetField.sizeBits()
if err != nil {
return zero, err
}
return fixupWithoutValidation(0, uint32(64-targetSize))
}
}
@@ -462,7 +487,7 @@ func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID
*/
type coreAccessor []int
func parseCoreAccessor(accessor string) (coreAccessor, error) {
func parseCOREAccessor(accessor string) (coreAccessor, error) {
if accessor == "" {
return nil, fmt.Errorf("empty accessor")
}
@@ -508,18 +533,73 @@ func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
return &e.Values[i], nil
}
// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
// /- start of composite
// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
// \- start of field end of field -/
type coreField struct {
Type Type
// The position of the field from the start of the composite type in bytes.
offset uint32
// The offset of the bitfield in bits from the start of the field.
bitfieldOffset Bits
// The size of the bitfield in bits.
//
// Zero if the field is not a bitfield.
bitfieldSize Bits
}
func adjustOffset(base uint32, t Type, n int) (uint32, error) {
size, err := Sizeof(t)
func (cf *coreField) adjustOffsetToNthElement(n int) error {
size, err := Sizeof(cf.Type)
if err != nil {
return 0, err
return err
}
return base + (uint32(n) * uint32(size) * 8), nil
cf.offset += uint32(n) * uint32(size)
return nil
}
func (cf *coreField) adjustOffsetBits(offset Bits) error {
align, err := alignof(cf.Type)
if err != nil {
return err
}
// We can compute the load offset by:
// 1) converting the bit offset to bytes with a flooring division.
// 2) dividing and multiplying that offset by the alignment, yielding the
// load size aligned offset.
offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
// The number of bits remaining is the bit offset less the number of bits
// we can "skip" with the aligned offset.
cf.bitfieldOffset = offset - Bits(offsetBytes*8)
// We know that cf.offset is aligned at to at least align since we get it
// from the compiler via BTF. Adding an aligned offsetBytes preserves the
// alignment.
cf.offset += offsetBytes
return nil
}
func (cf *coreField) sizeBits() (Bits, error) {
if cf.bitfieldSize > 0 {
return cf.bitfieldSize, nil
}
// Someone is trying to access a non-bitfield via a bit shift relocation.
// This happens when a field changes from a bitfield to a regular field
// between kernel versions. Synthesise the size to make the shifts work.
size, err := Sizeof(cf.Type)
if err != nil {
return 0, err
}
return Bits(size * 8), nil
}
// coreFindField descends into the local type using the accessor and tries to
@@ -527,32 +607,33 @@ func adjustOffset(base uint32, t Type, n int) (uint32, error) {
//
// Returns the field and the offset of the field from the start of
// target in bits.
func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
local := coreField{Type: localT}
target := coreField{Type: targetT}
// The first index is used to offset a pointer of the base type like
// when accessing an array.
localOffset, err := adjustOffset(0, local, localAcc[0])
if err != nil {
if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
return coreField{}, coreField{}, err
}
targetOffset, err := adjustOffset(0, target, localAcc[0])
if err != nil {
if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
return coreField{}, coreField{}, err
}
if err := coreAreMembersCompatible(local, target); err != nil {
if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
var localMaybeFlex, targetMaybeFlex bool
for _, acc := range localAcc[1:] {
switch localType := local.(type) {
for i, acc := range localAcc[1:] {
switch localType := local.Type.(type) {
case composite:
// For composite types acc is used to find the field in the local type,
// and then we try to find a field in target with the same name.
localMembers := localType.members()
if acc >= len(localMembers) {
return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
}
localMember := localMembers[acc]
@@ -563,13 +644,15 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
}
// This is an anonymous struct or union, ignore it.
local = localMember.Type
localOffset += localMember.OffsetBits
local = coreField{
Type: localMember.Type,
offset: local.offset + localMember.Offset.Bytes(),
}
localMaybeFlex = false
continue
}
targetType, ok := target.(composite)
targetType, ok := target.Type.(composite)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
}
@@ -579,20 +662,43 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
return coreField{}, coreField{}, err
}
if targetMember.BitfieldSize > 0 {
return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
local = coreField{
Type: localMember.Type,
offset: local.offset,
bitfieldSize: localMember.BitfieldSize,
}
localMaybeFlex = acc == len(localMembers)-1
target = coreField{
Type: targetMember.Type,
offset: target.offset,
bitfieldSize: targetMember.BitfieldSize,
}
targetMaybeFlex = last
if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
local.offset += localMember.Offset.Bytes()
target.offset += targetMember.Offset.Bytes()
break
}
local = localMember.Type
localMaybeFlex = acc == len(localMembers)-1
localOffset += localMember.OffsetBits
target = targetMember.Type
targetMaybeFlex = last
targetOffset += targetMember.OffsetBits
// Either of the members is a bitfield. Make sure we're at the
// end of the accessor.
if next := i + 1; next < len(localAcc[1:]) {
return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
}
if err := local.adjustOffsetBits(localMember.Offset); err != nil {
return coreField{}, coreField{}, err
}
if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
return coreField{}, coreField{}, err
}
case *Array:
// For arrays, acc is the index in the target.
targetType, ok := target.(*Array)
targetType, ok := target.Type.(*Array)
if !ok {
return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
}
@@ -611,17 +717,23 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
}
local = localType.Type
local = coreField{
Type: localType.Type,
offset: local.offset,
}
localMaybeFlex = false
localOffset, err = adjustOffset(localOffset, local, acc)
if err != nil {
if err := local.adjustOffsetToNthElement(acc); err != nil {
return coreField{}, coreField{}, err
}
target = targetType.Type
target = coreField{
Type: targetType.Type,
offset: target.offset,
}
targetMaybeFlex = false
targetOffset, err = adjustOffset(targetOffset, target, acc)
if err != nil {
if err := target.adjustOffsetToNthElement(acc); err != nil {
return coreField{}, coreField{}, err
}
@@ -629,12 +741,12 @@ func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreFie
return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
}
if err := coreAreMembersCompatible(local, target); err != nil {
if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
return coreField{}, coreField{}, err
}
}
return coreField{local, localOffset}, coreField{target, targetOffset}, nil
return local, target, nil
}
// coreFindMember finds a member in a composite type while handling anonymous
@@ -646,7 +758,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
type offsetTarget struct {
composite
offset uint32
offset Bits
}
targets := []offsetTarget{{typ, 0}}
@@ -670,7 +782,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
for j, member := range members {
if member.Name == name {
// NB: This is safe because member is a copy.
member.OffsetBits += target.offset
member.Offset += target.offset
return member, j == len(members)-1, nil
}
@@ -685,7 +797,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
}
targets = append(targets, offsetTarget{comp, target.offset + member.OffsetBits})
targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
}
}
@@ -759,15 +871,9 @@ func coreAreTypesCompatible(localType Type, targetType Type) error {
}
switch lv := (localType).(type) {
case *Void, *Struct, *Union, *Enum, *Fwd:
case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
// Nothing to do here
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
}
case *Pointer, *Array:
depth++
localType.walk(&localTs)
@@ -849,7 +955,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error {
}
switch lv := localType.(type) {
case *Array, *Pointer, *Float:
case *Array, *Pointer, *Float, *Int:
return nil
case *Enum:
@@ -860,42 +966,7 @@ func coreAreMembersCompatible(localType Type, targetType Type) error {
tv := targetType.(*Fwd)
return doNamesMatch(lv.Name, tv.Name)
case *Int:
tv := targetType.(*Int)
if lv.isBitfield() || tv.isBitfield() {
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
}
return nil
default:
return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
}
}
func skipQualifiersAndTypedefs(typ Type) (Type, error) {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
case qualifier:
result = v.qualify()
case *Typedef:
result = v.Type
default:
return result, nil
}
}
return nil, errors.New("exceeded type depth")
}
func skipQualifiers(typ Type) (Type, error) {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
case qualifier:
result = v.qualify()
default:
return result, nil
}
}
return nil, errors.New("exceeded type depth")
}

vendor/github.com/cilium/ebpf/btf/doc.go generated vendored

@@ -2,7 +2,4 @@
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
//
// The API is very much unstable. You should only use this via the main
// ebpf library.
package btf

721
vendor/github.com/cilium/ebpf/btf/ext_info.go generated vendored Normal file

@@ -0,0 +1,721 @@
package btf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"sort"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
)
// ExtInfos contains ELF section metadata.
type ExtInfos struct {
// The slices are sorted by offset in ascending order.
funcInfos map[string][]funcInfo
lineInfos map[string][]lineInfo
relocationInfos map[string][]coreRelocationInfo
}
// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
//
// Returns an error wrapping ErrNotFound if no ext infos are present.
func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
section := file.Section(".BTF.ext")
if section == nil {
return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
}
if section.ReaderAt == nil {
return nil, fmt.Errorf("compressed ext_info is not supported")
}
return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
}
// loadExtInfos parses bare ext infos.
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
// Open unbuffered section reader. binary.Read() calls io.ReadFull on
// the header structs, resulting in one syscall per header.
headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
extHeader, err := parseBTFExtHeader(headerRd, bo)
if err != nil {
return nil, fmt.Errorf("parsing BTF extension header: %w", err)
}
coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
if err != nil {
return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
}
buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
btfFuncInfos, err := parseFuncInfos(buf, bo, strings)
if err != nil {
return nil, fmt.Errorf("parsing BTF function info: %w", err)
}
funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
for section, bfis := range btfFuncInfos {
funcInfos[section], err = newFuncInfos(bfis, ts)
if err != nil {
return nil, fmt.Errorf("section %s: func infos: %w", section, err)
}
}
buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
btfLineInfos, err := parseLineInfos(buf, bo, strings)
if err != nil {
return nil, fmt.Errorf("parsing BTF line info: %w", err)
}
lineInfos := make(map[string][]lineInfo, len(btfLineInfos))
for section, blis := range btfLineInfos {
lineInfos[section], err = newLineInfos(blis, strings)
if err != nil {
return nil, fmt.Errorf("section %s: line infos: %w", section, err)
}
}
if coreHeader == nil || coreHeader.COREReloLen == 0 {
return &ExtInfos{funcInfos, lineInfos, nil}, nil
}
var btfCORERelos map[string][]bpfCORERelo
buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
btfCORERelos, err = parseCORERelos(buf, bo, strings)
if err != nil {
return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
}
coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
for section, brs := range btfCORERelos {
coreRelos[section], err = newRelocationInfos(brs, ts, strings)
if err != nil {
return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
}
}
return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
}
type funcInfoMeta struct{}
type coreRelocationMeta struct{}
// Assign per-section metadata from BTF to a section's instructions.
func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
funcInfos := ei.funcInfos[section]
lineInfos := ei.lineInfos[section]
reloInfos := ei.relocationInfos[section]
iter := insns.Iterate()
for iter.Next() {
if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
funcInfos = funcInfos[1:]
}
if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset {
*iter.Ins = iter.Ins.WithSource(lineInfos[0].line)
lineInfos = lineInfos[1:]
}
if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset {
iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo)
reloInfos = reloInfos[1:]
}
}
}
// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
iter := insns.Iterate()
var fiBuf, liBuf bytes.Buffer
for iter.Next() {
if fn := FuncMetadata(iter.Ins); fn != nil {
fi := &funcInfo{
fn: fn,
offset: iter.Offset,
}
if err := fi.marshal(&fiBuf, typeID); err != nil {
return nil, nil, fmt.Errorf("write func info: %w", err)
}
}
if line, ok := iter.Ins.Source().(*Line); ok {
li := &lineInfo{
line: line,
offset: iter.Offset,
}
if err := li.marshal(&liBuf); err != nil {
return nil, nil, fmt.Errorf("write line info: %w", err)
}
}
}
return fiBuf.Bytes(), liBuf.Bytes(), nil
}
// btfExtHeader is found at the start of the .BTF.ext section.
type btfExtHeader struct {
Magic uint16
Version uint8
Flags uint8
// HdrLen is larger than the size of struct btfExtHeader when it is
// immediately followed by a btfExtCOREHeader.
HdrLen uint32
FuncInfoOff uint32
FuncInfoLen uint32
LineInfoOff uint32
LineInfoLen uint32
}
// parseBTFExtHeader parses the header of the .BTF.ext section.
func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
var header btfExtHeader
if err := binary.Read(r, bo, &header); err != nil {
return nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
if int64(header.HdrLen) < int64(binary.Size(&header)) {
return nil, fmt.Errorf("header length shorter than btfExtHeader size")
}
return &header, nil
}
// funcInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its func_info entries.
func (h *btfExtHeader) funcInfoStart() int64 {
return int64(h.HdrLen + h.FuncInfoOff)
}
// lineInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its line_info entries.
func (h *btfExtHeader) lineInfoStart() int64 {
return int64(h.HdrLen + h.LineInfoOff)
}
// coreReloStart returns the offset from the beginning of the .BTF.ext section
// to the start of its CO-RE relocation entries.
func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
return int64(h.HdrLen + ch.COREReloOff)
}
// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
// field is larger than its size.
type btfExtCOREHeader struct {
COREReloOff uint32
COREReloLen uint32
}
// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
// header bytes are present, extHeader.HdrLen will be larger than the struct,
// indicating the presence of a CO-RE extension header.
func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
extHdrSize := int64(binary.Size(extHeader))
remainder := int64(extHeader.HdrLen) - extHdrSize
if remainder == 0 {
return nil, nil
}
var coreHeader btfExtCOREHeader
if err := binary.Read(r, bo, &coreHeader); err != nil {
return nil, fmt.Errorf("can't read header: %v", err)
}
return &coreHeader, nil
}
type btfExtInfoSec struct {
SecNameOff uint32
NumInfo uint32
}
// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
// appearing within func_info and line_info sub-sections.
// These headers appear once for each program section in the ELF and are
// followed by one or more func/line_info records for the section.
func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
var infoHeader btfExtInfoSec
if err := binary.Read(r, bo, &infoHeader); err != nil {
return "", nil, fmt.Errorf("read ext info header: %w", err)
}
secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return "", nil, fmt.Errorf("get section name: %w", err)
}
if secName == "" {
return "", nil, fmt.Errorf("extinfo header refers to empty section name")
}
if infoHeader.NumInfo == 0 {
return "", nil, fmt.Errorf("section %s has zero records", secName)
}
return secName, &infoHeader, nil
}
// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
// or line_infos segment that describes the length of all extInfoRecords in
// that segment.
func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
const maxRecordSize = 256
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return 0, fmt.Errorf("can't read record size: %v", err)
}
if recordSize < 4 {
// Need at least InsnOff worth of bytes per record.
return 0, errors.New("record size too short")
}
if recordSize > maxRecordSize {
return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
}
return recordSize, nil
}
// The size of a FuncInfo in BTF wire format.
var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))
type funcInfo struct {
fn *Func
offset asm.RawInstructionOffset
}
type bpfFuncInfo struct {
// Instruction offset of the function within an ELF section.
InsnOff uint32
TypeID TypeID
}
func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
typ, err := ts.ByID(fi.TypeID)
if err != nil {
return nil, err
}
fn, ok := typ.(*Func)
if !ok {
return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
}
// C doesn't have anonymous functions, but check just in case.
if fn.Name == "" {
return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
}
return &funcInfo{
fn,
asm.RawInstructionOffset(fi.InsnOff),
}, nil
}
func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
fis := make([]funcInfo, 0, len(bfis))
for _, bfi := range bfis {
fi, err := newFuncInfo(bfi, ts)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
}
fis = append(fis, *fi)
}
sort.Slice(fis, func(i, j int) bool {
return fis[i].offset < fis[j].offset
})
return fis, nil
}
// marshal into the BTF wire format.
func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
id, err := typeID(fi.fn)
if err != nil {
return err
}
bfi := bpfFuncInfo{
InsnOff: uint32(fi.offset),
TypeID: id,
}
return binary.Write(w, internal.NativeEndian, &bfi)
}
// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
if err != nil {
return nil, err
}
result := make(map[string][]bpfFuncInfo)
for {
secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
if err != nil {
return nil, err
}
records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
if err != nil {
return nil, fmt.Errorf("section %v: %w", secName, err)
}
result[secName] = records
}
}
// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
// These records appear after a btf_ext_info_sec header in the func_info
// sub-section of .BTF.ext.
func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) {
var out []bpfFuncInfo
var fi bpfFuncInfo
if exp, got := FuncInfoSize, recordSize; exp != got {
// BTF blob's record size is longer than we know how to parse.
return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
}
for i := uint32(0); i < recordNum; i++ {
if err := binary.Read(r, bo, &fi); err != nil {
return nil, fmt.Errorf("can't read function info: %v", err)
}
if fi.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
}
// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
// Convert as early as possible.
fi.InsnOff /= asm.InstructionSize
out = append(out, fi)
}
return out, nil
}
var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))
// Line represents the location and contents of a single line of source
// code that a BPF ELF was compiled from.
type Line struct {
fileName string
line string
lineNumber uint32
lineColumn uint32
// TODO: We should get rid of the fields below, but for that we need to be
// able to write BTF.
fileNameOff uint32
lineOff uint32
}
func (li *Line) FileName() string {
return li.fileName
}
func (li *Line) Line() string {
return li.line
}
func (li *Line) LineNumber() uint32 {
return li.lineNumber
}
func (li *Line) LineColumn() uint32 {
return li.lineColumn
}
func (li *Line) String() string {
return li.line
}
type lineInfo struct {
line *Line
offset asm.RawInstructionOffset
}
// Constants for the format of bpfLineInfo.LineCol.
const (
bpfLineShift = 10
bpfLineMax = (1 << (32 - bpfLineShift)) - 1
bpfColumnMax = (1 << bpfLineShift) - 1
)
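// An editorial sketch, not part of the upstream diff: how a line number and
// column are packed into and unpacked from bpfLineInfo.LineCol (line 42,
// column 7 are made-up values).
func exampleLineCol() (line, col uint32) {
	lineCol := uint32(42<<bpfLineShift | 7)
	return lineCol >> bpfLineShift, lineCol & bpfColumnMax // 42, 7
}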
type bpfLineInfo struct {
// Instruction offset of the line within the whole instruction stream, in instructions.
InsnOff uint32
FileNameOff uint32
LineOff uint32
LineCol uint32
}
func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
line, err := strings.Lookup(li.LineOff)
if err != nil {
return nil, fmt.Errorf("lookup of line: %w", err)
}
fileName, err := strings.Lookup(li.FileNameOff)
if err != nil {
return nil, fmt.Errorf("lookup of filename: %w", err)
}
lineNumber := li.LineCol >> bpfLineShift
lineColumn := li.LineCol & bpfColumnMax
return &lineInfo{
&Line{
fileName,
line,
lineNumber,
lineColumn,
li.FileNameOff,
li.LineOff,
},
asm.RawInstructionOffset(li.InsnOff),
}, nil
}
func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) {
lis := make([]lineInfo, 0, len(blis))
for _, bli := range blis {
li, err := newLineInfo(bli, strings)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
}
lis = append(lis, *li)
}
sort.Slice(lis, func(i, j int) bool {
return lis[i].offset < lis[j].offset
})
return lis, nil
}
// marshal writes the binary representation of the LineInfo to w.
func (li *lineInfo) marshal(w io.Writer) error {
line := li.line
if line.lineNumber > bpfLineMax {
return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
}
if line.lineColumn > bpfColumnMax {
return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
}
bli := bpfLineInfo{
uint32(li.offset),
line.fileNameOff,
line.lineOff,
(line.lineNumber << bpfLineShift) | line.lineColumn,
}
return binary.Write(w, internal.NativeEndian, &bli)
}
// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
// line infos indexed by section name.
func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
if err != nil {
return nil, err
}
result := make(map[string][]bpfLineInfo)
for {
secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
if err != nil {
return nil, err
}
records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
if err != nil {
return nil, fmt.Errorf("section %v: %w", secName, err)
}
result[secName] = records
}
}
// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
// These records appear after a btf_ext_info_sec header in the line_info
// sub-section of .BTF.ext.
func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) {
var out []bpfLineInfo
var li bpfLineInfo
if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
// BTF blob's record size is longer than we know how to parse.
return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
}
for i := uint32(0); i < recordNum; i++ {
if err := binary.Read(r, bo, &li); err != nil {
return nil, fmt.Errorf("can't read line info: %v", err)
}
if li.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
}
// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
// Convert as early as possible.
li.InsnOff /= asm.InstructionSize
out = append(out, li)
}
return out, nil
}
// bpfCORERelo matches the kernel's struct bpf_core_relo.
type bpfCORERelo struct {
InsnOff uint32
TypeID TypeID
AccessStrOff uint32
Kind coreKind
}
type CORERelocation struct {
typ Type
accessor coreAccessor
kind coreKind
}
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
return relo
}
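// printCORERelos is a hypothetical helper, not part of the upstream diff:
// it shows how the metadata attached by ExtInfos.Assign can be read back
// from an instruction stream via CORERelocationMetadata.
func printCORERelos(insns asm.Instructions) {
	for iter := insns.Iterate(); iter.Next(); {
		if relo := CORERelocationMetadata(iter.Ins); relo != nil {
			fmt.Printf("instruction %d relocates against %s\n", iter.Offset, relo.typ.TypeName())
		}
	}
}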
type coreRelocationInfo struct {
relo *CORERelocation
offset asm.RawInstructionOffset
}
func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
typ, err := ts.ByID(relo.TypeID)
if err != nil {
return nil, err
}
accessorStr, err := strings.Lookup(relo.AccessStrOff)
if err != nil {
return nil, err
}
accessor, err := parseCOREAccessor(accessorStr)
if err != nil {
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
}
return &coreRelocationInfo{
&CORERelocation{
typ,
accessor,
relo.Kind,
},
asm.RawInstructionOffset(relo.InsnOff),
}, nil
}
func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
rs := make([]coreRelocationInfo, 0, len(brs))
for _, br := range brs {
relo, err := newRelocationInfo(br, ts, strings)
if err != nil {
return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
}
rs = append(rs, *relo)
}
sort.Slice(rs, func(i, j int) bool {
return rs[i].offset < rs[j].offset
})
return rs, nil
}
var extInfoReloSize = binary.Size(bpfCORERelo{})
// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
// CO-RE relocations indexed by section name.
func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
if err != nil {
return nil, err
}
if recordSize != uint32(extInfoReloSize) {
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
}
result := make(map[string][]bpfCORERelo)
for {
secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
if err != nil {
return nil, err
}
records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
if err != nil {
return nil, fmt.Errorf("section %v: %w", secName, err)
}
result[secName] = records
}
}
// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
// coreRelos. These records appear after a btf_ext_info_sec header in the
// core_relos sub-section of .BTF.ext.
func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
var out []bpfCORERelo
var relo bpfCORERelo
for i := uint32(0); i < recordNum; i++ {
if err := binary.Read(r, bo, &relo); err != nil {
return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
}
if relo.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
}
// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
// Convert as early as possible.
relo.InsnOff /= asm.InstructionSize
out = append(out, relo)
}
return out, nil
}

vendor/github.com/cilium/ebpf/btf/format.go generated vendored

@@ -63,12 +63,7 @@ func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
return fmt.Errorf("need a name for type %s", typ)
}
typ, err := skipQualifiers(typ)
if err != nil {
return err
}
switch v := typ.(type) {
switch v := skipQualifiers(typ).(type) {
case *Enum:
fmt.Fprintf(&gf.w, "type %s int32", name)
if len(v.Values) == 0 {
@@ -83,10 +78,11 @@ func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
gf.w.WriteString(")")
return nil
}
default:
fmt.Fprintf(&gf.w, "type %s ", name)
return gf.writeTypeLit(typ, 0)
return gf.writeTypeLit(v, 0)
}
}
// writeType outputs the name of a named type or a literal describing the type.
@@ -96,10 +92,7 @@ func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
// foo (if foo is a named type)
// uint32
func (gf *GoFormatter) writeType(typ Type, depth int) error {
typ, err := skipQualifiers(typ)
if err != nil {
return err
}
typ = skipQualifiers(typ)
name := gf.Names[typ]
if name != "" {
@@ -124,12 +117,8 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
return errNestedTooDeep
}
typ, err := skipQualifiers(typ)
if err != nil {
return err
}
switch v := typ.(type) {
var err error
switch v := skipQualifiers(typ).(type) {
case *Int:
gf.writeIntLit(v)
@@ -154,7 +143,7 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
err = gf.writeDatasecLit(v, depth)
default:
return fmt.Errorf("type %s: %w", typ, ErrNotSupported)
return fmt.Errorf("type %T: %w", v, ErrNotSupported)
}
if err != nil {
@@ -190,7 +179,7 @@ func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int)
continue
}
offset := m.OffsetBits / 8
offset := m.Offset.Bytes()
if n := offset - prevOffset; skippedBitfield && n > 0 {
fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
} else {
@@ -217,8 +206,8 @@ func (gf *GoFormatter) writeStructField(m Member, depth int) error {
if m.BitfieldSize > 0 {
return fmt.Errorf("bitfields are not supported")
}
if m.OffsetBits%8 != 0 {
return fmt.Errorf("unsupported offset %d", m.OffsetBits)
if m.Offset%8 != 0 {
return fmt.Errorf("unsupported offset %d", m.Offset)
}
if m.Name == "" {
@@ -302,3 +291,16 @@ func (gf *GoFormatter) writePadding(bytes uint32) {
fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
}
}
func skipQualifiers(typ Type) Type {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
case qualifier:
result = v.qualify()
default:
return result
}
}
return &cycle{typ}
}

112
vendor/github.com/cilium/ebpf/btf/strings.go generated vendored Normal file

@@ -0,0 +1,112 @@
package btf
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
)
type stringTable struct {
offsets []uint32
strings []string
}
// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
type sizedReader interface {
io.Reader
Size() int64
}
func readStringTable(r sizedReader) (*stringTable, error) {
// Derived from vmlinux BTF.
const averageStringLength = 16
n := int(r.Size() / averageStringLength)
offsets := make([]uint32, 0, n)
strings := make([]string, 0, n)
offset := uint32(0)
scanner := bufio.NewScanner(r)
scanner.Split(splitNull)
for scanner.Scan() {
str := scanner.Text()
offsets = append(offsets, offset)
strings = append(strings, str)
offset += uint32(len(str)) + 1
}
if err := scanner.Err(); err != nil {
return nil, err
}
if len(strings) == 0 {
return nil, errors.New("string table is empty")
}
if strings[0] != "" {
return nil, errors.New("first item in string table is non-empty")
}
return &stringTable{offsets, strings}, nil
}
func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
i := bytes.IndexByte(data, 0)
if i == -1 {
if atEOF && len(data) > 0 {
return 0, nil, errors.New("string table isn't null terminated")
}
return 0, nil, nil
}
return i + 1, data[:i], nil
}
func (st *stringTable) Lookup(offset uint32) (string, error) {
i := search(st.offsets, offset)
if i == len(st.offsets) || st.offsets[i] != offset {
return "", fmt.Errorf("offset %d isn't start of a string", offset)
}
return st.strings[i], nil
}
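// An editorial sketch, not part of the upstream diff: a BTF string table is
// a run of NUL-terminated strings starting with an empty one, so for
// "\x00foo\x00bar\x00" the valid offsets are 0 (""), 1 ("foo") and 5 ("bar");
// Lookup(2) fails because "oo" starts mid-string.
func exampleStringTable() (*stringTable, error) {
	return readStringTable(bytes.NewReader([]byte("\x00foo\x00bar\x00")))
}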
func (st *stringTable) Length() int {
last := len(st.offsets) - 1
return int(st.offsets[last]) + len(st.strings[last]) + 1
}
func (st *stringTable) Marshal(w io.Writer) error {
for _, str := range st.strings {
_, err := io.WriteString(w, str)
if err != nil {
return err
}
_, err = w.Write([]byte{0})
if err != nil {
return err
}
}
return nil
}
// search is a copy of sort.Search specialised for uint32.
//
// Licensed under https://go.dev/LICENSE
func search(ints []uint32, needle uint32) int {
// Define f(-1) == false and f(n) == true.
// Invariant: f(i-1) == false, f(j) == true.
i, j := 0, len(ints)
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if !(ints[h] >= needle) {
i = h + 1 // preserves f(i-1) == false
} else {
j = h // preserves f(j) == true
}
}
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
return i
}

vendor/github.com/cilium/ebpf/btf/types.go generated vendored

@@ -2,8 +2,12 @@ package btf
import (
"fmt"
"io"
"math"
"reflect"
"strings"
"github.com/cilium/ebpf/asm"
)
const maxTypeDepth = 32
@@ -11,15 +15,18 @@ const maxTypeDepth = 32
// TypeID identifies a type in a BTF section.
type TypeID uint32
// ID implements part of the Type interface.
func (tid TypeID) ID() TypeID {
return tid
}
// Type represents a type described by BTF.
type Type interface {
// The type ID of the Type within this BTF spec.
ID() TypeID
// Type can be formatted using the %s and %v verbs. %s outputs only the
// identity of the type, without any detail. %v outputs additional detail.
//
// Use the '+' flag to include the address of the type.
//
// Use the width to specify how many levels of detail to output, for example
// %1v will output detail for the root type and a short description of its
// children. %2v would output details of the root type and its children
// as well as a short description of the grandchildren.
fmt.Formatter
// Name of the type, empty for anonymous types and types that cannot
// carry a name, like Void and Pointer.
@@ -31,8 +38,6 @@ type Type interface {
// Enumerate all nested Types. Repeated calls must visit nested
// types in the same order.
walk(*typeDeque)
String() string
}
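// An editorial sketch, not part of the upstream diff: the formatting verbs
// described above, applied to a made-up struct type.
func exampleFormatVerbs() {
	typ := &Struct{Name: "task_struct", Size: 8}
	fmt.Printf("%s\n", typ)  // identity only, e.g. Struct:"task_struct"
	fmt.Printf("%v\n", typ)  // adds detail, e.g. Struct:"task_struct"[fields=0]
	fmt.Printf("%2v\n", typ) // detail for children and grandchildren as well
}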
var (
@@ -48,11 +53,22 @@ var (
_ Type = (*Float)(nil)
)
// types is a list of Type.
//
// The order determines the ID of a type.
type types []Type
func (ts types) ByID(id TypeID) (Type, error) {
if int(id) >= len(ts) {
return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
}
return ts[id], nil
}
// Void is the unit type of BTF.
type Void struct{}
func (v *Void) ID() TypeID { return 0 }
func (v *Void) String() string { return "void#0" }
func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
func (v *Void) TypeName() string { return "" }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
@@ -78,44 +94,34 @@ func (ie IntEncoding) IsBool() bool {
return ie&Bool != 0
}
// Int is an integer of a given length.
type Int struct {
TypeID
func (ie IntEncoding) String() string {
switch {
case ie.IsChar() && ie.IsSigned():
return "char"
case ie.IsChar() && !ie.IsSigned():
return "uchar"
case ie.IsBool():
return "bool"
case ie.IsSigned():
return "signed"
default:
return "unsigned"
}
}
// Int is an integer of a given length.
//
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
type Int struct {
Name string
// The size of the integer in bytes.
Size uint32
Encoding IntEncoding
// OffsetBits is the starting bit offset. Currently always 0.
// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
OffsetBits uint32
Bits byte
}
func (i *Int) String() string {
var s strings.Builder
switch {
case i.Encoding.IsChar():
s.WriteString("char")
case i.Encoding.IsBool():
s.WriteString("bool")
default:
if !i.Encoding.IsSigned() {
s.WriteRune('u')
}
s.WriteString("int")
fmt.Fprintf(&s, "%d", i.Size*8)
}
fmt.Fprintf(&s, "#%d", i.TypeID)
if i.Bits > 0 {
fmt.Fprintf(&s, "[bits=%d]", i.Bits)
}
return s.String()
func (i *Int) Format(fs fmt.State, verb rune) {
formatType(fs, verb, i, i.Encoding, "size=", i.Size*8)
}
func (i *Int) TypeName() string { return i.Name }
@@ -126,18 +132,13 @@ func (i *Int) copy() Type {
return &cpy
}
func (i *Int) isBitfield() bool {
return i.OffsetBits > 0
}
// Pointer is a pointer to another type.
type Pointer struct {
TypeID
Target Type
}
func (p *Pointer) String() string {
return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
func (p *Pointer) Format(fs fmt.State, verb rune) {
formatType(fs, verb, p, "target=", p.Target)
}
func (p *Pointer) TypeName() string { return "" }
@@ -150,13 +151,12 @@ func (p *Pointer) copy() Type {
// Array is an array with a fixed number of elements.
type Array struct {
TypeID
Type Type
Nelems uint32
}
func (arr *Array) String() string {
return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
func (arr *Array) Format(fs fmt.State, verb rune) {
formatType(fs, verb, arr, "type=", arr.Type, "n=", arr.Nelems)
}
func (arr *Array) TypeName() string { return "" }
@@ -169,15 +169,14 @@ func (arr *Array) copy() Type {
// Struct is a compound type of consecutive members.
type Struct struct {
TypeID
Name string
// The size of the struct including padding, in bytes
Size uint32
Members []Member
}
func (s *Struct) String() string {
return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
func (s *Struct) Format(fs fmt.State, verb rune) {
formatType(fs, verb, s, "fields=", len(s.Members))
}
func (s *Struct) TypeName() string { return s.Name }
@@ -202,15 +201,14 @@ func (s *Struct) members() []Member {
// Union is a compound type where members occupy the same memory.
type Union struct {
TypeID
Name string
// The size of the union including padding, in bytes.
Size uint32
Members []Member
}
func (u *Union) String() string {
return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
func (u *Union) Format(fs fmt.State, verb rune) {
formatType(fs, verb, u, "fields=", len(u.Members))
}
func (u *Union) TypeName() string { return u.Name }
@@ -248,26 +246,32 @@ var (
_ composite = (*Union)(nil)
)
// A value in bits.
type Bits uint32
// Bytes converts a bit value into bytes.
func (b Bits) Bytes() uint32 {
return uint32(b / 8)
}
// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
Name string
Type Type
// OffsetBits is the bit offset of this member.
OffsetBits uint32
BitfieldSize uint32
Offset Bits
BitfieldSize Bits
}
// Enum lists possible values.
type Enum struct {
TypeID
Name string
Values []EnumValue
}
func (e *Enum) String() string {
return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
func (e *Enum) Format(fs fmt.State, verb rune) {
formatType(fs, verb, e, "values=", len(e.Values))
}
func (e *Enum) TypeName() string { return e.Name }
@@ -311,13 +315,12 @@ func (fk FwdKind) String() string {
// Fwd is a forward declaration of a Type.
type Fwd struct {
TypeID
Name string
Kind FwdKind
}
func (f *Fwd) String() string {
return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
func (f *Fwd) Format(fs fmt.State, verb rune) {
formatType(fs, verb, f, f.Kind)
}
func (f *Fwd) TypeName() string { return f.Name }
@@ -330,13 +333,12 @@ func (f *Fwd) copy() Type {
// Typedef is an alias of a Type.
type Typedef struct {
TypeID
Name string
Type Type
}
func (td *Typedef) String() string {
return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
func (td *Typedef) Format(fs fmt.State, verb rune) {
formatType(fs, verb, td, td.Type)
}
func (td *Typedef) TypeName() string { return td.Name }
@@ -349,12 +351,11 @@ func (td *Typedef) copy() Type {
// Volatile is a qualifier.
type Volatile struct {
TypeID
Type Type
}
func (v *Volatile) String() string {
return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
func (v *Volatile) Format(fs fmt.State, verb rune) {
formatType(fs, verb, v, v.Type)
}
func (v *Volatile) TypeName() string { return "" }
@@ -368,12 +369,11 @@ func (v *Volatile) copy() Type {
// Const is a qualifier.
type Const struct {
TypeID
Type Type
}
func (c *Const) String() string {
return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
func (c *Const) Format(fs fmt.State, verb rune) {
formatType(fs, verb, c, c.Type)
}
func (c *Const) TypeName() string { return "" }
@@ -387,12 +387,11 @@ func (c *Const) copy() Type {
// Restrict is a qualifier.
type Restrict struct {
TypeID
Type Type
}
func (r *Restrict) String() string {
return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
func (r *Restrict) Format(fs fmt.State, verb rune) {
formatType(fs, verb, r, r.Type)
}
func (r *Restrict) TypeName() string { return "" }
@@ -406,14 +405,18 @@ func (r *Restrict) copy() Type {
// Func is a function definition.
type Func struct {
TypeID
Name string
Type Type
Linkage FuncLinkage
}
func (f *Func) String() string {
return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID())
func FuncMetadata(ins *asm.Instruction) *Func {
fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
return fn
}
func (f *Func) Format(fs fmt.State, verb rune) {
formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
}
func (f *Func) TypeName() string { return f.Name }
@@ -426,19 +429,12 @@ func (f *Func) copy() Type {
// FuncProto is a function declaration.
type FuncProto struct {
TypeID
Return Type
Params []FuncParam
}
func (fp *FuncProto) String() string {
var s strings.Builder
fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
for _, param := range fp.Params {
fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
}
fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
return s.String()
func (fp *FuncProto) Format(fs fmt.State, verb rune) {
formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
}
func (fp *FuncProto) TypeName() string { return "" }
@@ -464,14 +460,13 @@ type FuncParam struct {
// Var is a global variable.
type Var struct {
TypeID
Name string
Type Type
Linkage VarLinkage
}
func (v *Var) String() string {
return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name)
func (v *Var) Format(fs fmt.State, verb rune) {
formatType(fs, verb, v, v.Linkage)
}
func (v *Var) TypeName() string { return v.Name }
@@ -484,14 +479,13 @@ func (v *Var) copy() Type {
// Datasec is a global program section containing data.
type Datasec struct {
TypeID
Name string
Size uint32
Vars []VarSecinfo
}
func (ds *Datasec) String() string {
return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
func (ds *Datasec) Format(fs fmt.State, verb rune) {
formatType(fs, verb, ds)
}
func (ds *Datasec) TypeName() string { return ds.Name }
@@ -522,15 +516,14 @@ type VarSecinfo struct {
// Float is a float of a given length.
type Float struct {
TypeID
Name string
// The size of the float in bytes.
Size uint32
}
func (f *Float) String() string {
return fmt.Sprintf("float%d#%d[%q]", f.Size*8, f.TypeID, f.Name)
func (f *Float) Format(fs fmt.State, verb rune) {
formatType(fs, verb, f, "size=", f.Size*8)
}
func (f *Float) TypeName() string { return f.Name }
@@ -541,6 +534,20 @@ func (f *Float) copy() Type {
return &cpy
}
// cycle is a type which had to be elided since it exceeded maxTypeDepth.
type cycle struct {
root Type
}
func (c *cycle) ID() TypeID { return math.MaxUint32 }
func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
func (c *cycle) TypeName() string { return "" }
func (c *cycle) walk(*typeDeque) {}
func (c *cycle) copy() Type {
cpy := *c
return &cpy
}
type sizer interface {
size() uint32
}
@@ -616,44 +623,54 @@ func Sizeof(typ Type) (int, error) {
return 0, fmt.Errorf("type %s: exceeded type depth", typ)
}
// Copy a Type recursively.
func Copy(typ Type) Type {
typ, _ = copyType(typ, nil)
return typ
// alignof returns the alignment of a type.
//
// Currently only supports the subset of types necessary for bitfield relocations.
func alignof(typ Type) (int, error) {
switch t := UnderlyingType(typ).(type) {
case *Enum:
return int(t.size()), nil
case *Int:
return int(t.Size), nil
default:
return 0, fmt.Errorf("can't calculate alignment of %T", t)
}
}
// copy a Type recursively.
// Transformer modifies a given Type and returns the result.
//
// typ may form a cycle.
// For example, UnderlyingType removes any qualifiers or typedefs from a type.
// See the example on Copy for how to use a transform.
type Transformer func(Type) Type
// Copy a Type recursively.
//
// Returns any errors from transform verbatim.
func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) {
// typ may form a cycle. If transform is not nil, it is called with the
// to be copied type, and the returned value is copied instead.
func Copy(typ Type, transform Transformer) Type {
copies := make(copier)
return typ, copies.copy(&typ, transform)
copies.copy(&typ, transform)
return typ
}
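// An editorial sketch, not part of the upstream diff: UnderlyingType
// (defined further down this file) satisfies Transformer, so it can be used
// to deep-copy a type while stripping qualifiers and typedefs from every
// node, leaving the original graph untouched.
func exampleCopyTransform(typ Type) Type {
	return Copy(typ, UnderlyingType)
}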
// copy a slice of Types recursively.
//
// Types may form a cycle.
//
// Returns any errors from transform verbatim.
func copyTypes(types []Type, transform func(Type) (Type, error)) ([]Type, error) {
// See Copy for the semantics.
func copyTypes(types []Type, transform Transformer) []Type {
result := make([]Type, len(types))
copy(result, types)
copies := make(copier)
for i := range result {
if err := copies.copy(&result[i], transform); err != nil {
return nil, err
}
copies.copy(&result[i], transform)
}
return result, nil
return result
}
type copier map[Type]Type
func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error {
func (c copier) copy(typ *Type, transform Transformer) {
var work typeDeque
for t := typ; t != nil; t = work.pop() {
// *t is the identity of the type.
@@ -664,11 +681,7 @@ func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error {
var cpy Type
if transform != nil {
tf, err := transform(*t)
if err != nil {
return fmt.Errorf("copy %s: %w", *t, err)
}
cpy = tf.copy()
cpy = transform(*t).copy()
} else {
cpy = (*t).copy()
}
@@ -679,8 +692,6 @@ func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error {
// Mark any nested types for copying.
cpy.walk(&work)
}
return nil
}
// typeDeque keeps track of pointers to types which still
@@ -763,18 +774,52 @@ func (dq *typeDeque) all() []*Type {
// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
// units, multiple types may share the same name. A Type may form a cyclic
// graph by pointing at itself.
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[essentialName][]Type, err error) {
func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable) ([]Type, error) {
types := make([]Type, 0, len(rawTypes)+1)
types = append(types, (*Void)(nil))
type fixupDef struct {
id TypeID
expectedKind btfKind
typ *Type
}
var fixups []fixupDef
fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
fixups = append(fixups, fixupDef{id, expectedKind, typ})
fixup := func(id TypeID, typ *Type) {
if id < TypeID(len(types)) {
// We've already inflated this type, fix it up immediately.
*typ = types[id]
return
}
fixups = append(fixups, fixupDef{id, typ})
}
type assertion struct {
typ *Type
want reflect.Type
}
var assertions []assertion
assert := func(typ *Type, want reflect.Type) error {
if *typ != nil {
// The type has already been fixed up, check the type immediately.
if reflect.TypeOf(*typ) != want {
return fmt.Errorf("expected %s, got %T", want, *typ)
}
return nil
}
assertions = append(assertions, assertion{typ, want})
return nil
}
type bitfieldFixupDef struct {
id TypeID
m *Member
}
var (
legacyBitfields = make(map[TypeID][2]Bits) // offset, size
bitfieldFixups []bitfieldFixupDef
)
convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
// NB: The fixup below relies on pre-allocating this array to
// work, since otherwise append might re-allocate members.
@@ -784,26 +829,52 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
if err != nil {
return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
}
m := Member{
members = append(members, Member{
Name: name,
OffsetBits: btfMember.Offset,
}
Offset: Bits(btfMember.Offset),
})
m := &members[i]
fixup(raw[i].Type, &m.Type)
if kindFlag {
m.BitfieldSize = btfMember.Offset >> 24
m.OffsetBits &= 0xffffff
m.BitfieldSize = Bits(btfMember.Offset >> 24)
m.Offset &= 0xffffff
// We ignore legacy bitfield definitions if the current composite
// is a new-style bitfield. This is kind of safe since offset and
// size on the type of the member must be zero if kindFlag is set
// according to spec.
continue
}
members = append(members, m)
// This may be a legacy bitfield, try to fix it up.
data, ok := legacyBitfields[raw[i].Type]
if ok {
// Bingo!
m.Offset += data[0]
m.BitfieldSize = data[1]
continue
}
for i := range members {
fixup(raw[i].Type, kindUnknown, &members[i].Type)
if m.Type != nil {
// We couldn't find a legacy bitfield, but we know that the member's
// type has already been inflated. Hence we know that it can't be
// a legacy bitfield and there is nothing left to do.
continue
}
// We don't have fixup data, and the type we're pointing
// at hasn't been inflated yet. No choice but to defer
// the fixup.
bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{
raw[i].Type,
m,
})
}
return members, nil
}
types = make([]Type, 0, len(rawTypes))
types = append(types, (*Void)(nil))
namedTypes = make(map[essentialName][]Type)
for i, raw := range rawTypes {
var (
// Void is defined to always be type ID 0, and is thus
@@ -814,17 +885,21 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
name, err := rawStrings.Lookup(raw.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
return nil, fmt.Errorf("get name for type id %d: %w", id, err)
}
switch raw.Kind() {
case kindInt:
size := raw.Size()
encoding, offset, bits := intEncoding(*raw.data.(*uint32))
typ = &Int{id, name, raw.Size(), encoding, offset, bits}
if offset > 0 || bits.Bytes() != size {
legacyBitfields[id] = [2]Bits{offset, bits}
}
typ = &Int{name, size, encoding}
case kindPointer:
ptr := &Pointer{id, nil}
fixup(raw.Type(), kindUnknown, &ptr.Target)
ptr := &Pointer{nil}
fixup(raw.Type(), &ptr.Target)
typ = ptr
case kindArray:
@@ -832,23 +907,23 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
// IndexType is unused according to btf.rst.
// Don't make it available right now.
arr := &Array{id, nil, btfArr.Nelems}
fixup(btfArr.Type, kindUnknown, &arr.Type)
arr := &Array{nil, btfArr.Nelems}
fixup(btfArr.Type, &arr.Type)
typ = arr
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
if err != nil {
return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{id, name, raw.Size(), members}
typ = &Struct{name, raw.Size(), members}
case kindUnion:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
if err != nil {
return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
typ = &Union{id, name, raw.Size(), members}
typ = &Union{name, raw.Size(), members}
case kindEnum:
rawvals := raw.data.([]btfEnum)
@@ -856,45 +931,48 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
}
vals = append(vals, EnumValue{
Name: name,
Value: btfVal.Val,
})
}
typ = &Enum{id, name, vals}
typ = &Enum{name, vals}
case kindForward:
if raw.KindFlag() {
typ = &Fwd{id, name, FwdUnion}
typ = &Fwd{name, FwdUnion}
} else {
typ = &Fwd{id, name, FwdStruct}
typ = &Fwd{name, FwdStruct}
}
case kindTypedef:
typedef := &Typedef{id, name, nil}
fixup(raw.Type(), kindUnknown, &typedef.Type)
typedef := &Typedef{name, nil}
fixup(raw.Type(), &typedef.Type)
typ = typedef
case kindVolatile:
volatile := &Volatile{id, nil}
fixup(raw.Type(), kindUnknown, &volatile.Type)
volatile := &Volatile{nil}
fixup(raw.Type(), &volatile.Type)
typ = volatile
case kindConst:
cnst := &Const{id, nil}
fixup(raw.Type(), kindUnknown, &cnst.Type)
cnst := &Const{nil}
fixup(raw.Type(), &cnst.Type)
typ = cnst
case kindRestrict:
restrict := &Restrict{id, nil}
fixup(raw.Type(), kindUnknown, &restrict.Type)
restrict := &Restrict{nil}
fixup(raw.Type(), &restrict.Type)
typ = restrict
case kindFunc:
fn := &Func{id, name, nil, raw.Linkage()}
fixup(raw.Type(), kindFuncProto, &fn.Type)
fn := &Func{name, nil, raw.Linkage()}
fixup(raw.Type(), &fn.Type)
if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
return nil, err
}
typ = fn
case kindFuncProto:
@@ -903,24 +981,24 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
for i, param := range rawparams {
name, err := rawStrings.Lookup(param.NameOff)
if err != nil {
return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
}
params = append(params, FuncParam{
Name: name,
})
}
for i := range params {
fixup(rawparams[i].Type, kindUnknown, &params[i].Type)
fixup(rawparams[i].Type, &params[i].Type)
}
fp := &FuncProto{id, nil, params}
fixup(raw.Type(), kindUnknown, &fp.Return)
fp := &FuncProto{nil, params}
fixup(raw.Type(), &fp.Return)
typ = fp
case kindVar:
variable := raw.data.(*btfVariable)
v := &Var{id, name, nil, VarLinkage(variable.Linkage)}
fixup(raw.Type(), kindUnknown, &v.Type)
v := &Var{name, nil, VarLinkage(variable.Linkage)}
fixup(raw.Type(), &v.Type)
typ = v
case kindDatasec:
@@ -933,44 +1011,48 @@ func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type,
})
}
for i := range vars {
fixup(btfVars[i].Type, kindVar, &vars[i].Type)
fixup(btfVars[i].Type, &vars[i].Type)
if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil {
return nil, err
}
typ = &Datasec{id, name, raw.SizeType, vars}
}
typ = &Datasec{name, raw.SizeType, vars}
case kindFloat:
typ = &Float{id, name, raw.Size()}
typ = &Float{name, raw.Size()}
default:
return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
types = append(types, typ)
if name := newEssentialName(typ.TypeName()); name != "" {
namedTypes[name] = append(namedTypes[name], typ)
}
}
for _, fixup := range fixups {
i := int(fixup.id)
if i >= len(types) {
return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
// Default void (id 0) to unknown
rawKind := kindUnknown
if i > 0 {
rawKind = rawTypes[i-1].Kind()
}
if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
}
*fixup.typ = types[i]
}
return types, namedTypes, nil
for _, bitfieldFixup := range bitfieldFixups {
data, ok := legacyBitfields[bitfieldFixup.id]
if ok {
// This is indeed a legacy bitfield, fix it up.
bitfieldFixup.m.Offset += data[0]
bitfieldFixup.m.BitfieldSize = data[1]
}
}
for _, assertion := range assertions {
if reflect.TypeOf(*assertion.typ) != assertion.want {
return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ)
}
}
return types, nil
}
// essentialName represents the name of a BTF type stripped of any flavor
@@ -984,9 +1066,116 @@ type essentialName string
// in a type name is ignored for the purpose of finding a candidate type
// in the kernel's BTF.
func newEssentialName(name string) essentialName {
if name == "" {
return ""
}
lastIdx := strings.LastIndex(name, "___")
if lastIdx > 0 {
return essentialName(name[:lastIdx])
}
return essentialName(name)
}
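// An editorial sketch, not part of the upstream diff: flavors let a local
// BTF blob carry several variants of one kernel type under names like
// "task_struct___v510" (the inputs below are made up).
func exampleEssentialNames() []essentialName {
	return []essentialName{
		newEssentialName("task_struct"),        // "task_struct"
		newEssentialName("task_struct___v510"), // "task_struct": flavor stripped
		newEssentialName("___anon"),            // "___anon": an index of 0 is kept as-is
	}
}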
// UnderlyingType skips qualifiers and Typedefs.
func UnderlyingType(typ Type) Type {
result := typ
for depth := 0; depth <= maxTypeDepth; depth++ {
switch v := (result).(type) {
case qualifier:
result = v.qualify()
case *Typedef:
result = v.Type
default:
return result
}
}
return &cycle{typ}
}
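// An editorial sketch, not part of the upstream diff: a const volatile
// typedef chain resolves to its base type.
func exampleUnderlyingType() bool {
	base := &Int{Name: "int", Size: 4}
	qualified := &Const{Type: &Volatile{Type: &Typedef{Name: "myint", Type: base}}}
	return UnderlyingType(qualified) == base // true: the same *Int is returned
}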
type formatState struct {
fmt.State
depth int
}
// formattableType is a subset of Type, to ease unit testing of formatType.
type formattableType interface {
fmt.Formatter
TypeName() string
}
// formatType formats a type in a canonical form.
//
// Handles cyclical types by only printing cycles up to a certain depth. Elements
// in extra are separated by spaces unless the preceding element is a string
// ending in '='.
func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
if verb != 'v' && verb != 's' {
fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
return
}
// This is the same as %T, but elides the package name. Assumes that
// formattableType is implemented by a pointer receiver.
goTypeName := reflect.TypeOf(t).Elem().Name()
_, _ = io.WriteString(f, goTypeName)
if name := t.TypeName(); name != "" {
// Output BTF type name if present.
fmt.Fprintf(f, ":%q", name)
}
if f.Flag('+') {
// Output address if requested.
fmt.Fprintf(f, ":%#p", t)
}
if verb == 's' {
// %s omits details.
return
}
var depth int
if ps, ok := f.(*formatState); ok {
depth = ps.depth
f = ps.State
}
maxDepth, ok := f.Width()
if !ok {
maxDepth = 0
}
if depth > maxDepth {
// We've reached the maximum depth. This avoids infinite recursion even
// for cyclical types.
return
}
if len(extra) == 0 {
return
}
wantSpace := false
_, _ = io.WriteString(f, "[")
for _, arg := range extra {
if wantSpace {
_, _ = io.WriteString(f, " ")
}
switch v := arg.(type) {
case string:
_, _ = io.WriteString(f, v)
wantSpace = len(v) > 0 && v[len(v)-1] != '='
continue
case formattableType:
v.Format(&formatState{f, depth + 1}, verb)
default:
fmt.Fprint(f, arg)
}
wantSpace = true
}
_, _ = io.WriteString(f, "]")
}


@@ -4,14 +4,11 @@ import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"reflect"
"strings"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/btf"
)
// CollectionOptions control loading a collection into the kernel.
@@ -20,6 +17,17 @@ import (
type CollectionOptions struct {
Maps MapOptions
Programs ProgramOptions
// MapReplacements takes a set of Maps that will be used instead of
// creating new ones when loading the CollectionSpec.
//
// For each given Map, there must be a corresponding MapSpec in
// CollectionSpec.Maps, and its type, key/value size, max entries and flags
// must match the values of the MapSpec.
//
// The given Maps are Clone()d before being used in the Collection, so the
// caller can Close() them freely when they are no longer needed.
MapReplacements map[string]*Map
}
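A caller-side sketch; the map name "events" and the variable existingMap are hypothetical, and the named MapSpec must exist in the CollectionSpec:

	coll, err := ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		MapReplacements: map[string]*ebpf.Map{
			"events": existingMap, // type, key/value size, max entries and flags must match
		},
	})
	if err != nil {
		return err
	}
	defer coll.Close()
	// existingMap may be closed independently: the Collection holds a clone.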
// CollectionSpec describes a collection.
@@ -27,6 +35,10 @@ type CollectionSpec struct {
Maps map[string]*MapSpec
Programs map[string]*ProgramSpec
// Types holds type information about Maps and Programs.
// Modifications to Types are currently undefined behaviour.
Types *btf.Spec
// ByteOrder specifies whether the ELF was compiled for
// big-endian or little-endian architectures.
ByteOrder binary.ByteOrder
@@ -42,6 +54,7 @@ func (cs *CollectionSpec) Copy() *CollectionSpec {
Maps: make(map[string]*MapSpec, len(cs.Maps)),
Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
ByteOrder: cs.ByteOrder,
Types: cs.Types,
}
for name, spec := range cs.Maps {
@@ -61,19 +74,21 @@ func (cs *CollectionSpec) Copy() *CollectionSpec {
// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
//
// Returns an error if a named map isn't used in at least one program.
//
// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection
// instead.
func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
for symbol, m := range maps {
// have we seen a program that uses this symbol / map
seen := false
fd := m.FD()
for progName, progSpec := range cs.Programs {
err := progSpec.Instructions.RewriteMapPtr(symbol, fd)
err := progSpec.Instructions.AssociateMap(symbol, m)
switch {
case err == nil:
seen = true
case asm.IsUnreferencedSymbol(err):
case errors.Is(err, asm.ErrUnreferencedSymbol):
// Not all programs need to use the map
default:
@@ -107,34 +122,67 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
//
// Returns an error if a constant doesn't exist.
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
rodata := cs.Maps[".rodata"]
if rodata == nil {
return errors.New("missing .rodata section")
replaced := make(map[string]bool)
for name, spec := range cs.Maps {
if !strings.HasPrefix(name, ".rodata") {
continue
}
if rodata.BTF == nil {
return errors.New(".rodata section has no BTF")
b, ds, err := spec.dataSection()
if errors.Is(err, errMapNoBTFValue) {
// Data sections without a BTF Datasec are valid, but don't support
// constant replacements.
continue
}
if n := len(rodata.Contents); n != 1 {
return fmt.Errorf("expected one key in .rodata, found %d", n)
}
kv := rodata.Contents[0]
value, ok := kv.Value.([]byte)
if !ok {
return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
}
buf := make([]byte, len(value))
copy(buf, value)
err := patchValue(buf, rodata.BTF.Value, consts)
if err != nil {
return err
return fmt.Errorf("map %s: %w", name, err)
}
// MapSpec.Copy() performs a shallow copy. Fully copy the byte slice
// to avoid any changes affecting other copies of the MapSpec.
cpy := make([]byte, len(b))
copy(cpy, b)
for _, v := range ds.Vars {
vname := v.Type.TypeName()
replacement, ok := consts[vname]
if !ok {
continue
}
if replaced[vname] {
return fmt.Errorf("section %s: duplicate variable %s", name, vname)
}
if int(v.Offset+v.Size) > len(cpy) {
return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname)
}
b, err := marshalBytes(replacement, int(v.Size))
if err != nil {
return fmt.Errorf("marshaling constant replacement %s: %w", vname, err)
}
copy(cpy[v.Offset:v.Offset+v.Size], b)
replaced[vname] = true
}
spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy}
}
var missing []string
for c := range consts {
if !replaced[c] {
missing = append(missing, c)
}
}
if len(missing) != 0 {
return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ","))
}
rodata.Contents[0] = MapKV{kv.Key, buf}
return nil
}
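A usage sketch, assuming the BPF C source declares a hypothetical `volatile const __u32 sample_rate;` so it is placed in .rodata with a BTF Datasec:

	if err := spec.RewriteConstants(map[string]interface{}{
		"sample_rate": uint32(100), // must marshal to the variable's exact size
	}); err != nil {
		return err
	}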
@@ -187,6 +235,9 @@ func (cs *CollectionSpec) Assign(to interface{}) error {
// LoadAndAssign loads Maps and Programs into the kernel and assigns them
// to a struct.
//
// Omitting Map/Program.Close() during application shutdown is an error.
// See the package documentation for details around Map and Program lifecycle.
//
// This function is a shortcut to manually checking the presence
// of maps and programs in a CollectionSpec. Consider using bpf2go
// if this sounds useful.
@@ -209,7 +260,10 @@ func (cs *CollectionSpec) Assign(to interface{}) error {
// Returns an error if any of the fields can't be found, or
// if the same Map or Program is assigned multiple times.
func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
loader := newCollectionLoader(cs, opts)
loader, err := newCollectionLoader(cs, opts)
if err != nil {
return err
}
defer loader.cleanup()
// Support assigning Programs and Maps, lazy-loading the required objects.
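A sketch of the struct-tag convention LoadAndAssign consumes; the tag values "xdp_prog" and "events" are hypothetical object names:

	var objs struct {
		Prog   *ebpf.Program `ebpf:"xdp_prog"`
		Events *ebpf.Map     `ebpf:"events"`
	}
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		return err
	}
	defer objs.Prog.Close()
	defer objs.Events.Close()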
@@ -269,14 +323,25 @@ type Collection struct {
Maps map[string]*Map
}
// NewCollection creates a Collection from a specification.
// NewCollection creates a Collection from the given spec, creating and
// loading its declared resources into the kernel.
//
// Omitting Collection.Close() during application shutdown is an error.
// See the package documentation for details around Map and Program lifecycle.
func NewCollection(spec *CollectionSpec) (*Collection, error) {
return NewCollectionWithOptions(spec, CollectionOptions{})
}
// NewCollectionWithOptions creates a Collection from a specification.
// NewCollectionWithOptions creates a Collection from the given spec using
// options, creating and loading its declared resources into the kernel.
//
// Omitting Collection.Close() during application shutdown is an error.
// See the package documentation for details around Map and Program lifecycle.
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
loader := newCollectionLoader(spec, &opts)
loader, err := newCollectionLoader(spec, &opts)
if err != nil {
return nil, err
}
defer loader.cleanup()
// Create maps first, as their fds need to be linked into programs.
@@ -314,13 +379,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co
type handleCache struct {
btfHandles map[*btf.Spec]*btf.Handle
btfSpecs map[io.ReaderAt]*btf.Spec
}
func newHandleCache() *handleCache {
return &handleCache{
btfHandles: make(map[*btf.Spec]*btf.Handle),
btfSpecs: make(map[io.ReaderAt]*btf.Spec),
}
}
@@ -338,20 +401,6 @@ func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
return handle, nil
}
func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) {
if hc.btfSpecs[rd] != nil {
return hc.btfSpecs[rd], nil
}
spec, err := btf.LoadSpecFromReader(rd)
if err != nil {
return nil, err
}
hc.btfSpecs[rd] = spec
return spec, nil
}
func (hc handleCache) close() {
for _, handle := range hc.btfHandles {
handle.Close()
@@ -366,18 +415,30 @@ type collectionLoader struct {
handles *handleCache
}
func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) *collectionLoader {
func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) {
if opts == nil {
opts = &CollectionOptions{}
}
// Check for existing MapSpecs in the CollectionSpec for all provided replacement maps.
for name, m := range opts.MapReplacements {
spec, ok := coll.Maps[name]
if !ok {
return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name)
}
if err := spec.checkCompatibility(m); err != nil {
return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err)
}
}
return &collectionLoader{
coll,
opts,
make(map[string]*Map),
make(map[string]*Program),
newHandleCache(),
}
}, nil
}
// finalize should be called when all the collectionLoader's resources
@@ -409,6 +470,21 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
return nil, fmt.Errorf("missing map %s", mapName)
}
if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF {
return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName)
}
if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok {
// Clone the map to avoid closing user's map later on.
m, err := replaceMap.Clone()
if err != nil {
return nil, err
}
cl.maps[mapName] = m
return m, nil
}
m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles)
if err != nil {
return nil, fmt.Errorf("map %s: %w", mapName, err)
@@ -434,6 +510,10 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName)
}
if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF {
return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName)
}
progSpec = progSpec.Copy()
// Rewrite any reference to a valid map in the program's instructions,
@@ -441,27 +521,24 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
for i := range progSpec.Instructions {
ins := &progSpec.Instructions[i]
if !ins.IsLoadFromMap() || ins.Reference == "" {
if !ins.IsLoadFromMap() || ins.Reference() == "" {
continue
}
if uint32(ins.Constant) != math.MaxUint32 {
// Don't overwrite maps already rewritten, users can
// rewrite programs in the spec themselves
// Don't overwrite map loads containing non-zero map fd's,
// they can be manually included by the caller.
// Map FDs/IDs are placed in the lower 32 bits of Constant.
if int32(ins.Constant) > 0 {
continue
}
m, err := cl.loadMap(ins.Reference)
m, err := cl.loadMap(ins.Reference())
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
fd := m.FD()
if fd < 0 {
return nil, fmt.Errorf("map %s: %w", ins.Reference, sys.ErrClosedFd)
}
if err := ins.RewriteMapPtr(m.FD()); err != nil {
return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err)
if err := ins.AssociateMap(m); err != nil {
return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err)
}
}
@@ -519,7 +596,11 @@ func (cl *collectionLoader) populateMaps() error {
return nil
}
// LoadCollection parses an object file and converts it to a collection.
// LoadCollection reads an object file and creates and loads its declared
// resources into the kernel.
//
// Omitting Collection.Close() during application shutdown is an error.
// See the package documentation for details around Map and Program lifecycle.
func LoadCollection(file string) (*Collection, error) {
spec, err := LoadCollectionSpec(file)
if err != nil {


@@ -13,4 +13,13 @@
// your application as any other resource.
//
// Use the link subpackage to attach a loaded program to a hook in the kernel.
//
// Note that losing all references to Map and Program resources will cause
// their underlying file descriptors to be closed, potentially removing those
// objects from the kernel. Always retain a reference by e.g. deferring a
// Close() of a Collection or LoadAndAssign object until application exit.
//
// Special care needs to be taken when handling maps of type ProgramArray,
// as the kernel erases its contents when the last userspace or bpffs
// reference disappears, regardless of the map being in active use.
package ebpf
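A minimal lifecycle sketch matching the guidance above:

	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		return err
	}
	// Hold on to coll and close it explicitly at shutdown; dropping the last
	// reference closes the file descriptors and may unload the kernel objects.
	defer coll.Close()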


@@ -13,8 +13,8 @@ import (
"strings"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"
)
@@ -26,6 +26,7 @@ type elfCode struct {
license string
version uint32
btf *btf.Spec
extInfo *btf.ExtInfos
}
// LoadCollectionSpec parses an ELF file into a CollectionSpec.
@@ -49,7 +50,6 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
if err != nil {
return nil, err
}
defer f.Close()
var (
licenseSection *elf.Section
@@ -95,7 +95,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, fmt.Errorf("load version: %w", err)
}
btfSpec, err := btf.LoadSpecFromReader(rd)
btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd)
if err != nil && !errors.Is(err, btf.ErrNotFound) {
return nil, fmt.Errorf("load BTF: %w", err)
}
@@ -106,6 +106,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
license: license,
version: version,
btf: btfSpec,
extInfo: btfExtInfo,
}
symbols, err := f.Symbols()
@@ -115,33 +116,8 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
ec.assignSymbols(symbols)
// Go through relocation sections, and parse the ones for sections we're
// interested in. Make sure that relocations point at valid sections.
for idx, relSection := range relSections {
section := sections[idx]
if section == nil {
continue
}
rels, err := ec.loadRelocations(relSection, symbols)
if err != nil {
return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err)
}
for _, rel := range rels {
target := sections[rel.Section]
if target == nil {
return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
}
if target.Flags&elf.SHF_STRINGS > 0 {
return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
}
target.references++
}
section.relocations = rels
if err := ec.loadRelocations(relSections, symbols); err != nil {
return nil, fmt.Errorf("load relocations: %w", err)
}
// Collect all the various ways to define maps.
@@ -164,7 +140,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, fmt.Errorf("load programs: %w", err)
}
return &CollectionSpec{maps, progs, ec.ByteOrder}, nil
return &CollectionSpec{maps, progs, btfSpec, ec.ByteOrder}, nil
}
func loadLicense(sec *elf.Section) (string, error) {
@@ -265,6 +241,39 @@ func (ec *elfCode) assignSymbols(symbols []elf.Symbol) {
}
}
// loadRelocations iterates .rel* sections and extracts relocation entries for
// sections of interest. Makes sure relocations point at valid sections.
func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error {
for idx, relSection := range relSections {
section := ec.sections[idx]
if section == nil {
continue
}
rels, err := ec.loadSectionRelocations(relSection, symbols)
if err != nil {
return fmt.Errorf("relocation for section %q: %w", section.Name, err)
}
for _, rel := range rels {
target := ec.sections[rel.Section]
if target == nil {
return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
}
if target.Flags&elf.SHF_STRINGS > 0 {
return fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
}
target.references++
}
section.relocations = rels
}
return nil
}
// loadProgramSections iterates ec's sections and emits a ProgramSpec
// for each function it finds.
//
@@ -302,13 +311,7 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
KernelVersion: ec.version,
Instructions: insns,
ByteOrder: ec.ByteOrder,
}
if ec.btf != nil {
spec.BTF, err = ec.btf.Program(name)
if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
return nil, fmt.Errorf("program %s: %w", name, err)
}
BTF: ec.btf,
}
// Function names must be unique within a single ELF blob.
@@ -342,73 +345,72 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
//
// The resulting map is indexed by function name.
func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) {
var (
r = bufio.NewReader(section.Open())
funcs = make(map[string]asm.Instructions)
offset uint64
insns asm.Instructions
)
for {
ins := asm.Instruction{
// Symbols denote the first instruction of a function body.
Symbol: section.symbols[offset].Name,
r := bufio.NewReader(section.Open())
// Decode the section's instruction stream.
var insns asm.Instructions
if err := insns.Unmarshal(r, ec.ByteOrder); err != nil {
return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err)
}
if len(insns) == 0 {
return nil, fmt.Errorf("no instructions found in section %s", section.Name)
}
// Pull one instruction from the instruction stream.
n, err := ins.Unmarshal(r, ec.ByteOrder)
if errors.Is(err, io.EOF) {
fn := insns.Name()
if fn == "" {
return nil, errors.New("reached EOF before finding a valid symbol")
}
// Reached the end of the section and the decoded instruction buffer
// contains at least one valid instruction belonging to a function.
// Store the result and stop processing instructions.
funcs[fn] = insns
break
}
if err != nil {
return nil, fmt.Errorf("offset %d: %w", offset, err)
}
// Decoded the first instruction of a function body but insns already
// holds a valid instruction stream. Store the result and flush insns.
if ins.Symbol != "" && insns.Name() != "" {
funcs[insns.Name()] = insns
insns = nil
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
offset := iter.Offset.Bytes()
// Tag Symbol Instructions.
if sym, ok := section.symbols[offset]; ok {
*ins = ins.WithSymbol(sym.Name)
}
// Apply any relocations for the current instruction.
// If no relocation is present, resolve any section-relative function calls.
if rel, ok := section.relocations[offset]; ok {
// A relocation was found for the current offset. Apply it to the insn.
if err = ec.relocateInstruction(&ins, rel); err != nil {
return nil, fmt.Errorf("offset %d: relocate instruction: %w", offset, err)
if err := ec.relocateInstruction(ins, rel); err != nil {
return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err)
}
} else {
// Up to LLVM 9, calls to subprograms within the same ELF section are
// sometimes encoded using relative jumps without relocation entries.
// If, after all relocations entries have been processed, there are
// still relative pseudocalls left, they must point to an existing
// symbol within the section.
// When splitting sections into subprograms, the targets of these calls
// are no longer in scope, so they must be resolved here.
if ins.IsFunctionReference() && ins.Constant != -1 {
tgt := jumpTarget(offset, ins)
sym := section.symbols[tgt].Name
if err := referenceRelativeJump(ins, offset, section.symbols); err != nil {
return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err)
}
}
}
if ec.extInfo != nil {
ec.extInfo.Assign(insns, section.Name)
}
return splitSymbols(insns)
}
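The iterator used above is part of the public asm API; a brief sketch of walking any asm.Instructions with byte offsets:

	iter := insns.Iterate()
	for iter.Next() {
		// iter.Ins is a *asm.Instruction, iter.Offset a raw instruction offset.
		fmt.Printf("%4d: %v\n", iter.Offset.Bytes(), iter.Ins)
	}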
// referenceRelativeJump turns a relative jump to another bpf subprogram within
// the same ELF section into a Reference Instruction.
//
// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes
// encoded using relative jumps instead of relocation entries. These jumps go
// out of bounds of the current program, so their targets must be memoized
// before the section's instruction stream is split.
//
// The relative jump Constant is blinded to -1 and the target Symbol is set as
// the Instruction's Reference so it can be resolved by the linker.
func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error {
if !ins.IsFunctionReference() || ins.Constant == -1 {
return nil
}
tgt := jumpTarget(offset, *ins)
sym := symbols[tgt].Name
if sym == "" {
return nil, fmt.Errorf("offset %d: no jump target found at offset %d", offset, tgt)
return fmt.Errorf("no jump target found at offset %d", tgt)
}
ins.Reference = sym
*ins = ins.WithReference(sym)
ins.Constant = -1
}
}
insns = append(insns, ins)
offset += n
}
return funcs, nil
return nil
}
// jumpTarget takes ins' offset within an instruction stream (in bytes)
@@ -452,18 +454,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
ins.Src = asm.PseudoMapFD
// Mark the instruction as needing an update when creating the
// collection.
if err := ins.RewriteMapPtr(-1); err != nil {
return err
}
case dataSection:
var offset uint32
switch typ {
case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind)
}
// This is really a reference to a static symbol, which clang doesn't
@@ -472,8 +468,17 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
offset = uint32(uint64(ins.Constant))
case elf.STT_OBJECT:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
// LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind)
}
offset = uint32(rel.Value)
case elf.STT_NOTYPE:
// LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind)
}
offset = uint32(rel.Value)
@@ -491,12 +496,6 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
ins.Constant = int64(uint64(offset) << 32)
ins.Src = asm.PseudoMapValue
// Mark the instruction as needing an update when creating the
// collection.
if err := ins.RewriteMapPtr(-1); err != nil {
return err
}
case programSection:
switch opCode := ins.OpCode; {
case opCode.JumpOp() == asm.Call:
@@ -579,7 +578,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
}
ins.Reference = name
*ins = ins.WithReference(name)
return nil
}
@@ -914,7 +913,9 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b
ValueSize: valueSize,
MaxEntries: maxEntries,
Flags: flags,
BTF: &btf.Map{Spec: spec, Key: key, Value: value},
Key: key,
Value: value,
BTF: spec,
Pinning: pinType,
InnerMap: innerMapSpec,
Contents: contents,
@@ -966,7 +967,7 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem
// The offset of the 'values' member within the _struct_ (in bits)
// is the starting point of the array. Convert to bytes. Add VarSecinfo
// offset to get the absolute position in the ELF blob.
start := (member.OffsetBits / 8) + vs.Offset
start := member.Offset.Bytes() + vs.Offset
// 'values' is encoded in BTF as a zero (variable) length struct
// member, and its contents run until the end of the VarSecinfo.
// Add VarSecinfo offset to get the absolute position in the ELF blob.
@@ -1024,15 +1025,6 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
continue
}
if ec.btf == nil {
return errors.New("data sections require BTF, make sure all consts are marked as static")
}
var datasec *btf.Datasec
if err := ec.btf.TypeByName(sec.Name, &datasec); err != nil {
return fmt.Errorf("data section %s: can't get BTF: %w", sec.Name, err)
}
data, err := sec.Data()
if err != nil {
return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
@@ -1049,14 +1041,25 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
ValueSize: uint32(len(data)),
MaxEntries: 1,
Contents: []MapKV{{uint32(0), data}},
BTF: &btf.Map{Spec: ec.btf, Key: &btf.Void{}, Value: datasec},
}
switch sec.Name {
case ".rodata":
// It is possible for a data section to exist without a corresponding BTF Datasec
// if it only contains anonymous values like macro-defined arrays.
if ec.btf != nil {
var ds *btf.Datasec
if ec.btf.TypeByName(sec.Name, &ds) == nil {
// Assign the spec's key and BTF only if the Datasec lookup was successful.
mapSpec.BTF = ec.btf
mapSpec.Key = &btf.Void{}
mapSpec.Value = ds
}
}
switch n := sec.Name; {
case strings.HasPrefix(n, ".rodata"):
mapSpec.Flags = unix.BPF_F_RDONLY_PROG
mapSpec.Freeze = true
case ".bss":
case n == ".bss":
// The kernel already zero-initializes the map
mapSpec.Contents = nil
}
@@ -1114,8 +1117,8 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
{"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0},
{"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0},
{"cgroup/skb", CGroupSKB, AttachNone, 0},
{"cgroup/sock_create", CGroupSKB, AttachCGroupInetSockCreate, 0},
{"cgroup/sock_release", CGroupSKB, AttachCgroupInetSockRelease, 0},
{"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0},
{"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0},
{"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0},
{"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0},
{"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0},
@@ -1163,7 +1166,7 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
return UnspecifiedProgram, AttachNone, 0, ""
}
func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
rels := make(map[uint64]elf.Symbol)
if sec.Entsize < 16 {


@@ -14,8 +14,8 @@ import (
"unsafe"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -214,7 +214,10 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
// inspecting loaded programs for troubleshooting, dumping, etc.
//
// For example, map accesses are made to reference their kernel map IDs,
// not the FDs they had when the program was inserted.
// not the FDs they had when the program was inserted. Note that before
// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated
// instructions were not sanitized, making the output even less reusable
// and less likely to round-trip or evaluate to the same program Tag.
//
// The first instruction is marked as a symbol using the Program's name.
//
@@ -233,7 +236,7 @@ func (pi *ProgramInfo) Instructions() (asm.Instructions, error) {
}
// Tag the first instruction with the name of the program, if available.
insns[0] = insns[0].Sym(pi.Name)
insns[0] = insns[0].WithSymbol(pi.Name)
return insns, nil
}
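A caller-side sketch for dumping the xlated instructions of a loaded Program (subject to the kernel caveats noted above):

	info, err := prog.Info()
	if err != nil {
		return err
	}
	insns, err := info.Instructions()
	if err != nil {
		return err
	}
	fmt.Println(insns) // first instruction carries the program name as its symbol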


@@ -1,497 +0,0 @@
package btf
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
)
// extInfo contains extended program metadata.
//
// It is indexed per section.
type extInfo struct {
funcInfos map[string]FuncInfos
lineInfos map[string]LineInfos
relos map[string]CoreRelos
}
// loadExtInfos parses the .BTF.ext section into its constituent parts.
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, strings stringTable) (*extInfo, error) {
// Open unbuffered section reader. binary.Read() calls io.ReadFull on
// the header structs, resulting in one syscall per header.
headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
extHeader, err := parseBTFExtHeader(headerRd, bo)
if err != nil {
return nil, fmt.Errorf("parsing BTF extension header: %w", err)
}
coreHeader, err := parseBTFExtCoreHeader(headerRd, bo, extHeader)
if err != nil {
return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
}
buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
funcInfos, err := parseFuncInfos(buf, bo, strings)
if err != nil {
return nil, fmt.Errorf("parsing BTF function info: %w", err)
}
buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
lineInfos, err := parseLineInfos(buf, bo, strings)
if err != nil {
return nil, fmt.Errorf("parsing BTF line info: %w", err)
}
relos := make(map[string]CoreRelos)
if coreHeader != nil && coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.CoreReloLen))
relos, err = parseCoreRelos(buf, bo, strings)
if err != nil {
return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
}
}
return &extInfo{funcInfos, lineInfos, relos}, nil
}
// btfExtHeader is found at the start of the .BTF.ext section.
type btfExtHeader struct {
Magic uint16
Version uint8
Flags uint8
// HdrLen is larger than the size of struct btfExtHeader when it is
// immediately followed by a btfExtCoreHeader.
HdrLen uint32
FuncInfoOff uint32
FuncInfoLen uint32
LineInfoOff uint32
LineInfoLen uint32
}
// parseBTFExtHeader parses the header of the .BTF.ext section.
func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
var header btfExtHeader
if err := binary.Read(r, bo, &header); err != nil {
return nil, fmt.Errorf("can't read header: %v", err)
}
if header.Magic != btfMagic {
return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
}
if header.Version != 1 {
return nil, fmt.Errorf("unexpected version %v", header.Version)
}
if header.Flags != 0 {
return nil, fmt.Errorf("unsupported flags %v", header.Flags)
}
if int64(header.HdrLen) < int64(binary.Size(&header)) {
return nil, fmt.Errorf("header length shorter than btfExtHeader size")
}
return &header, nil
}
// funcInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its func_info entries.
func (h *btfExtHeader) funcInfoStart() int64 {
return int64(h.HdrLen + h.FuncInfoOff)
}
// lineInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its line_info entries.
func (h *btfExtHeader) lineInfoStart() int64 {
return int64(h.HdrLen + h.LineInfoOff)
}
// coreReloStart returns the offset from the beginning of the .BTF.ext section
// to the start of its CO-RE relocation entries.
func (h *btfExtHeader) coreReloStart(ch *btfExtCoreHeader) int64 {
return int64(h.HdrLen + ch.CoreReloOff)
}
// btfExtCoreHeader is found right after the btfExtHeader when its HdrLen
// field is larger than its size.
type btfExtCoreHeader struct {
CoreReloOff uint32
CoreReloLen uint32
}
// parseBTFExtCoreHeader parses the tail of the .BTF.ext header. If additional
// header bytes are present, extHeader.HdrLen will be larger than the struct,
// indicating the presence of a CO-RE extension header.
func parseBTFExtCoreHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCoreHeader, error) {
extHdrSize := int64(binary.Size(&extHeader))
remainder := int64(extHeader.HdrLen) - extHdrSize
if remainder == 0 {
return nil, nil
}
var coreHeader btfExtCoreHeader
if err := binary.Read(r, bo, &coreHeader); err != nil {
return nil, fmt.Errorf("can't read header: %v", err)
}
return &coreHeader, nil
}
type btfExtInfoSec struct {
SecNameOff uint32
NumInfo uint32
}
// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
// appearing within func_info and line_info sub-sections.
// These headers appear once for each program section in the ELF and are
// followed by one or more func/line_info records for the section.
func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
var infoHeader btfExtInfoSec
if err := binary.Read(r, bo, &infoHeader); err != nil {
return "", nil, fmt.Errorf("read ext info header: %w", err)
}
secName, err := strings.Lookup(infoHeader.SecNameOff)
if err != nil {
return "", nil, fmt.Errorf("get section name: %w", err)
}
if secName == "" {
return "", nil, fmt.Errorf("extinfo header refers to empty section name")
}
if infoHeader.NumInfo == 0 {
return "", nil, fmt.Errorf("section %s has zero records", secName)
}
return secName, &infoHeader, nil
}
// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
// or line_infos segment that describes the length of all extInfoRecords in
// that segment.
func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
const maxRecordSize = 256
var recordSize uint32
if err := binary.Read(r, bo, &recordSize); err != nil {
return 0, fmt.Errorf("can't read record size: %v", err)
}
if recordSize < 4 {
// Need at least InsnOff worth of bytes per record.
return 0, errors.New("record size too short")
}
if recordSize > maxRecordSize {
return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
}
return recordSize, nil
}
// FuncInfo represents the location and type ID of a function in a BPF ELF.
type FuncInfo struct {
// Instruction offset of the function within an ELF section.
// Always zero after parsing a funcinfo from an ELF, since instruction
// streams are split on function boundaries.
InsnOff uint32
TypeID TypeID
}
// Name looks up the FuncInfo's corresponding function name in the given spec.
func (fi FuncInfo) Name(spec *Spec) (string, error) {
// Look up function name based on type ID.
typ, err := spec.TypeByID(fi.TypeID)
if err != nil {
return "", fmt.Errorf("looking up type by ID: %w", err)
}
if _, ok := typ.(*Func); !ok {
return "", fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
}
// C doesn't have anonymous functions, but check just in case.
if name := typ.TypeName(); name != "" {
return name, nil
}
return "", fmt.Errorf("Func with type ID %d doesn't have a name", fi.TypeID)
}
// Marshal writes the binary representation of the FuncInfo to w.
// The function offset is converted from bytes to instructions.
func (fi FuncInfo) Marshal(w io.Writer, offset uint64) error {
fi.InsnOff += uint32(offset)
// The kernel expects offsets in number of raw bpf instructions,
// while the ELF tracks it in bytes.
fi.InsnOff /= asm.InstructionSize
return binary.Write(w, internal.NativeEndian, fi)
}
type FuncInfos []FuncInfo
// funcForOffset returns the function that the instruction at the given
// ELF section offset belongs to.
//
// For example, consider an ELF section that contains 3 functions (a, b, c)
// at offsets 0, 10 and 15 respectively. Offset 5 will return function a,
// offset 12 will return b, offset >= 15 will return c, etc.
func (infos FuncInfos) funcForOffset(offset uint32) *FuncInfo {
for n, fi := range infos {
// Iterator went past the offset the caller is looking for,
// no point in continuing the search.
if offset < fi.InsnOff {
return nil
}
// If there is no next item in the list, or if the given offset
// is smaller than the next function, the offset belongs to
// the current function.
if n+1 >= len(infos) || offset < infos[n+1].InsnOff {
return &fi
}
}
return nil
}
// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]FuncInfos, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
if err != nil {
return nil, err
}
result := make(map[string]FuncInfos)
for {
secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
if err != nil {
return nil, err
}
records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
if err != nil {
return nil, fmt.Errorf("section %v: %w", secName, err)
}
result[secName] = records
}
}
// parseFuncInfoRecords parses a stream of func_infos into a FuncInfos.
// These records appear after a btf_ext_info_sec header in the func_info
// sub-section of .BTF.ext.
func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) (FuncInfos, error) {
var out FuncInfos
var fi FuncInfo
if exp, got := uint32(binary.Size(fi)), recordSize; exp != got {
// BTF blob's record size is longer than we know how to parse.
return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
}
for i := uint32(0); i < recordNum; i++ {
if err := binary.Read(r, bo, &fi); err != nil {
return nil, fmt.Errorf("can't read function info: %v", err)
}
if fi.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
}
out = append(out, fi)
}
return out, nil
}
// LineInfo represents the location and contents of a single line of source
// code a BPF ELF was compiled from.
type LineInfo struct {
// Instruction offset of the source line within an ELF section.
// After parsing a LineInfo from an ELF, this offset is relative to
// the function body instead of an ELF section.
InsnOff uint32
FileNameOff uint32
LineOff uint32
LineCol uint32
}
// Marshal writes the binary representation of the LineInfo to w.
// The instruction offset is converted from bytes to instructions.
func (li LineInfo) Marshal(w io.Writer, offset uint64) error {
li.InsnOff += uint32(offset)
// The kernel expects offsets in number of raw bpf instructions,
// while the ELF tracks it in bytes.
li.InsnOff /= asm.InstructionSize
return binary.Write(w, internal.NativeEndian, li)
}
type LineInfos []LineInfo
// Marshal writes the binary representation of the LineInfos to w.
func (li LineInfos) Marshal(w io.Writer, off uint64) error {
if len(li) == 0 {
return nil
}
for _, info := range li {
if err := info.Marshal(w, off); err != nil {
return err
}
}
return nil
}
// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
// line infos indexed by section name.
func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]LineInfos, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
if err != nil {
return nil, err
}
result := make(map[string]LineInfos)
for {
secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
if err != nil {
return nil, err
}
records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
if err != nil {
return nil, fmt.Errorf("section %v: %w", secName, err)
}
result[secName] = records
}
}
// parseLineInfoRecords parses a stream of line_infos into a LineInfos.
// These records appear after a btf_ext_info_sec header in the line_info
// sub-section of .BTF.ext.
func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) (LineInfos, error) {
var out LineInfos
var li LineInfo
if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
// BTF blob's record size is longer than we know how to parse.
return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
}
for i := uint32(0); i < recordNum; i++ {
if err := binary.Read(r, bo, &li); err != nil {
return nil, fmt.Errorf("can't read line info: %v", err)
}
if li.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
}
out = append(out, li)
}
return out, nil
}
// bpfCoreRelo matches the kernel's struct bpf_core_relo.
type bpfCoreRelo struct {
InsnOff uint32
TypeID TypeID
AccessStrOff uint32
Kind COREKind
}
type CoreRelo struct {
insnOff uint32
typeID TypeID
accessor coreAccessor
kind COREKind
}
type CoreRelos []CoreRelo
var extInfoReloSize = binary.Size(bpfCoreRelo{})
// parseCoreRelos parses a core_relos sub-section within .BTF.ext into a map of
// CO-RE relocations indexed by section name.
func parseCoreRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]CoreRelos, error) {
recordSize, err := parseExtInfoRecordSize(r, bo)
if err != nil {
return nil, err
}
if recordSize != uint32(extInfoReloSize) {
return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
}
result := make(map[string]CoreRelos)
for {
secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
if errors.Is(err, io.EOF) {
return result, nil
}
if err != nil {
return nil, err
}
records, err := parseCoreReloRecords(r, bo, recordSize, infoHeader.NumInfo, strings)
if err != nil {
return nil, fmt.Errorf("section %v: %w", secName, err)
}
result[secName] = records
}
}
// parseCoreReloRecords parses a stream of CO-RE relocation entries into a
// CoreRelos. These records appear after a btf_ext_info_sec header in the
// core_relos sub-section of .BTF.ext.
func parseCoreReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, strings stringTable) (CoreRelos, error) {
var out CoreRelos
var relo bpfCoreRelo
for i := uint32(0); i < recordNum; i++ {
if err := binary.Read(r, bo, &relo); err != nil {
return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
}
if relo.InsnOff%asm.InstructionSize != 0 {
return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
}
accessorStr, err := strings.Lookup(relo.AccessStrOff)
if err != nil {
return nil, err
}
accessor, err := parseCoreAccessor(accessorStr)
if err != nil {
return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
}
out = append(out, CoreRelo{
relo.InsnOff,
relo.TypeID,
accessor,
relo.Kind,
})
}
return out, nil
}


@@ -1,54 +0,0 @@
package btf
import (
"bytes"
"errors"
"fmt"
"io"
)
type stringTable []byte
func readStringTable(r io.Reader) (stringTable, error) {
contents, err := io.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("can't read string table: %v", err)
}
if len(contents) < 1 {
return nil, errors.New("string table is empty")
}
if contents[0] != '\x00' {
return nil, errors.New("first item in string table is non-empty")
}
if contents[len(contents)-1] != '\x00' {
return nil, errors.New("string table isn't null terminated")
}
return stringTable(contents), nil
}
func (st stringTable) Lookup(offset uint32) (string, error) {
if int64(offset) > int64(^uint(0)>>1) {
return "", fmt.Errorf("offset %d overflows int", offset)
}
pos := int(offset)
if pos >= len(st) {
return "", fmt.Errorf("offset %d is out of bounds", offset)
}
if pos > 0 && st[pos-1] != '\x00' {
return "", fmt.Errorf("offset %d isn't start of a string", offset)
}
str := st[pos:]
end := bytes.IndexByte(str, '\x00')
if end == -1 {
return "", fmt.Errorf("offset %d isn't null terminated", offset)
}
return string(str[:end]), nil
}


@@ -35,6 +35,29 @@ func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
return &SafeELFFile{file}, nil
}
// OpenSafeELFFile reads an ELF from a file.
//
// It works like NewSafeELFFile, with the exception that safe.Close will
// close the underlying file.
func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) {
defer func() {
r := recover()
if r == nil {
return
}
safe = nil
err = fmt.Errorf("reading ELF file panicked: %s", r)
}()
file, err := elf.Open(path)
if err != nil {
return nil, err
}
return &SafeELFFile{file}, nil
}
// Symbols is the safe version of elf.File.Symbols.
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
defer func() {


@@ -1,29 +0,0 @@
package internal
import (
"encoding/binary"
"unsafe"
)
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian binary.ByteOrder
// Clang is set to either "el" or "eb" depending on the host's endianness.
var ClangEndian string
func init() {
if isBigEndian() {
NativeEndian = binary.BigEndian
ClangEndian = "eb"
} else {
NativeEndian = binary.LittleEndian
ClangEndian = "el"
}
}
func isBigEndian() (ret bool) {
i := int(0x1)
bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
return bs[0] == 0
}

vendor/github.com/cilium/ebpf/internal/endian_be.go generated vendored Normal file

@@ -0,0 +1,13 @@
//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
// +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64
package internal
import "encoding/binary"
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian binary.ByteOrder = binary.BigEndian
// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
const ClangEndian = "eb"

vendor/github.com/cilium/ebpf/internal/endian_le.go generated vendored Normal file

@@ -0,0 +1,13 @@
//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64
package internal
import "encoding/binary"
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian binary.ByteOrder = binary.LittleEndian
// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
const ClangEndian = "el"


@@ -1,9 +1,9 @@
package internal
import (
"bytes"
"errors"
"fmt"
"strings"
"github.com/cilium/ebpf/internal/unix"
)
@@ -14,7 +14,13 @@ import (
// logErr should be the error returned by the syscall that generated
// the log. It is used to check for truncation of the output.
func ErrorWithLog(err error, log []byte, logErr error) error {
logStr := strings.Trim(unix.ByteSliceToString(log), "\t\r\n ")
// Convert verifier log C string by truncating it on the first 0 byte
// and trimming trailing whitespace before interpreting as a Go string.
if i := bytes.IndexByte(log, 0); i != -1 {
log = log[:i]
}
logStr := string(bytes.Trim(log, "\t\r\n "))
if errors.Is(logErr, unix.ENOSPC) {
logStr += " (truncated...)"
}


@@ -54,11 +54,6 @@ type FeatureTestFn func() error
//
// Returns an error wrapping ErrNotSupported if the feature is not supported.
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
v, err := NewVersion(version)
if err != nil {
return func() error { return err }
}
ft := new(featureTest)
return func() error {
ft.RLock()
@@ -79,6 +74,11 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error {
err := fn()
switch {
case errors.Is(err, ErrNotSupported):
v, err := NewVersion(version)
if err != nil {
return err
}
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,


@@ -18,7 +18,7 @@ import (
// end up being read completely anyway.
//
// Use instead of the r.Seek() + io.LimitReader() pattern.
func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) io.Reader {
func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
// Clamp the size of the buffer to one page to avoid slurping large parts
// of a file into memory. bufio.NewReader uses a hardcoded default buffer
// of 4096. Allow arches with larger pages to allocate more, but don't


@@ -1,4 +1,6 @@
// Package sys contains bindings for the BPF syscall.
package sys
//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../btf/testdata/vmlinux-btf.gz
// Regenerate types.go by invoking go generate in the current directory.
//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux-btf.gz


@@ -55,7 +55,10 @@ const (
BPF_SK_LOOKUP AttachType = 36
BPF_XDP AttachType = 37
BPF_SK_SKB_VERDICT AttachType = 38
__MAX_BPF_ATTACH_TYPE AttachType = 39
BPF_SK_REUSEPORT_SELECT AttachType = 39
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40
BPF_PERF_EVENT AttachType = 41
__MAX_BPF_ATTACH_TYPE AttachType = 42
)
type Cmd int32
@@ -72,6 +75,7 @@ const (
BPF_PROG_ATTACH Cmd = 8
BPF_PROG_DETACH Cmd = 9
BPF_PROG_TEST_RUN Cmd = 10
BPF_PROG_RUN Cmd = 10
BPF_PROG_GET_NEXT_ID Cmd = 11
BPF_MAP_GET_NEXT_ID Cmd = 12
BPF_PROG_GET_FD_BY_ID Cmd = 13
@@ -268,7 +272,27 @@ const (
BPF_FUNC_check_mtu FunctionId = 163
BPF_FUNC_for_each_map_elem FunctionId = 164
BPF_FUNC_snprintf FunctionId = 165
__BPF_FUNC_MAX_ID FunctionId = 166
BPF_FUNC_sys_bpf FunctionId = 166
BPF_FUNC_btf_find_by_name_kind FunctionId = 167
BPF_FUNC_sys_close FunctionId = 168
BPF_FUNC_timer_init FunctionId = 169
BPF_FUNC_timer_set_callback FunctionId = 170
BPF_FUNC_timer_start FunctionId = 171
BPF_FUNC_timer_cancel FunctionId = 172
BPF_FUNC_get_func_ip FunctionId = 173
BPF_FUNC_get_attach_cookie FunctionId = 174
BPF_FUNC_task_pt_regs FunctionId = 175
BPF_FUNC_get_branch_snapshot FunctionId = 176
BPF_FUNC_trace_vprintk FunctionId = 177
BPF_FUNC_skc_to_unix_sock FunctionId = 178
BPF_FUNC_kallsyms_lookup_name FunctionId = 179
BPF_FUNC_find_vma FunctionId = 180
BPF_FUNC_loop FunctionId = 181
BPF_FUNC_strncmp FunctionId = 182
BPF_FUNC_get_func_arg FunctionId = 183
BPF_FUNC_get_func_ret FunctionId = 184
BPF_FUNC_get_func_arg_cnt FunctionId = 185
__BPF_FUNC_MAX_ID FunctionId = 186
)
type HdrStartOff int32
@@ -288,7 +312,8 @@ const (
BPF_LINK_TYPE_ITER LinkType = 4
BPF_LINK_TYPE_NETNS LinkType = 5
BPF_LINK_TYPE_XDP LinkType = 6
MAX_BPF_LINK_TYPE LinkType = 7
BPF_LINK_TYPE_PERF_EVENT LinkType = 7
MAX_BPF_LINK_TYPE LinkType = 8
)
type MapType int32
@@ -324,6 +349,7 @@ const (
BPF_MAP_TYPE_RINGBUF MapType = 27
BPF_MAP_TYPE_INODE_STORAGE MapType = 28
BPF_MAP_TYPE_TASK_STORAGE MapType = 29
BPF_MAP_TYPE_BLOOM_FILTER MapType = 30
)
type ProgType int32
@@ -360,6 +386,7 @@ const (
BPF_PROG_TYPE_EXT ProgType = 28
BPF_PROG_TYPE_LSM ProgType = 29
BPF_PROG_TYPE_SK_LOOKUP ProgType = 30
BPF_PROG_TYPE_SYSCALL ProgType = 31
)
type RetCode int32
@@ -447,6 +474,7 @@ type MapInfo struct {
BtfKeyTypeId uint32
BtfValueTypeId uint32
_ [4]byte
MapExtra uint64
}
type ProgInfo struct {
@@ -485,6 +513,8 @@ type ProgInfo struct {
RunTimeNs uint64
RunCnt uint64
RecursionMisses uint64
VerifiedInsns uint32
_ [4]byte
}
type BtfGetFdByIdAttr struct{ Id uint32 }
@@ -572,6 +602,23 @@ func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) {
return NewFD(int(fd))
}
type LinkCreatePerfEventAttr struct {
ProgFd uint32
TargetFd uint32
AttachType AttachType
Flags uint32
BpfCookie uint64
_ [8]byte
}
func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(int(fd))
}
type LinkUpdateAttr struct {
LinkFd uint32
NewProgFd uint32
@@ -598,6 +645,7 @@ type MapCreateAttr struct {
BtfKeyTypeId uint32
BtfValueTypeId uint32
BtfVmlinuxValueTypeId uint32
MapExtra uint64
}
func MapCreate(attr *MapCreateAttr) (*FD, error) {
@@ -876,6 +924,10 @@ type ProgLoadAttr struct {
LineInfoCnt uint32
AttachBtfId uint32
AttachProgFd uint32
CoreReloCnt uint32
FdArray Pointer
CoreRelos Pointer
CoreReloRecSize uint32
_ [4]byte
}


@@ -23,6 +23,7 @@ const (
EBADF = linux.EBADF
E2BIG = linux.E2BIG
EFAULT = linux.EFAULT
EACCES = linux.EACCES
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
ENOTSUPP = syscall.Errno(0x20c)


@@ -24,6 +24,7 @@ const (
EBADF = syscall.Errno(0)
E2BIG = syscall.Errno(0)
EFAULT = syscall.EFAULT
EACCES = syscall.Errno(0)
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
ENOTSUPP = syscall.Errno(0x20c)


@@ -8,7 +8,9 @@ import (
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"unsafe"
"github.com/cilium/ebpf"
@@ -30,11 +32,25 @@ type probeType uint8
type probeArgs struct {
symbol, group, path string
offset, refCtrOffset uint64
offset, refCtrOffset, cookie uint64
pid int
ret bool
}
// KprobeOptions defines additional parameters that will be used
// when loading Kprobes.
type KprobeOptions struct {
// Arbitrary value that can be fetched from an eBPF program
// via `bpf_get_attach_cookie()`.
//
// Needs kernel 5.15+.
Cookie uint64
// Offset of the kprobe relative to the traced symbol.
// Can be used to insert kprobes at arbitrary offsets in kernel functions,
// e.g. in places where functions have been inlined.
Offset uint64
}
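A usage sketch; the symbol "tcp_sendmsg" is an arbitrary example, and Cookie needs kernel 5.15+:

	kp, err := link.Kprobe("tcp_sendmsg", prog, &link.KprobeOptions{
		Cookie: 23, // readable in the program via bpf_get_attach_cookie()
	})
	if err != nil {
		return err
	}
	defer kp.Close()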
const (
kprobeType probeType = iota
uprobeType
@@ -78,61 +94,88 @@ func (pt probeType) RetprobeBit() (uint64, error) {
// given kernel symbol starts executing. See /proc/kallsyms for available
// symbols. For example, printk():
//
// kp, err := Kprobe("printk", prog)
// kp, err := Kprobe("printk", prog, nil)
//
// Losing the reference to the resulting Link (kp) will close the Kprobe
// and prevent further execution of prog. The Link must be Closed during
// program shutdown to avoid leaking system resources.
func Kprobe(symbol string, prog *ebpf.Program) (Link, error) {
k, err := kprobe(symbol, prog, false)
func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
k, err := kprobe(symbol, prog, opts, false)
if err != nil {
return nil, err
}
err = k.attach(prog)
lnk, err := attachPerfEvent(k, prog)
if err != nil {
k.Close()
return nil, err
}
return k, nil
return lnk, nil
}
// Kretprobe attaches the given eBPF program to a perf event that fires right
// before the given kernel symbol exits, with the function stack left intact.
// See /proc/kallsyms for available symbols. For example, printk():
//
// kp, err := Kretprobe("printk", prog)
// kp, err := Kretprobe("printk", prog, nil)
//
// Losing the reference to the resulting Link (kp) will close the Kretprobe
// and prevent further execution of prog. The Link must be Closed during
// program shutdown to avoid leaking system resources.
func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) {
k, err := kprobe(symbol, prog, true)
func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
k, err := kprobe(symbol, prog, opts, true)
if err != nil {
return nil, err
}
err = k.attach(prog)
lnk, err := attachPerfEvent(k, prog)
if err != nil {
k.Close()
return nil, err
}
return k, nil
return lnk, nil
}
// isValidKprobeSymbol implements the equivalent of a regex match
// against "^[a-zA-Z_][0-9a-zA-Z_.]*$".
func isValidKprobeSymbol(s string) bool {
if len(s) < 1 {
return false
}
for i, c := range []byte(s) {
switch {
case c >= 'a' && c <= 'z':
case c >= 'A' && c <= 'Z':
case c == '_':
case i > 0 && c >= '0' && c <= '9':
// Allow `.` in symbol name. GCC-compiled kernel may change symbol name
// to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`.
// See: https://gcc.gnu.org/gcc-10/changes.html
case i > 0 && c == '.':
default:
return false
}
}
return true
}
// kprobe opens a perf event on the given symbol and attaches prog to it.
// If ret is true, create a kretprobe.
func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) {
if symbol == "" {
return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
}
if prog == nil {
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
}
if !rgxTraceEvent.MatchString(symbol) {
return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput)
if !isValidKprobeSymbol(symbol) {
return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput)
}
if prog.Type() != ebpf.Kprobe {
return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
@@ -140,14 +183,19 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
args := probeArgs{
pid: perfAllThreads,
symbol: platformPrefix(symbol),
symbol: symbol,
ret: ret,
}
if opts != nil {
args.cookie = opts.Cookie
args.offset = opts.Offset
}
// Use kprobe PMU if the kernel has it available.
tp, err := pmuKprobe(args)
if errors.Is(err, os.ErrNotExist) {
args.symbol = symbol
args.symbol = platformPrefix(symbol)
tp, err = pmuKprobe(args)
}
if err == nil {
@@ -158,10 +206,10 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
}
// Use tracefs if kprobe PMU is missing.
args.symbol = platformPrefix(symbol)
args.symbol = symbol
tp, err = tracefsKprobe(args)
if errors.Is(err, os.ErrNotExist) {
args.symbol = symbol
args.symbol = platformPrefix(symbol)
tp, err = tracefsKprobe(args)
}
if err != nil {
@@ -214,8 +262,12 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
}
attr = unix.PerfEventAttr{
// The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
// since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
Size: unix.PERF_ATTR_SIZE_VER1,
Type: uint32(et), // PMU event type read from sysfs
Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
Ext2: args.offset, // Kernel symbol offset
Config: config, // Retprobe flag
}
case uprobeType:
@@ -243,11 +295,22 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
// On some old kernels, the kprobe PMU doesn't allow `.` in symbol names and
// returns -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
// https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") {
return nil, fmt.Errorf("symbol '%s+%#x': older kernels don't accept dots: %w", args.symbol, args.offset, ErrNotSupported)
}
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
// is returned to the caller.
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
return nil, fmt.Errorf("symbol '%s' not found: %w", args.symbol, os.ErrNotExist)
return nil, fmt.Errorf("symbol '%s+%#x' not found: %w", args.symbol, args.offset, os.ErrNotExist)
}
// Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved
// to an invalid insn boundary.
if errors.Is(err, syscall.EILSEQ) {
return nil, fmt.Errorf("symbol '%s+%#x' not found (bad insn boundary): %w", args.symbol, args.offset, os.ErrNotExist)
}
// Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
// when attempting to set a uprobe on a trap instruction.
@@ -268,10 +331,11 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
// Kernel has perf_[k,u]probe PMU available, initialize perf event.
return &perfEvent{
fd: fd,
pmuID: et,
name: args.symbol,
typ: typ.PerfEventType(args.ret),
name: args.symbol,
pmuID: et,
cookie: args.cookie,
fd: fd,
}, nil
}
@@ -326,11 +390,12 @@ func tracefsProbe(typ probeType, args probeArgs) (*perfEvent, error) {
}
return &perfEvent{
fd: fd,
typ: typ.PerfEventType(args.ret),
group: group,
name: args.symbol,
tracefsID: tid,
typ: typ.PerfEventType(args.ret),
cookie: args.cookie,
fd: fd,
}, nil
}
@@ -346,7 +411,7 @@ func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
}
defer f.Close()
var pe string
var pe, token string
switch typ {
case kprobeType:
// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
@@ -363,7 +428,8 @@ func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
// subsampling or rate limiting logic can be more accurately implemented in
// the eBPF program itself.
// See Documentation/kprobes.txt for more details.
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, args.symbol)
token = kprobeToken(args)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token)
case uprobeType:
// The uprobe_events syntax is as follows:
// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
@@ -375,14 +441,27 @@ func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
//
// See Documentation/trace/uprobetracer.txt for more details.
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, uprobeToken(args))
token = uprobeToken(args)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token)
}
_, err = f.WriteString(pe)
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
// is returned to the caller.
// EINVAL is also returned on pre-5.2 kernels when the `SYM[+offs]` token
// is resolved to an invalid insn boundary.
if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
return fmt.Errorf("symbol %s not found: %w", args.symbol, os.ErrNotExist)
return fmt.Errorf("token %s: %w", token, os.ErrNotExist)
}
// Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved
// to an invalid insn boundary.
if errors.Is(err, syscall.EILSEQ) {
return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
}
// ERANGE is returned when the `SYM[+offs]` token is too big and cannot
// be resolved.
if errors.Is(err, syscall.ERANGE) {
return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
}
if err != nil {
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
@@ -402,7 +481,7 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
// See [k,u]probe_events syntax above. The probe type does not need to be specified
// for removals.
pe := fmt.Sprintf("-:%s/%s", group, symbol)
pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol))
if _, err = f.WriteString(pe); err != nil {
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
}
@@ -413,9 +492,9 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
// randomGroup generates a pseudorandom string for use as a tracefs group name.
// Returns an error when the output string would exceed 63 characters (kernel
// limitation), when rand.Read() fails or when prefix contains characters not
// allowed by rgxTraceEvent.
// allowed by isValidTraceID.
func randomGroup(prefix string) (string, error) {
if !rgxTraceEvent.MatchString(prefix) {
if !isValidTraceID(prefix) {
return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
}
@@ -467,3 +546,14 @@ func kretprobeBit() (uint64, error) {
})
return kprobeRetprobeBit.value, kprobeRetprobeBit.err
}
// kprobeToken creates the SYM[+offs] token for the tracefs api.
func kprobeToken(args probeArgs) string {
po := args.symbol
if args.offset != 0 {
po += fmt.Sprintf("+%#x", args.offset)
}
return po
}

View File

@@ -6,8 +6,8 @@ import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/sys"
)
@@ -325,11 +325,13 @@ func (l *RawLink) Info() (*Info, error) {
extra = &TracingInfo{}
case XDPType:
extra = &XDPInfo{}
case PerfEventType:
// no extra
default:
return nil, fmt.Errorf("unknown link info type: %d", info.Type)
}
if info.Type != RawTracepointType && info.Type != IterType {
if info.Type != RawTracepointType && info.Type != IterType && info.Type != PerfEventType {
buf := bytes.NewReader(info.Extra[:])
err := binary.Read(buf, internal.NativeEndian, extra)
if err != nil {

View File

@@ -6,13 +6,14 @@ import (
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -43,11 +44,6 @@ import (
var (
tracefsPath = "/sys/kernel/debug/tracing"
// Trace event groups, names and kernel symbols must adhere to this set
// of characters. Non-empty, first character must not be a number, all
// characters must be alphanumeric or underscore.
rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$")
errInvalidInput = errors.New("invalid input")
)
@@ -69,6 +65,8 @@ const (
// can be attached to it. It is created based on a tracefs trace event or a
// Performance Monitoring Unit (PMU).
type perfEvent struct {
// The event type determines the types of programs that can be attached.
typ perfEventType
// Group and name of the tracepoint/kprobe/uprobe.
group string
@@ -79,52 +77,15 @@ type perfEvent struct {
// ID of the trace event read from tracefs. Valid IDs are non-zero.
tracefsID uint64
// The event type determines the types of programs that can be attached.
typ perfEventType
// User provided arbitrary value.
cookie uint64
// This is the perf event FD.
fd *sys.FD
}
func (pe *perfEvent) isLink() {}
func (pe *perfEvent) Pin(string) error {
return fmt.Errorf("pin perf event: %w", ErrNotSupported)
}
func (pe *perfEvent) Unpin() error {
return fmt.Errorf("unpin perf event: %w", ErrNotSupported)
}
// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
// owned by the perf event, which means multiple programs can be attached
// simultaneously.
//
// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
// returns EEXIST.
//
// Detaching a program from a perf event is currently not possible, so a
// program replacement mechanism cannot be implemented for perf events.
func (pe *perfEvent) Update(prog *ebpf.Program) error {
return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported)
}
func (pe *perfEvent) Info() (*Info, error) {
return nil, fmt.Errorf("can't get perf event info: %w", ErrNotSupported)
}
func (pe *perfEvent) Close() error {
if pe.fd == nil {
return nil
}
err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_DISABLE, 0)
if err != nil {
return fmt.Errorf("disabling perf event: %w", err)
}
err = pe.fd.Close()
if err != nil {
if err := pe.fd.Close(); err != nil {
return fmt.Errorf("closing perf event fd: %w", err)
}
@@ -147,48 +108,150 @@ func (pe *perfEvent) Close() error {
return nil
}
// perfEventLink represents a bpf perf link.
type perfEventLink struct {
RawLink
pe *perfEvent
}
func (pl *perfEventLink) isLink() {}
// Pinning requires the underlying perf event FD to stay open.
//
// | PerfEvent FD | BpfLink FD | Works                |
// |--------------|------------|----------------------|
// | Open         | Open       | Yes                  |
// | Closed       | Open       | No                   |
// | Open         | Closed     | No (Pin() -> EINVAL) |
// | Closed       | Closed     | No (Pin() -> EINVAL) |
//
// There is currently no pretty way to recover the perf event FD
// when loading a pinned link, so leave as not supported for now.
func (pl *perfEventLink) Pin(string) error {
return fmt.Errorf("perf event link pin: %w", ErrNotSupported)
}
func (pl *perfEventLink) Unpin() error {
return fmt.Errorf("perf event link unpin: %w", ErrNotSupported)
}
func (pl *perfEventLink) Close() error {
if err := pl.pe.Close(); err != nil {
return fmt.Errorf("perf event link close: %w", err)
}
return pl.fd.Close()
}
func (pl *perfEventLink) Update(prog *ebpf.Program) error {
return fmt.Errorf("perf event link update: %w", ErrNotSupported)
}
// perfEventIoctl implements Link and handles the perf event lifecycle
// via ioctl().
type perfEventIoctl struct {
*perfEvent
}
func (pi *perfEventIoctl) isLink() {}
// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
// owned by the perf event, which means multiple programs can be attached
// simultaneously.
//
// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
// returns EEXIST.
//
// Detaching a program from a perf event is currently not possible, so a
// program replacement mechanism cannot be implemented for perf events.
func (pi *perfEventIoctl) Update(prog *ebpf.Program) error {
return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported)
}
func (pi *perfEventIoctl) Pin(string) error {
return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported)
}
func (pi *perfEventIoctl) Unpin() error {
return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported)
}
func (pi *perfEventIoctl) Info() (*Info, error) {
return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported)
}
// attach the given eBPF prog to the perf event stored in pe.
// pe must contain a valid perf event fd.
// prog's type must match the program type stored in pe.
func (pe *perfEvent) attach(prog *ebpf.Program) error {
func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
if prog == nil {
return errors.New("cannot attach a nil program")
}
if pe.fd == nil {
return errors.New("cannot attach to nil perf event")
return nil, errors.New("cannot attach a nil program")
}
if prog.FD() < 0 {
return fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
}
switch pe.typ {
case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
if t := prog.Type(); t != ebpf.Kprobe {
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
}
case tracepointEvent:
if t := prog.Type(); t != ebpf.TracePoint {
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
}
default:
return fmt.Errorf("unknown perf event type: %d", pe.typ)
return nil, fmt.Errorf("unknown perf event type: %d", pe.typ)
}
kfd := pe.fd.Int()
if err := haveBPFLinkPerfEvent(); err == nil {
return attachPerfEventLink(pe, prog)
}
return attachPerfEventIoctl(pe, prog)
}
func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
if pe.cookie != 0 {
return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
}
// Assign the eBPF program to the perf event.
err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
if err != nil {
return fmt.Errorf("setting perf event bpf program: %w", err)
return nil, fmt.Errorf("setting perf event bpf program: %w", err)
}
// PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values.
if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
return fmt.Errorf("enable perf event: %s", err)
if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
return nil, fmt.Errorf("enable perf event: %s", err)
}
pi := &perfEventIoctl{pe}
// Close the perf event when its reference is lost to avoid leaking system resources.
runtime.SetFinalizer(pe, (*perfEvent).Close)
return nil
runtime.SetFinalizer(pi, (*perfEventIoctl).Close)
return pi, nil
}
// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
//
// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) {
fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
ProgFd: uint32(prog.FD()),
TargetFd: pe.fd.Uint(),
AttachType: sys.BPF_PERF_EVENT,
BpfCookie: pe.cookie,
})
if err != nil {
return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
}
pl := &perfEventLink{RawLink{fd: fd}, pe}
// Close the perf event when its reference is lost to avoid leaking system resources.
runtime.SetFinalizer(pl, (*perfEventLink).Close)
return pl, nil
}
// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
@@ -201,8 +264,12 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) {
}
// getTraceEventID reads a trace event's ID from tracefs given its group and name.
// group and name must be alphanumeric or underscore, as required by the kernel.
// The kernel requires group and name to be alphanumeric or underscore.
//
// name automatically has its invalid characters converted to underscores so the
// caller can pass a raw symbol name, e.g. a kernel symbol containing dots.
func getTraceEventID(group, name string) (uint64, error) {
name = sanitizeSymbol(name)
tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
if errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
@@ -268,3 +335,60 @@ func uint64FromFile(base string, path ...string) (uint64, error) {
et := bytes.TrimSpace(data)
return strconv.ParseUint(string(et), 10, 64)
}
// Probe BPF perf link.
//
// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Name: "probe_bpf_perf_link",
Type: ebpf.Kprobe,
Instructions: asm.Instructions{
asm.Mov.Imm(asm.R0, 0),
asm.Return(),
},
License: "MIT",
})
if err != nil {
return err
}
defer prog.Close()
_, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
ProgFd: uint32(prog.FD()),
AttachType: sys.BPF_PERF_EVENT,
})
// A kernel that doesn't know BPF_LINK_TYPE_PERF_EVENT rejects the attach
// type with EINVAL.
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
// EBADF means the attach type was accepted and the kernel went on to reject
// the missing target perf event fd, so the feature is present.
if errors.Is(err, unix.EBADF) {
return nil
}
return err
})
// isValidTraceID implements the equivalent of a regex match
// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
//
// Trace event groups, names and kernel symbols must adhere to this set
// of characters. Non-empty, first character must not be a number, all
// characters must be alphanumeric or underscore.
func isValidTraceID(s string) bool {
if len(s) < 1 {
return false
}
for i, c := range []byte(s) {
switch {
case c >= 'a' && c <= 'z':
case c >= 'A' && c <= 'Z':
case c == '_':
case i > 0 && c >= '0' && c <= '9':
default:
return false
}
}
return true
}

View File

@@ -22,12 +22,12 @@ const (
IterType = sys.BPF_LINK_TYPE_ITER
NetNsType = sys.BPF_LINK_TYPE_NETNS
XDPType = sys.BPF_LINK_TYPE_XDP
PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT
)
var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Type: ebpf.CGroupSKB,
AttachType: ebpf.AttachCGroupInetIngress,
License: "MIT",
Instructions: asm.Instructions{
asm.Mov.Imm(asm.R0, 0),

View File

@@ -6,12 +6,22 @@ import (
"github.com/cilium/ebpf"
)
// TracepointOptions defines additional parameters that will be used
// when loading Tracepoints.
type TracepointOptions struct {
// Arbitrary value that can be fetched from an eBPF program
// via `bpf_get_attach_cookie()`.
//
// Needs kernel 5.15+.
Cookie uint64
}
// Tracepoint attaches the given eBPF program to the tracepoint with the given
// group and name. See /sys/kernel/debug/tracing/events to find available
// tracepoints. The top-level directory is the group, the event's subdirectory
// is the name. Example:
//
// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog)
// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil)
//
// Losing the reference to the resulting Link (tp) will close the Tracepoint
// and prevent further execution of prog. The Link must be Closed during
@@ -19,14 +29,14 @@ import (
//
// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
// only possible as of kernel 4.14 (commit cf5f5ce).
func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) {
func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {
if group == "" || name == "" {
return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput)
}
if prog == nil {
return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
}
if !rgxTraceEvent.MatchString(group) || !rgxTraceEvent.MatchString(name) {
if !isValidTraceID(group) || !isValidTraceID(name) {
return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput)
}
if prog.Type() != ebpf.TracePoint {
@@ -43,18 +53,25 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) {
return nil, err
}
pe := &perfEvent{
fd: fd,
tracefsID: tid,
group: group,
name: name,
typ: tracepointEvent,
var cookie uint64
if opts != nil {
cookie = opts.Cookie
}
if err := pe.attach(prog); err != nil {
pe := &perfEvent{
typ: tracepointEvent,
group: group,
name: name,
tracefsID: tid,
cookie: cookie,
fd: fd,
}
lnk, err := attachPerfEvent(pe, prog)
if err != nil {
pe.Close()
return nil, err
}
return pe, nil
return lnk, nil
}
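A short sketch of the updated call site; the group/name and cookie are illustrative, and a nil *TracepointOptions preserves the previous behaviour.

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachForkTracepoint attaches prog to syscalls/sys_enter_fork and tags the
// attachment with a cookie readable via bpf_get_attach_cookie() (5.15+).
func attachForkTracepoint(prog *ebpf.Program) (link.Link, error) {
	tp, err := link.Tracepoint("syscalls", "sys_enter_fork", prog, &link.TracepointOptions{Cookie: 42})
	if err != nil {
		return nil, fmt.Errorf("attach tracepoint: %w", err)
	}
	return tp, nil
}
```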

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal/sys"
)
@@ -61,7 +61,10 @@ func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (
}
target = targetProg.FD()
typeID = function.ID()
typeID, err = btfHandle.Spec().TypeID(function)
if err != nil {
return nil, err
}
}
link, err := AttachRawLink(RawLinkOptions{

View File

@@ -6,7 +6,7 @@ import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/cilium/ebpf"
@@ -16,10 +16,6 @@ import (
var (
uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
// rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol
// as they are not allowed to be used as the EVENT token in tracefs.
rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+")
uprobeRetprobeBit = struct {
once sync.Once
value uint64
@@ -70,6 +66,11 @@ type UprobeOptions struct {
// github.com/torvalds/linux/commit/1cc33161a83d
// github.com/torvalds/linux/commit/a6ca88b241d5
RefCtrOffset uint64
// Arbitrary value that can be fetched from an eBPF program
// via `bpf_get_attach_cookie()`.
//
// Needs kernel 5.15+.
Cookie uint64
}
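A hedged sketch of threading the new cookie through UprobeOptions; the binary path and symbol are assumptions for illustration.

```go
package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachReadline instruments the readline symbol in bash and attaches a
// cookie the program can read back with bpf_get_attach_cookie().
func attachReadline(prog *ebpf.Program) (link.Link, error) {
	ex, err := link.OpenExecutable("/bin/bash")
	if err != nil {
		return nil, err
	}
	return ex.Uprobe("readline", prog, &link.UprobeOptions{Cookie: 7})
}
```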
// To open a new Executable, use:
@@ -197,13 +198,13 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
return nil, err
}
err = u.attach(prog)
lnk, err := attachPerfEvent(u, prog)
if err != nil {
u.Close()
return nil, err
}
return u, nil
return lnk, nil
}
// Uretprobe attaches the given eBPF program to a perf event that fires right
@@ -229,13 +230,13 @@ func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeO
return nil, err
}
err = u.attach(prog)
lnk, err := attachPerfEvent(u, prog)
if err != nil {
u.Close()
return nil, err
}
return u, nil
return lnk, nil
}
// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
@@ -278,6 +279,7 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
pid: pid,
refCtrOffset: opts.RefCtrOffset,
ret: ret,
cookie: opts.Cookie,
}
// Use uprobe PMU if the kernel has it available.
@@ -290,7 +292,7 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
}
// Use tracefs if uprobe PMU is missing.
args.symbol = uprobeSanitizedSymbol(symbol)
args.symbol = sanitizeSymbol(symbol)
tp, err = tracefsUprobe(args)
if err != nil {
return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
@@ -309,9 +311,29 @@ func tracefsUprobe(args probeArgs) (*perfEvent, error) {
return tracefsProbe(uprobeType, args)
}
// uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore.
func uprobeSanitizedSymbol(symbol string) string {
return rgxUprobeSymbol.ReplaceAllString(symbol, "_")
// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore.
// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_").
func sanitizeSymbol(s string) string {
var b strings.Builder
b.Grow(len(s))
var skip bool
for _, c := range []byte(s) {
switch {
case c >= 'a' && c <= 'z',
c >= 'A' && c <= 'Z',
c >= '0' && c <= '9':
skip = false
b.WriteByte(c)
default:
if !skip {
b.WriteByte('_')
skip = true
}
}
}
return b.String()
}
// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.

View File

@@ -1,14 +1,43 @@
package ebpf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"sync"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/btf"
)
// splitSymbols splits insns into subsections delimited by Symbol Instructions.
// insns cannot be empty and must start with a Symbol Instruction.
//
// The resulting map is indexed by Symbol name.
func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) {
if len(insns) == 0 {
return nil, errors.New("insns is empty")
}
if insns[0].Symbol() == "" {
return nil, errors.New("insns must start with a Symbol")
}
var name string
progs := make(map[string]asm.Instructions)
for _, ins := range insns {
if sym := ins.Symbol(); sym != "" {
if progs[sym] != nil {
return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym)
}
name = sym
}
progs[name] = append(progs[name], ins)
}
return progs, nil
}
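To illustrate the layout splitSymbols expects, and the WithSymbol API this commit migrates to, a sketch of a stream holding two functions (names are illustrative):

```go
package main

import "github.com/cilium/ebpf/asm"

// twoFuncs returns an instruction stream that splitSymbols would slice into
// {"fn_a": ..., "fn_b": ...}: each function starts at a Symbol instruction.
func twoFuncs() asm.Instructions {
	return asm.Instructions{
		asm.Mov.Imm(asm.R0, 0).WithSymbol("fn_a"),
		asm.Return(),
		asm.Mov.Imm(asm.R0, 1).WithSymbol("fn_b"),
		asm.Return(),
	}
}
```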
// The linker is responsible for resolving bpf-to-bpf calls between programs
// within an ELF. Each BPF program must be a self-contained binary blob,
// so when an instruction in one ELF program section wants to jump to
@@ -82,112 +111,116 @@ func findReferences(progs map[string]*ProgramSpec) error {
return nil
}
// marshalFuncInfos returns the BTF func infos of all progs in order.
func marshalFuncInfos(layout []reference) ([]byte, error) {
if len(layout) == 0 {
return nil, nil
// hasReferences returns true if insns contains one or more bpf2bpf
// function references.
func hasReferences(insns asm.Instructions) bool {
for _, i := range insns {
if i.IsFunctionReference() {
return true
}
}
return false
}
buf := bytes.NewBuffer(make([]byte, 0, binary.Size(&btf.FuncInfo{})*len(layout)))
for _, sym := range layout {
if err := sym.spec.BTF.FuncInfo.Marshal(buf, sym.offset); err != nil {
return nil, fmt.Errorf("marshaling prog %s func info: %w", sym.spec.Name, err)
}
}
return buf.Bytes(), nil
}
// marshalLineInfos returns the BTF line infos of all progs in order.
func marshalLineInfos(layout []reference) ([]byte, error) {
if len(layout) == 0 {
return nil, nil
}
buf := bytes.NewBuffer(make([]byte, 0, binary.Size(&btf.LineInfo{})*len(layout)))
for _, sym := range layout {
if err := sym.spec.BTF.LineInfos.Marshal(buf, sym.offset); err != nil {
return nil, fmt.Errorf("marshaling prog %s line infos: %w", sym.spec.Name, err)
}
}
return buf.Bytes(), nil
}
func fixupJumpsAndCalls(insns asm.Instructions) error {
symbolOffsets := make(map[string]asm.RawInstructionOffset)
// applyRelocations collects and applies any CO-RE relocations in insns.
//
// Passing a nil target will relocate against the running kernel. insns are
// modified in place.
func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error {
var relos []*btf.CORERelocation
var reloInsns []*asm.Instruction
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
if ins.Symbol == "" {
continue
if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil {
relos = append(relos, relo)
reloInsns = append(reloInsns, iter.Ins)
}
}
if _, ok := symbolOffsets[ins.Symbol]; ok {
return fmt.Errorf("duplicate symbol %s", ins.Symbol)
if len(relos) == 0 {
return nil
}
symbolOffsets[ins.Symbol] = iter.Offset
target, err := maybeLoadKernelBTF(target)
if err != nil {
return err
}
iter = insns.Iterate()
for iter.Next() {
i := iter.Index
offset := iter.Offset
ins := iter.Ins
if ins.Reference == "" {
continue
fixups, err := btf.CORERelocate(local, target, relos)
if err != nil {
return err
}
symOffset, ok := symbolOffsets[ins.Reference]
switch {
case ins.IsFunctionReference() && ins.Constant == -1:
if !ok {
break
}
ins.Constant = int64(symOffset - offset - 1)
continue
case ins.OpCode.Class().IsJump() && ins.Offset == -1:
if !ok {
break
}
ins.Offset = int16(symOffset - offset - 1)
continue
case ins.IsLoadFromMap() && ins.MapPtr() == -1:
return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedMap)
default:
// no fixup needed
continue
}
return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference, errUnsatisfiedProgram)
}
// fixupBPFCalls replaces bpf_probe_read_{kernel,user}[_str] with bpf_probe_read[_str] on older kernels
// https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L6009
iter = insns.Iterate()
for iter.Next() {
ins := iter.Ins
if !ins.IsBuiltinCall() {
continue
}
switch asm.BuiltinFunc(ins.Constant) {
case asm.FnProbeReadKernel, asm.FnProbeReadUser:
if err := haveProbeReadKernel(); err != nil {
ins.Constant = int64(asm.FnProbeRead)
}
case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr:
if err := haveProbeReadKernel(); err != nil {
ins.Constant = int64(asm.FnProbeReadStr)
}
for i, fixup := range fixups {
if err := fixup.Apply(reloInsns[i]); err != nil {
return fmt.Errorf("apply fixup %s: %w", &fixup, err)
}
}
return nil
}
// fixupAndValidate is called by the ELF reader right before marshaling the
// instruction stream. It performs last-minute adjustments to the program and
// runs some sanity checks before sending it off to the kernel.
func fixupAndValidate(insns asm.Instructions) error {
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
// Map load was tagged with a Reference, but does not contain a Map pointer.
if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil {
return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference)
}
fixupProbeReadKernel(ins)
}
return nil
}
// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
// with bpf_probe_read(_str) on kernels that don't support it yet.
func fixupProbeReadKernel(ins *asm.Instruction) {
if !ins.IsBuiltinCall() {
return
}
// Kernel supports bpf_probe_read_kernel, nothing to do.
if haveProbeReadKernel() == nil {
return
}
switch asm.BuiltinFunc(ins.Constant) {
case asm.FnProbeReadKernel, asm.FnProbeReadUser:
ins.Constant = int64(asm.FnProbeRead)
case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr:
ins.Constant = int64(asm.FnProbeReadStr)
}
}
var kernelBTF struct {
sync.Mutex
spec *btf.Spec
}
// maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise
// it returns spec unchanged.
//
// The kernel BTF is cached for the lifetime of the process.
func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) {
if spec != nil {
return spec, nil
}
kernelBTF.Lock()
defer kernelBTF.Unlock()
if kernelBTF.spec != nil {
return kernelBTF.spec, nil
}
var err error
kernelBTF.spec, err = btf.LoadKernelSpec()
return kernelBTF.spec, err
}
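btf.LoadKernelSpec is the public entry point this cache wraps; a hedged sketch of calling it directly (the queried function name is an assumption):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf/btf"
)

// kernelHasFunc reports whether the running kernel's BTF describes a
// function with the given name.
func kernelHasFunc(name string) (bool, error) {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		return false, fmt.Errorf("load kernel BTF: %w", err)
	}
	var fn *btf.Func
	if err := spec.TypeByName(name, &fn); err != nil {
		if errors.Is(err, btf.ErrNotFound) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
```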

106
vendor/github.com/cilium/ebpf/map.go generated vendored
View File

@@ -8,12 +8,11 @@ import (
"math/rand"
"path/filepath"
"reflect"
"strings"
"time"
"unsafe"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -24,7 +23,8 @@ var (
ErrKeyNotExist = errors.New("key does not exist")
ErrKeyExist = errors.New("key already exists")
ErrIterationAborted = errors.New("iteration aborted")
ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map")
ErrMapIncompatible = errors.New("map spec is incompatible with existing map")
errMapNoBTFValue = errors.New("map spec does not contain a BTF Value")
)
// MapOptions control loading a map into the kernel.
@@ -76,8 +76,11 @@ type MapSpec struct {
// Must be nil or empty before instantiating the MapSpec into a Map.
Extra *bytes.Reader
// The key and value type of this map. May be nil.
Key, Value btf.Type
// The BTF associated with this map.
BTF *btf.Map
BTF *btf.Spec
}
func (ms *MapSpec) String() string {
@@ -125,6 +128,31 @@ func (ms *MapSpec) clampPerfEventArraySize() error {
return nil
}
// dataSection returns the contents and BTF Datasec descriptor of the spec.
func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) {
if ms.Value == nil {
return nil, nil, errMapNoBTFValue
}
ds, ok := ms.Value.(*btf.Datasec)
if !ok {
return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value)
}
if n := len(ms.Contents); n != 1 {
return nil, nil, fmt.Errorf("expected one key, found %d", n)
}
kv := ms.Contents[0]
value, ok := kv.Value.([]byte)
if !ok {
return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value)
}
return value, ds, nil
}
// MapKV is used to initialize the contents of a Map.
type MapKV struct {
Key interface{}
@@ -398,15 +426,25 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
}
if spec.hasBTF() {
handle, err := handles.btfHandle(spec.BTF.Spec)
handle, err := handles.btfHandle(spec.BTF)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
keyTypeID, err := spec.BTF.TypeID(spec.Key)
if err != nil {
return nil, err
}
valueTypeID, err := spec.BTF.TypeID(spec.Value)
if err != nil {
return nil, err
}
attr.BtfFd = uint32(handle.FD())
attr.BtfKeyTypeId = uint32(spec.BTF.Key.ID())
attr.BtfValueTypeId = uint32(spec.BTF.Value.ID())
attr.BtfKeyTypeId = uint32(keyTypeID)
attr.BtfValueTypeId = uint32(valueTypeID)
}
}
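From the user's side the MapSpec changes are mostly transparent; a minimal sketch of a spec under the new layout (Key/Value BTF types are optional and normally filled in by the ELF reader, so they are omitted; name and sizes are illustrative):

```go
package main

import "github.com/cilium/ebpf"

// newCountsMap creates a plain hash map without BTF.
func newCountsMap() (*ebpf.Map, error) {
	return ebpf.NewMap(&ebpf.MapSpec{
		Name:       "counts",
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1024,
	})
}
```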
@@ -1269,60 +1307,6 @@ func marshalMap(m *Map, length int) ([]byte, error) {
return buf, nil
}
func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error {
replaced := make(map[string]bool)
replace := func(name string, offset, size int, replacement interface{}) error {
if offset+size > len(value) {
return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
}
buf, err := marshalBytes(replacement, size)
if err != nil {
return fmt.Errorf("marshal %s: %w", name, err)
}
copy(value[offset:offset+size], buf)
replaced[name] = true
return nil
}
switch parent := typ.(type) {
case *btf.Datasec:
for _, secinfo := range parent.Vars {
name := string(secinfo.Type.(*btf.Var).Name)
replacement, ok := replacements[name]
if !ok {
continue
}
err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement)
if err != nil {
return err
}
}
default:
return fmt.Errorf("patching %T is not supported", typ)
}
if len(replaced) == len(replacements) {
return nil
}
var missing []string
for name := range replacements {
if !replaced[name] {
missing = append(missing, name)
}
}
if len(missing) == 1 {
return fmt.Errorf("unknown field: %s", missing[0])
}
return fmt.Errorf("unknown fields: %s", strings.Join(missing, ","))
}
// MapIterator iterates a Map.
//
// See Map.Iterate.

View File

@@ -99,14 +99,7 @@ var bytesReaderPool = sync.Pool{
func unmarshalBytes(data interface{}, buf []byte) error {
switch value := data.(type) {
case unsafe.Pointer:
var dst []byte
// Use unsafe.Slice when we drop support for pre1.17 (https://github.com/golang/go/issues/19367)
// We could opt for removing unsafe.Pointer support in the lib as well
sh := (*reflect.SliceHeader)(unsafe.Pointer(&dst))
sh.Data = uintptr(value)
sh.Len = len(buf)
sh.Cap = len(buf)
dst := unsafe.Slice((*byte)(value), len(buf))
copy(dst, buf)
runtime.KeepAlive(value)
return nil
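The same Go 1.17 idiom in isolation, as a sketch (copyOut is hypothetical): unsafe.Slice builds the destination slice directly instead of populating a reflect.SliceHeader by hand.

```go
package main

import "unsafe"

// copyOut copies buf into caller-owned memory at p.
func copyOut(p unsafe.Pointer, buf []byte) {
	dst := unsafe.Slice((*byte)(p), len(buf))
	copy(dst, buf)
}
```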

252
vendor/github.com/cilium/ebpf/prog.go generated vendored
View File

@@ -5,15 +5,15 @@ import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -21,9 +21,6 @@ import (
// ErrNotSupported is returned whenever the kernel doesn't support a feature.
var ErrNotSupported = internal.ErrNotSupported
var errUnsatisfiedMap = errors.New("unsatisfied map reference")
var errUnsatisfiedProgram = errors.New("unsatisfied program reference")
// ProgramID represents the unique ID of an eBPF program.
type ProgramID uint32
@@ -46,12 +43,13 @@ type ProgramOptions struct {
// Controls the output buffer size for the verifier. Defaults to
// DefaultVerifierLogSize.
LogSize int
// An ELF containing the target BTF for this program. It is used both to
// find the correct function to trace and to apply CO-RE relocations.
// Type information used for CO-RE relocations and when attaching to
// kernel functions.
//
// This is useful in environments where the kernel BTF is not available
// (containers) or where it is in a non-standard location. Defaults to
// use the kernel BTF from a well-known location.
TargetBTF io.ReaderAt
// use the kernel BTF from a well-known location if nil.
KernelTypes *btf.Spec
}
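A hedged migration sketch from TargetBTF to KernelTypes: parse a BTF blob from disk with btf.LoadSpec and hand the resulting *btf.Spec to the loader (the path is an assumption for illustration).

```go
package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// loadWithExternalBTF loads a program against BTF shipped outside the kernel,
// e.g. in a container without /sys/kernel/btf/vmlinux.
func loadWithExternalBTF(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	kernelTypes, err := btf.LoadSpec("/run/btf/vmlinux")
	if err != nil {
		return nil, err
	}
	return ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
		KernelTypes: kernelTypes,
	})
}
```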
// ProgramSpec defines a Program.
@@ -62,6 +60,11 @@ type ProgramSpec struct {
// Type determines at which hook in the kernel a program will run.
Type ProgramType
// AttachType of the program, needed to differentiate allowed context
// accesses in some newer program types like CGroupSockAddr.
//
// Available on kernels 4.17 and later.
AttachType AttachType
// Name of a kernel data structure or function to attach to. Its
@@ -95,7 +98,7 @@ type ProgramSpec struct {
// The BTF associated with this program. Changing Instructions
// will most likely invalidate the contained data, and may
// result in errors when attempting to load it into the kernel.
BTF *btf.Program
BTF *btf.Spec
// The byte order this program was compiled for, may be nil.
ByteOrder binary.ByteOrder
@@ -160,46 +163,6 @@ func (spec *ProgramSpec) flatten(visited map[*ProgramSpec]bool) (asm.Instruction
return insns, progs
}
// A reference describes a byte offset an Symbol Instruction pointing
// to another ProgramSpec.
type reference struct {
offset uint64
spec *ProgramSpec
}
// layout returns a unique list of programs that must be included
// in spec's instruction stream when inserting it into the kernel.
// Always returns spec itself as the first entry in the chain.
func (spec *ProgramSpec) layout() ([]reference, error) {
out := []reference{{0, spec}}
name := spec.Instructions.Name()
var ins *asm.Instruction
iter := spec.Instructions.Iterate()
for iter.Next() {
ins = iter.Ins
// Skip non-symbols and symbols that describe the ProgramSpec itself,
// which is usually the first instruction in Instructions.
// ProgramSpec itself is already included and not present in references.
if ins.Symbol == "" || ins.Symbol == name {
continue
}
// Failure to look up a reference is not an error. There are existing tests
// with valid progs that contain multiple symbols and don't have references
// populated. Assume ProgramSpec is used similarly in the wild, so don't
// alter this behaviour.
ref := spec.references[ins.Symbol]
if ref != nil {
out = append(out, reference{iter.Offset.Bytes(), ref})
}
}
return out, nil
}
// Program represents BPF program loaded into the kernel.
//
// It is not safe to close a Program which is used by other goroutines.
@@ -235,7 +198,7 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
defer handles.close()
prog, err := newProgramWithOptions(spec, opts, handles)
if errors.Is(err, errUnsatisfiedMap) {
if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
}
return prog, err
@@ -279,29 +242,18 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
attr.ProgName = sys.NewObjName(spec.Name)
}
var err error
var targetBTF *btf.Spec
if opts.TargetBTF != nil {
targetBTF, err = handles.btfSpec(opts.TargetBTF)
if err != nil {
return nil, fmt.Errorf("load target BTF: %w", err)
}
}
kernelTypes := opts.KernelTypes
layout, err := spec.layout()
if err != nil {
return nil, fmt.Errorf("get program layout: %w", err)
}
insns := make(asm.Instructions, len(spec.Instructions))
copy(insns, spec.Instructions)
var btfDisabled bool
var core btf.COREFixups
if spec.BTF != nil {
core, err = spec.BTF.Fixups(targetBTF)
if err != nil {
return nil, fmt.Errorf("CO-RE relocations: %w", err)
if err := applyRelocations(insns, spec.BTF, kernelTypes); err != nil {
return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
}
handle, err := handles.btfHandle(spec.BTF.Spec())
handle, err := handles.btfHandle(spec.BTF)
btfDisabled = errors.Is(err, btf.ErrNotSupported)
if err != nil && !btfDisabled {
return nil, fmt.Errorf("load BTF: %w", err)
@@ -310,35 +262,27 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
if handle != nil {
attr.ProgBtfFd = uint32(handle.FD())
fib, err := marshalFuncInfos(layout)
fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID)
if err != nil {
return nil, err
}
attr.FuncInfoRecSize = uint32(binary.Size(btf.FuncInfo{}))
attr.FuncInfoCnt = uint32(len(fib)) / attr.FuncInfoRecSize
attr.FuncInfoRecSize = btf.FuncInfoSize
attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
attr.FuncInfo = sys.NewSlicePointer(fib)
lib, err := marshalLineInfos(layout)
if err != nil {
return nil, err
}
attr.LineInfoRecSize = uint32(binary.Size(btf.LineInfo{}))
attr.LineInfoCnt = uint32(len(lib)) / attr.LineInfoRecSize
attr.LineInfoRecSize = btf.LineInfoSize
attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
attr.LineInfo = sys.NewSlicePointer(lib)
}
}
insns, err := core.Apply(spec.Instructions)
if err != nil {
return nil, fmt.Errorf("CO-RE fixup: %w", err)
}
if err := fixupJumpsAndCalls(insns); err != nil {
if err := fixupAndValidate(insns); err != nil {
return nil, err
}
buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
err = insns.Marshal(buf, internal.NativeEndian)
err := insns.Marshal(buf, internal.NativeEndian)
if err != nil {
return nil, err
}
@@ -347,39 +291,24 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
attr.Insns = sys.NewSlicePointer(bytecode)
attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize)
if spec.AttachTo != "" {
if spec.AttachTarget != nil {
info, err := spec.AttachTarget.Info()
targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType)
if err != nil {
return nil, fmt.Errorf("load target BTF: %w", err)
return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
}
btfID, ok := info.BTFID()
if !ok {
return nil, fmt.Errorf("load target BTF: no BTF info available")
}
btfHandle, err := btf.NewHandleFromID(btfID)
if err != nil {
return nil, fmt.Errorf("load target BTF: %w", err)
}
defer btfHandle.Close()
targetBTF = btfHandle.Spec()
if err != nil {
return nil, fmt.Errorf("load target BTF: %w", err)
}
}
target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType)
if err != nil {
return nil, err
}
if target != nil {
attr.AttachBtfId = uint32(target.ID())
}
if spec.AttachTarget != nil {
attr.AttachBtfId = uint32(targetID)
attr.AttachProgFd = uint32(spec.AttachTarget.FD())
defer runtime.KeepAlive(spec.AttachTarget)
} else if spec.AttachTo != "" {
targetID, err := findTargetInKernel(kernelTypes, spec.AttachTo, spec.Type, spec.AttachType)
if err != nil && !errors.Is(err, errUnrecognizedAttachType) {
// We ignore errUnrecognizedAttachType since AttachTo may be non-empty
// for programs that don't attach anywhere.
return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
}
attr.AttachBtfId = uint32(targetID)
}
logSize := DefaultVerifierLogSize
@@ -414,6 +343,12 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
}
}
if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) && hasReferences(spec.Instructions) {
if err := haveBPFToBPFCalls(); err != nil {
return nil, fmt.Errorf("load program: %w", internal.ErrorWithLog(err, logBuf, logErr))
}
}
if errors.Is(logErr, unix.EPERM) && len(logBuf) > 0 && logBuf[0] == 0 {
// EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
// check that the log is empty to reduce false positives.
@@ -587,6 +522,7 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
prog, err := NewProgram(&ProgramSpec{
// SocketFilter does not require privileges on newer kernels.
Type: SocketFilter,
Instructions: asm.Instructions{
asm.LoadImm(asm.R0, 0, asm.DWord),
@@ -609,15 +545,23 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e
}
err = sys.ProgRun(&attr)
if errors.Is(err, unix.EINVAL) {
switch {
case errors.Is(err, unix.EINVAL):
// Check for EINVAL specifically rather than err != nil, since we would
// otherwise misdetect failures due to insufficient permissions.
return internal.ErrNotSupported
}
if errors.Is(err, unix.EINTR) {
case errors.Is(err, unix.EINTR):
// We know that PROG_TEST_RUN is supported if we get EINTR.
return nil
case errors.Is(err, unix.ENOTSUPP):
// The first PROG_TEST_RUN patches shipped in 4.12 didn't include
// a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is
// supported, but not for the program type used in the probe.
return nil
}
return err
})
@@ -667,6 +611,10 @@ func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte,
continue
}
if errors.Is(err, unix.ENOTSUPP) {
return 0, nil, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported)
}
return 0, nil, 0, fmt.Errorf("can't run test: %w", err)
}
@@ -810,7 +758,15 @@ func (p *Program) BindMap(m *Map) error {
return sys.ProgBindMap(attr)
}
func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
var errUnrecognizedAttachType = errors.New("unrecognized attach type")
// find an attach target type in the kernel.
//
// spec may be nil and defaults to the canonical kernel BTF. name together with
// progType and attachType determine which type we need to attach to.
//
// Returns errUnrecognizedAttachType.
func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
type match struct {
p ProgramType
a AttachType
@@ -828,9 +784,6 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp
case match{Tracing, AttachTraceIter}:
typeName = "bpf_iter_" + name
featureName = name + " iterator"
case match{Extension, AttachNone}:
typeName = name
featureName = fmt.Sprintf("freplace %s", name)
case match{Tracing, AttachTraceFEntry}:
typeName = name
featureName = fmt.Sprintf("fentry %s", name)
@@ -845,20 +798,15 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp
featureName = fmt.Sprintf("raw_tp %s", name)
isBTFTypeFunc = false
default:
return nil, nil
return 0, errUnrecognizedAttachType
}
var (
target btf.Type
err error
)
if spec == nil {
spec, err = btf.LoadKernelSpec()
spec, err := maybeLoadKernelBTF(spec)
if err != nil {
return nil, fmt.Errorf("load kernel spec: %w", err)
}
return 0, fmt.Errorf("load kernel spec: %w", err)
}
var target btf.Type
if isBTFTypeFunc {
var targetFunc *btf.Func
err = spec.TypeByName(typeName, &targetFunc)
@@ -871,12 +819,56 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp
if err != nil {
if errors.Is(err, btf.ErrNotFound) {
return nil, &internal.UnsupportedFeatureError{
return 0, &internal.UnsupportedFeatureError{
Name: featureName,
}
}
return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err)
return 0, fmt.Errorf("find target for %s: %w", featureName, err)
}
return target, nil
return spec.TypeID(target)
}
// find an attach target type in a program.
//
// Returns errUnrecognizedAttachType.
func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
type match struct {
p ProgramType
a AttachType
}
var typeName string
switch (match{progType, attachType}) {
case match{Extension, AttachNone}:
typeName = name
default:
return 0, errUnrecognizedAttachType
}
info, err := prog.Info()
if err != nil {
return 0, fmt.Errorf("load target BTF: %w", err)
}
btfID, ok := info.BTFID()
if !ok {
return 0, fmt.Errorf("load target BTF: no BTF info available")
}
btfHandle, err := btf.NewHandleFromID(btfID)
if err != nil {
return 0, fmt.Errorf("load target BTF: %w", err)
}
defer btfHandle.Close()
spec := btfHandle.Spec()
var targetFunc *btf.Func
err = spec.TypeByName(typeName, &targetFunc)
if err != nil {
return 0, fmt.Errorf("find target %s: %w", typeName, err)
}
return spec.TypeID(targetFunc)
}

View File

@@ -48,6 +48,7 @@ if [[ "${1:-}" = "--exec-vm" ]]; then
rm "${output}/fake-stdin"
fi
for ((i = 0; i < 3; i++)); do
if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \
--rwdir="${testdir}=${testdir}" \
--rodir=/run/input="${input}" \
@@ -57,12 +58,21 @@ if [[ "${1:-}" = "--exec-vm" ]]; then
exit 23
fi
if [[ ! -e "${output}/success" ]]; then
exit 42
if [[ -e "${output}/status" ]]; then
break
fi
if [[ -v CI ]]; then
echo "Retrying test run due to qemu crash"
continue
fi
exit 42
done
rc=$(<"${output}/status")
$sudo rm -r "$output"
exit 0
exit $rc
elif [[ "${1:-}" = "--exec-test" ]]; then
shift
@@ -73,13 +83,12 @@ elif [[ "${1:-}" = "--exec-test" ]]; then
export KERNEL_SELFTESTS="/run/input/bpf"
fi
dmesg -C
if ! "$@"; then
dmesg --clear
rc=0
"$@" || rc=$?
dmesg
exit 1 # this return code is "swallowed" by qemu
fi
touch "/run/output/success"
exit 0
echo $rc > "/run/output/status"
exit $rc # this return code is "swallowed" by qemu
fi
readonly kernel_version="${1:-}"

View File

@@ -38,6 +38,21 @@ func invalidBPFObjNameChar(char rune) bool {
}
}
func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) {
buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
return nil, err
}
bytecode := buf.Bytes()
return sys.ProgLoad(&sys.ProgLoadAttr{
ProgType: sys.ProgType(typ),
License: sys.NewStringPointer(license),
Insns: sys.NewSlicePointer(bytecode),
InsnCnt: uint32(len(bytecode) / asm.InstructionSize),
})
}
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
_, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(ArrayOfMaps),
@@ -226,21 +241,30 @@ var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", f
asm.FnProbeReadKernel.Call(),
asm.Return(),
}
buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
return err
}
bytecode := buf.Bytes()
fd, err := sys.ProgLoad(&sys.ProgLoadAttr{
ProgType: sys.ProgType(Kprobe),
License: sys.NewStringPointer("GPL"),
Insns: sys.NewSlicePointer(bytecode),
InsnCnt: uint32(len(bytecode) / asm.InstructionSize),
})
fd, err := progLoad(insns, Kprobe, "GPL")
if err != nil {
return internal.ErrNotSupported
}
_ = fd.Close()
return nil
})
var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error {
insns := asm.Instructions{
asm.Call.Label("prog2").WithSymbol("prog1"),
asm.Return(),
asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"),
asm.Return(),
}
fd, err := progLoad(insns, SocketFilter, "MIT")
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
if err != nil {
return err
}
_ = fd.Close()
return nil
})

6
vendor/modules.txt vendored
View File

@@ -2,12 +2,12 @@
## explicit; go 1.13
github.com/checkpoint-restore/go-criu/v5
github.com/checkpoint-restore/go-criu/v5/rpc
# github.com/cilium/ebpf v0.8.1
## explicit; go 1.16
# github.com/cilium/ebpf v0.9.0
## explicit; go 1.17
github.com/cilium/ebpf
github.com/cilium/ebpf/asm
github.com/cilium/ebpf/btf
github.com/cilium/ebpf/internal
github.com/cilium/ebpf/internal/btf
github.com/cilium/ebpf/internal/sys
github.com/cilium/ebpf/internal/unix
github.com/cilium/ebpf/link