Mirror of https://github.com/opencontainers/runc.git
build(deps): bump github.com/cilium/ebpf from 0.5.0 to 0.6.0
Bumps [github.com/cilium/ebpf](https://github.com/cilium/ebpf) from 0.5.0 to 0.6.0.
- [Release notes](https://github.com/cilium/ebpf/releases)
- [Commits](https://github.com/cilium/ebpf/compare/v0.5.0...v0.6.0)

---
updated-dependencies:
- dependency-name: github.com/cilium/ebpf
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
go.mod (2 changes)

@@ -4,7 +4,7 @@ go 1.13
 
 require (
 	github.com/checkpoint-restore/go-criu/v5 v5.0.0
-	github.com/cilium/ebpf v0.5.0
+	github.com/cilium/ebpf v0.6.0
 	github.com/containerd/console v1.0.2
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/cyphar/filepath-securejoin v0.2.2
go.sum (4 changes)

@@ -1,8 +1,8 @@
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0 h1:TW8f/UvntYoVDMN1K2HlT82qH1rb0sOjpGw3m6Ym+i4=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
-github.com/cilium/ebpf v0.5.0 h1:E1KshmrMEtkMP2UjlWzfmUV1owWY+BnbL5FxxuatnrU=
-github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.0 h1:hOQqNhQdMIi0zmjii4jKUnI0i+NB7ApvTXs2MstI5S0=
+github.com/cilium/ebpf v0.6.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
 github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
vendor/github.com/cilium/ebpf/Makefile (7 changes, generated, vendored)

@@ -1,7 +1,7 @@
 # The development version of clang is distributed as the 'clang' binary,
 # while stable/released versions have a version number attached.
 # Pin the default clang to a stable version.
-CLANG ?= clang-11
+CLANG ?= clang-12
 CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS)
 
 # Obtain an absolute path to the directory of the Makefile.
@@ -17,7 +17,7 @@ VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
 TARGETS := \
 	testdata/loader-clang-7 \
 	testdata/loader-clang-9 \
-	testdata/loader-clang-11 \
+	testdata/loader-$(CLANG) \
 	testdata/invalid_map \
 	testdata/raw_tracepoint \
 	testdata/invalid_map_static \
@@ -33,6 +33,7 @@ TARGETS := \
 docker-all:
 	docker run --rm --user "${UIDGID}" \
 		-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
+		--env CFLAGS="-fdebug-prefix-map=/ebpf=." \
 		"${IMAGE}:${VERSION}" \
 		make all
 
@@ -47,6 +48,8 @@ clean:
 	-$(RM) internal/btf/testdata/*.elf
 
 all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS))
+	ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
+	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
 
 testdata/loader-%-el.elf: testdata/loader.c
 	$* $(CFLAGS) -mlittle-endian -c $< -o $@
vendor/github.com/cilium/ebpf/asm/instruction.go (32 changes, generated, vendored)

@@ -57,7 +57,7 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err
 		return 0, fmt.Errorf("can't unmarshal registers: %s", err)
 	}
 
-	if !bi.OpCode.isDWordLoad() {
+	if !bi.OpCode.IsDWordLoad() {
 		return InstructionSize, nil
 	}
 
@@ -80,7 +80,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
 		return 0, errors.New("invalid opcode")
 	}
 
-	isDWordLoad := ins.OpCode.isDWordLoad()
+	isDWordLoad := ins.OpCode.IsDWordLoad()
 
 	cons := int32(ins.Constant)
 	if isDWordLoad {
@@ -123,7 +123,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
 //
 // Returns an error if the instruction doesn't load a map.
 func (ins *Instruction) RewriteMapPtr(fd int) error {
-	if !ins.OpCode.isDWordLoad() {
+	if !ins.OpCode.IsDWordLoad() {
 		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
 	}
 
@@ -138,15 +138,19 @@ func (ins *Instruction) RewriteMapPtr(fd int) error {
 	return nil
 }
 
-func (ins *Instruction) mapPtr() uint32 {
-	return uint32(uint64(ins.Constant) & math.MaxUint32)
+// MapPtr returns the map fd for this instruction.
+//
+// The result is undefined if the instruction is not a load from a map,
+// see IsLoadFromMap.
+func (ins *Instruction) MapPtr() int {
+	return int(int32(uint64(ins.Constant) & math.MaxUint32))
 }
 
 // RewriteMapOffset changes the offset of a direct load from a map.
 //
 // Returns an error if the instruction is not a direct load.
 func (ins *Instruction) RewriteMapOffset(offset uint32) error {
-	if !ins.OpCode.isDWordLoad() {
+	if !ins.OpCode.IsDWordLoad() {
 		return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
 	}
 
@@ -163,10 +167,10 @@ func (ins *Instruction) mapOffset() uint32 {
 	return uint32(uint64(ins.Constant) >> 32)
 }
 
-// isLoadFromMap returns true if the instruction loads from a map.
+// IsLoadFromMap returns true if the instruction loads from a map.
 //
 // This covers both loading the map pointer and direct map value loads.
-func (ins *Instruction) isLoadFromMap() bool {
+func (ins *Instruction) IsLoadFromMap() bool {
 	return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
 }
 
@@ -177,6 +181,12 @@ func (ins *Instruction) IsFunctionCall() bool {
 	return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
 }
 
+// IsConstantLoad returns true if the instruction loads a constant of the
+// given size.
+func (ins *Instruction) IsConstantLoad(size Size) bool {
+	return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
+}
+
 // Format implements fmt.Formatter.
 func (ins Instruction) Format(f fmt.State, c rune) {
 	if c != 'v' {
@@ -197,8 +207,8 @@ func (ins Instruction) Format(f fmt.State, c rune) {
 		return
 	}
 
-	if ins.isLoadFromMap() {
-		fd := int32(ins.mapPtr())
+	if ins.IsLoadFromMap() {
+		fd := ins.MapPtr()
 		switch ins.Src {
 		case PseudoMapFD:
 			fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
@@ -403,7 +413,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
 func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
 	h := sha1.New()
 	for i, ins := range insns {
-		if ins.isLoadFromMap() {
+		if ins.IsLoadFromMap() {
 			ins.Constant = 0
 		}
 		_, err := ins.Marshal(h, bo)
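The renames above promote several helpers from the package-private API to the public one: OpCode.IsDWordLoad, Instruction.IsLoadFromMap, Instruction.MapPtr (now returning a signed int rather than a uint32), plus the new Instruction.IsConstantLoad. A minimal sketch of how the exported surface fits together, assuming only the v0.6.0 asm API shown in this diff:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// LoadMapPtr emits a dword immediate load referencing map fd 7.
	ins := asm.LoadMapPtr(asm.R1, 7)

	// These helpers were unexported before v0.6.0.
	fmt.Println(ins.OpCode.IsDWordLoad()) // true
	fmt.Println(ins.IsLoadFromMap())      // true
	fmt.Println(ins.MapPtr())             // 7

	// RewriteMapPtr swaps the fd while leaving the rest of the
	// instruction alone.
	if err := ins.RewriteMapPtr(42); err != nil {
		panic(err)
	}
	fmt.Println(ins.MapPtr()) // 42
}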
vendor/github.com/cilium/ebpf/asm/load_store.go (2 changes, generated, vendored)

@@ -111,7 +111,7 @@ func LoadMapPtr(dst Register, fd int) Instruction {
 		OpCode:   LoadImmOp(DWord),
 		Dst:      dst,
 		Src:      PseudoMapFD,
-		Constant: int64(fd),
+		Constant: int64(uint32(fd)),
 	}
 }
 
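The int64(uint32(fd)) conversion above matters because the upper 32 bits of Constant are reserved for the offset of direct map-value loads (see mapOffset and RewriteMapOffset in the instruction.go diff): a plain int64 conversion would sign-extend a negative fd into that half. A self-contained sketch of the difference, with -1 as a stand-in fd value:

package main

import "fmt"

func main() {
	fd := -1 // stand-in fd with the sign bit set

	// Plain conversion sign-extends into the upper 32 bits, which
	// Instruction.Constant reserves for the map-value offset.
	signExtended := int64(fd)

	// Converting through uint32 keeps only the low 32 bits.
	truncated := int64(uint32(fd))

	fmt.Printf("%016x\n", uint64(signExtended)) // ffffffffffffffff
	fmt.Printf("%016x\n", uint64(truncated))    // 00000000ffffffff
}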
vendor/github.com/cilium/ebpf/asm/opcode.go (4 changes, generated, vendored)

@@ -69,13 +69,13 @@ const InvalidOpCode OpCode = 0xff
 // rawInstructions returns the number of BPF instructions required
 // to encode this opcode.
 func (op OpCode) rawInstructions() int {
-	if op.isDWordLoad() {
+	if op.IsDWordLoad() {
 		return 2
 	}
 	return 1
 }
 
-func (op OpCode) isDWordLoad() bool {
+func (op OpCode) IsDWordLoad() bool {
 	return op == LoadImmOp(DWord)
 }
 
vendor/github.com/cilium/ebpf/collection.go (59 changes, generated, vendored)

@@ -3,6 +3,7 @@ package ebpf
 import (
 	"errors"
 	"fmt"
+	"io"
 	"math"
 	"reflect"
 	"strings"
@@ -89,8 +90,8 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
 //
 // The constant must be defined like so in the C program:
 //
-//    static volatile const type foobar;
-//    static volatile const type foobar = default;
+//    volatile const type foobar;
+//    volatile const type foobar = default;
 //
 // Replacement values must be of the same length as the C sizeof(type).
 // If necessary, they are marshalled according to the same rules as
@@ -269,11 +270,21 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co
 	}, nil
 }
 
-type btfHandleCache map[*btf.Spec]*btf.Handle
-
-func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
-	if btfs[spec] != nil {
-		return btfs[spec], nil
+type handleCache struct {
+	btfHandles map[*btf.Spec]*btf.Handle
+	btfSpecs   map[io.ReaderAt]*btf.Spec
+}
+
+func newHandleCache() *handleCache {
+	return &handleCache{
+		btfHandles: make(map[*btf.Spec]*btf.Handle),
+		btfSpecs:   make(map[io.ReaderAt]*btf.Spec),
+	}
+}
+
+func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
+	if hc.btfHandles[spec] != nil {
+		return hc.btfHandles[spec], nil
 	}
 
 	handle, err := btf.NewHandle(spec)
@@ -281,14 +292,30 @@ func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
 		return nil, err
 	}
 
-	btfs[spec] = handle
+	hc.btfHandles[spec] = handle
 	return handle, nil
 }
 
-func (btfs btfHandleCache) close() {
-	for _, handle := range btfs {
+func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) {
+	if hc.btfSpecs[rd] != nil {
+		return hc.btfSpecs[rd], nil
+	}
+
+	spec, err := btf.LoadSpecFromReader(rd)
+	if err != nil {
+		return nil, err
+	}
+
+	hc.btfSpecs[rd] = spec
+	return spec, nil
+}
+
+func (hc handleCache) close() {
+	for _, handle := range hc.btfHandles {
 		handle.Close()
 	}
+	hc.btfHandles = nil
+	hc.btfSpecs = nil
 }
 
 func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
@@ -300,12 +327,12 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
 	var (
 		maps             = make(map[string]*Map)
 		progs            = make(map[string]*Program)
-		btfs             = make(btfHandleCache)
+		handles          = newHandleCache()
 		skipMapsAndProgs = false
 	)
 
 	cleanup = func() {
-		btfs.close()
+		handles.close()
 
 		if skipMapsAndProgs {
 			return
@@ -335,7 +362,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
 			return nil, fmt.Errorf("missing map %s", mapName)
 		}
 
-		m, err := newMapWithOptions(mapSpec, opts.Maps, btfs)
+		m, err := newMapWithOptions(mapSpec, opts.Maps, handles)
 		if err != nil {
 			return nil, fmt.Errorf("map %s: %w", mapName, err)
 		}
@@ -360,7 +387,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
 		for i := range progSpec.Instructions {
 			ins := &progSpec.Instructions[i]
 
-			if ins.OpCode != asm.LoadImmOp(asm.DWord) || ins.Reference == "" {
+			if !ins.IsLoadFromMap() || ins.Reference == "" {
 				continue
 			}
 
@@ -372,7 +399,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
 
 			m, err := loadMap(ins.Reference)
 			if err != nil {
-				return nil, fmt.Errorf("program %s: %s", progName, err)
+				return nil, fmt.Errorf("program %s: %w", progName, err)
 			}
 
 			fd := m.FD()
@@ -384,7 +411,7 @@ func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
 		}
 	}
 
-	prog, err := newProgramWithOptions(progSpec, opts.Programs, btfs)
+	prog, err := newProgramWithOptions(progSpec, opts.Programs, handles)
 	if err != nil {
 		return nil, fmt.Errorf("program %s: %w", progName, err)
 	}
@@ -534,7 +561,7 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va
 		}
 
 		if err != nil {
-			return fmt.Errorf("field %s: %s", field.Name, err)
+			return fmt.Errorf("field %s: %w", field.Name, err)
 		}
 	}
 
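One behavioural note hidden in the doc-comment change above: constants fed to RewriteConstants must no longer be declared `static` on the C side, presumably because clang does not emit symbol table entries for statics (compare the relocateInstruction comment in the elf_reader.go diff below), so the library cannot find them by name. A hedged sketch of the Go side, assuming a hypothetical object file probe.o that declares `volatile const __u32 sample_rate;`:

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// probe.o is a hypothetical compiled BPF object.
	spec, err := ebpf.LoadCollectionSpec("probe.o")
	if err != nil {
		log.Fatal(err)
	}

	// Overwrite the `volatile const` before any program is loaded.
	err = spec.RewriteConstants(map[string]interface{}{
		"sample_rate": uint32(100),
	})
	if err != nil {
		log.Fatal(err)
	}
}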
vendor/github.com/cilium/ebpf/elf_reader.go (47 changes, generated, vendored)

@@ -96,7 +96,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
 	}
 
 	btfSpec, err := btf.LoadSpecFromReader(rd)
-	if err != nil {
+	if err != nil && !errors.Is(err, btf.ErrNotFound) {
 		return nil, fmt.Errorf("load BTF: %w", err)
 	}
 
@@ -159,7 +159,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
 		}
 
 		if target.Flags&elf.SHF_STRINGS > 0 {
-			return nil, fmt.Errorf("section %q: string %q is not stack allocated: %w", section.Name, rel.Name, ErrNotSupported)
+			return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
 		}
 
 		target.references++
@@ -374,17 +374,25 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		}
 
 	case dataSection:
+		var offset uint32
 		switch typ {
 		case elf.STT_SECTION:
 			if bind != elf.STB_LOCAL {
 				return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
 			}
 
+			// This is really a reference to a static symbol, which clang doesn't
+			// emit a symbol table entry for. Instead it encodes the offset in
+			// the instruction itself.
+			offset = uint32(uint64(ins.Constant))
+
 		case elf.STT_OBJECT:
 			if bind != elf.STB_GLOBAL {
 				return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
 			}
 
+			offset = uint32(rel.Value)
+
 		default:
 			return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
 		}
@@ -394,10 +402,8 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
 		// it's not clear how to encode that into Instruction.
 		name = target.Name
 
-		// For some reason, clang encodes the offset of the symbol its
-		// section in the first basic BPF instruction, while the kernel
-		// expects it in the second one.
-		ins.Constant <<= 32
+		// The kernel expects the offset in the second basic BPF instruction.
+		ins.Constant = int64(uint64(offset) << 32)
 		ins.Src = asm.PseudoMapValue
 
 		// Mark the instruction as needing an update when creating the
@@ -491,33 +497,38 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
 			return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
 		}
 
-		if maps[mapSym.Name] != nil {
+		mapName := mapSym.Name
+		if maps[mapName] != nil {
 			return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
 		}
 
 		lr := io.LimitReader(r, int64(size))
 
 		spec := MapSpec{
-			Name: SanitizeName(mapSym.Name, -1),
+			Name: SanitizeName(mapName, -1),
 		}
 		switch {
 		case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
-			return fmt.Errorf("map %v: missing type", mapSym)
+			return fmt.Errorf("map %s: missing type", mapName)
 		case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
-			return fmt.Errorf("map %v: missing key size", mapSym)
+			return fmt.Errorf("map %s: missing key size", mapName)
 		case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
-			return fmt.Errorf("map %v: missing value size", mapSym)
+			return fmt.Errorf("map %s: missing value size", mapName)
 		case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
-			return fmt.Errorf("map %v: missing max entries", mapSym)
+			return fmt.Errorf("map %s: missing max entries", mapName)
 		case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
-			return fmt.Errorf("map %v: missing flags", mapSym)
+			return fmt.Errorf("map %s: missing flags", mapName)
 		}
 
 		if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
-			return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
+			return fmt.Errorf("map %s: unknown and non-zero fields in definition", mapName)
 		}
 
-		maps[mapSym.Name] = &spec
+		if err := spec.clampPerfEventArraySize(); err != nil {
+			return fmt.Errorf("map %s: %w", mapName, err)
+		}
+
+		maps[mapName] = &spec
 	}
 }
 
@@ -565,6 +576,10 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
 			return fmt.Errorf("map %v: %w", name, err)
 		}
 
+		if err := mapSpec.clampPerfEventArraySize(); err != nil {
+			return fmt.Errorf("map %v: %w", name, err)
+		}
+
 		maps[name] = mapSpec
 	}
 }
@@ -847,6 +862,8 @@ func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
 		"uretprobe/":      {Kprobe, AttachNone, 0},
 		"tracepoint/":     {TracePoint, AttachNone, 0},
 		"raw_tracepoint/": {RawTracepoint, AttachNone, 0},
+		"raw_tp/":         {RawTracepoint, AttachNone, 0},
+		"tp_btf/":         {Tracing, AttachTraceRawTp, 0},
 		"xdp":             {XDP, AttachNone, 0},
 		"perf_event":      {PerfEvent, AttachNone, 0},
 		"lwt_in":          {LWTIn, AttachNone, 0},
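The two new table entries mean an ELF section like tp_btf/sched_switch now resolves to a Tracing program attached via AttachTraceRawTp, while raw_tp/ becomes an alias for raw_tracepoint/. A sketch of how that surfaces through the public API, assuming a hypothetical tracer.o whose program's function symbol is sched_switch:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// tracer.o is a hypothetical BPF object with a program in the
	// ELF section "tp_btf/sched_switch".
	spec, err := ebpf.LoadCollectionSpec("tracer.o")
	if err != nil {
		log.Fatal(err)
	}

	// getProgType fills these fields in from the section prefix.
	prog := spec.Programs["sched_switch"]
	fmt.Println(prog.Type)       // Tracing
	fmt.Println(prog.AttachType) // AttachTraceRawTp
}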
vendor/github.com/cilium/ebpf/internal/btf/btf.go (28 changes, generated, vendored)

@@ -35,7 +35,7 @@ type Spec struct {
 	namedTypes map[string][]namedType
 	funcInfos  map[string]extInfo
 	lineInfos  map[string]extInfo
-	coreRelos  map[string]bpfCoreRelos
+	coreRelos  map[string]coreRelos
 	byteOrder  binary.ByteOrder
 }
 
@@ -53,7 +53,7 @@ type btfHeader struct {
 
 // LoadSpecFromReader reads BTF sections from an ELF.
 //
-// Returns a nil Spec and no error if no BTF was present.
+// Returns ErrNotFound if the reader contains no BTF.
 func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
 	file, err := internal.NewSafeELFFile(rd)
 	if err != nil {
@@ -67,7 +67,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
 	}
 
 	if btfSection == nil {
-		return nil, nil
+		return nil, fmt.Errorf("btf: %w", ErrNotFound)
 	}
 
 	symbols, err := file.Symbols()
@@ -438,13 +438,13 @@ func (s *Spec) Program(name string, length uint64) (*Program, error) {
 
 	funcInfos, funcOK := s.funcInfos[name]
 	lineInfos, lineOK := s.lineInfos[name]
-	coreRelos, coreOK := s.coreRelos[name]
+	relos, coreOK := s.coreRelos[name]
 
 	if !funcOK && !lineOK && !coreOK {
 		return nil, fmt.Errorf("no extended BTF info for section %s", name)
 	}
 
-	return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
+	return &Program{s, length, funcInfos, lineInfos, relos}, nil
 }
 
 // Datasec returns the BTF required to create maps which represent data sections.
@@ -491,7 +491,8 @@ func (s *Spec) FindType(name string, typ Type) error {
 		return fmt.Errorf("type %s: %w", name, ErrNotFound)
 	}
 
-	value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
+	cpy, _ := copyType(candidate, nil)
+	value := reflect.Indirect(reflect.ValueOf(cpy))
 	reflect.Indirect(reflect.ValueOf(typ)).Set(value)
 	return nil
 }
@@ -606,7 +607,7 @@ type Program struct {
 	spec                 *Spec
 	length               uint64
 	funcInfos, lineInfos extInfo
-	coreRelos            bpfCoreRelos
+	coreRelos            coreRelos
 }
 
 // ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
@@ -665,16 +666,23 @@ func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
 	return s.lineInfos.recordSize, bytes, nil
 }
 
-// ProgramRelocations returns the CO-RE relocations required to adjust the
-// program to the target.
+// ProgramFixups returns the changes required to adjust the program to the target.
 //
 // This is a free function instead of a method to hide it from users
 // of package ebpf.
-func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
+func ProgramFixups(s *Program, target *Spec) (COREFixups, error) {
 	if len(s.coreRelos) == 0 {
 		return nil, nil
 	}
 
+	if target == nil {
+		var err error
+		target, err = LoadKernelSpec()
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	return coreRelocate(s.spec, target, s.coreRelos)
 }
 
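LoadSpecFromReader changing its "no BTF" contract from (nil, nil) to a wrapped ErrNotFound is what lets the caller in the elf_reader.go diff above treat BTF as optional with a single errors.Is check. A standalone sketch of the pattern (the btf package is internal, so the sentinel here is a stand-in):

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for btf.ErrNotFound.
var errNotFound = errors.New("not found")

func loadSpec(hasBTF bool) (string, error) {
	if !hasBTF {
		// Wrapping with %w keeps the sentinel visible to errors.Is.
		return "", fmt.Errorf("btf: %w", errNotFound)
	}
	return "spec", nil
}

func main() {
	spec, err := loadSpec(false)
	// Optional dependency: swallow "missing", surface real failures.
	if err != nil && !errors.Is(err, errNotFound) {
		panic(err)
	}
	fmt.Println("no BTF present, continuing without it:", spec == "")
}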
vendor/github.com/cilium/ebpf/internal/btf/core.go (795 changes, generated, vendored)

@@ -3,43 +3,160 @@ package btf
 import (
 	"errors"
 	"fmt"
+	"math"
 	"reflect"
+	"sort"
 	"strconv"
 	"strings"
+
+	"github.com/cilium/ebpf/asm"
 )
 
 // Code in this file is derived from libbpf, which is available under a BSD
 // 2-Clause license.
 
-// Relocation describes a CO-RE relocation.
-type Relocation struct {
-	Current uint32
-	New     uint32
-}
-
-func (r Relocation) equal(other Relocation) bool {
-	return r.Current == other.Current && r.New == other.New
-}
-
-// coreReloKind is the type of CO-RE relocation
-type coreReloKind uint32
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+	Kind   COREKind
+	Local  uint32
+	Target uint32
+	Poison bool
+}
+
+func (f COREFixup) equal(other COREFixup) bool {
+	return f.Local == other.Local && f.Target == other.Target
+}
+
+func (f COREFixup) String() string {
+	if f.Poison {
+		return fmt.Sprintf("%s=poison", f.Kind)
+	}
+	return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target)
+}
+
+func (f COREFixup) apply(ins *asm.Instruction) error {
+	if f.Poison {
+		return errors.New("can't poison individual instruction")
+	}
+
+	switch class := ins.OpCode.Class(); class {
+	case asm.LdXClass, asm.StClass, asm.StXClass:
+		if want := int16(f.Local); want != ins.Offset {
+			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want)
+		}
+
+		if f.Target > math.MaxInt16 {
+			return fmt.Errorf("offset %d exceeds MaxInt16", f.Target)
+		}
+
+		ins.Offset = int16(f.Target)
+
+	case asm.LdClass:
+		if !ins.IsConstantLoad(asm.DWord) {
+			return fmt.Errorf("not a dword-sized immediate load")
+		}
+
+		if want := int64(f.Local); want != ins.Constant {
+			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
+		}
+
+		ins.Constant = int64(f.Target)
+
+	case asm.ALUClass:
+		if ins.OpCode.ALUOp() == asm.Swap {
+			return fmt.Errorf("relocation against swap")
+		}
+
+		fallthrough
+
+	case asm.ALU64Class:
+		if src := ins.OpCode.Source(); src != asm.ImmSource {
+			return fmt.Errorf("invalid source %s", src)
+		}
+
+		if want := int64(f.Local); want != ins.Constant {
+			return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want)
+		}
+
+		if f.Target > math.MaxInt32 {
+			return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target)
+		}
+
+		ins.Constant = int64(f.Target)
+
+	default:
+		return fmt.Errorf("invalid class %s", class)
+	}
+
+	return nil
+}
+
+func (f COREFixup) isNonExistant() bool {
+	return f.Kind.checksForExistence() && f.Target == 0
+}
+
+type COREFixups map[uint64]COREFixup
+
+// Apply a set of CO-RE relocations to a BPF program.
+func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) {
+	if len(fs) == 0 {
+		cpy := make(asm.Instructions, len(insns))
+		copy(cpy, insns)
+		return insns, nil
+	}
+
+	cpy := make(asm.Instructions, 0, len(insns))
+	iter := insns.Iterate()
+	for iter.Next() {
+		fixup, ok := fs[iter.Offset.Bytes()]
+		if !ok {
+			cpy = append(cpy, *iter.Ins)
+			continue
+		}
+
+		ins := *iter.Ins
+		if fixup.Poison {
+			const badRelo = asm.BuiltinFunc(0xbad2310)
+
+			cpy = append(cpy, badRelo.Call())
+			if ins.OpCode.IsDWordLoad() {
+				// 64 bit constant loads occupy two raw bpf instructions, so
+				// we need to add another instruction as padding.
+				cpy = append(cpy, badRelo.Call())
+			}
+
+			continue
+		}
+
+		if err := fixup.apply(&ins); err != nil {
+			return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err)
+		}
+
+		cpy = append(cpy, ins)
+	}
+
+	return cpy, nil
+}
+
+// COREKind is the type of CO-RE relocation
+type COREKind uint32
 
 const (
-	reloFieldByteOffset coreReloKind = iota /* field byte offset */
+	reloFieldByteOffset COREKind = iota /* field byte offset */
 	reloFieldByteSize   /* field size in bytes */
 	reloFieldExists     /* field existence in target kernel */
 	reloFieldSigned     /* field signedness (0 - unsigned, 1 - signed) */
 	reloFieldLShiftU64  /* bitfield-specific left bitshift */
 	reloFieldRShiftU64  /* bitfield-specific right bitshift */
 	reloTypeIDLocal     /* type ID in local BPF object */
 	reloTypeIDTarget    /* type ID in target kernel */
 	reloTypeExists      /* type existence in target kernel */
 	reloTypeSize        /* type size in bytes */
 	reloEnumvalExists   /* enum value existence in target kernel */
 	reloEnumvalValue    /* enum value integer value */
 )
 
-func (k coreReloKind) String() string {
+func (k COREKind) String() string {
 	switch k {
 	case reloFieldByteOffset:
 		return "byte_off"
@@ -70,103 +187,249 @@ func (k coreReloKind) String() string {
 	}
 }
 
-func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
-	if target == nil {
-		var err error
-		target, err = loadKernelSpec()
-		if err != nil {
-			return nil, err
-		}
-	}
-
+func (k COREKind) checksForExistence() bool {
+	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}
+
+func coreRelocate(local, target *Spec, relos coreRelos) (COREFixups, error) {
 	if local.byteOrder != target.byteOrder {
 		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
 	}
 
-	relocations := make(map[uint64]Relocation, len(coreRelos))
-	for _, relo := range coreRelos {
-		accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
-		if err != nil {
-			return nil, err
-		}
-
-		accessor, err := parseCoreAccessor(accessorStr)
-		if err != nil {
-			return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
-		}
-
-		if int(relo.TypeID) >= len(local.types) {
-			return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
-		}
-
-		typ := local.types[relo.TypeID]
-
-		if relo.ReloKind == reloTypeIDLocal {
-			relocations[uint64(relo.InsnOff)] = Relocation{
-				uint32(typ.ID()),
-				uint32(typ.ID()),
-			}
-			continue
-		}
-
-		named, ok := typ.(namedType)
-		if !ok || named.name() == "" {
-			return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
-		}
-
-		name := essentialName(named.name())
-		res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
-		if err != nil {
-			return nil, fmt.Errorf("relocate %s: %w", name, err)
-		}
-
-		relocations[uint64(relo.InsnOff)] = res
-	}
-
-	return relocations, nil
+	var ids []TypeID
+	relosByID := make(map[TypeID]coreRelos)
+	result := make(COREFixups, len(relos))
+	for _, relo := range relos {
+		if relo.kind == reloTypeIDLocal {
+			// Filtering out reloTypeIDLocal here makes our lives a lot easier
+			// down the line, since it doesn't have a target at all.
+			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+			}
+
+			result[uint64(relo.insnOff)] = COREFixup{
+				relo.kind,
+				uint32(relo.typeID),
+				uint32(relo.typeID),
+				false,
+			}
+			continue
+		}
+
+		relos, ok := relosByID[relo.typeID]
+		if !ok {
+			ids = append(ids, relo.typeID)
+		}
+		relosByID[relo.typeID] = append(relos, relo)
+	}
+
+	// Ensure we work on relocations in a deterministic order.
+	sort.Slice(ids, func(i, j int) bool {
+		return ids[i] < ids[j]
+	})
+
+	for _, id := range ids {
+		if int(id) >= len(local.types) {
+			return nil, fmt.Errorf("invalid type id %d", id)
+		}
+
+		localType := local.types[id]
+		named, ok := localType.(namedType)
+		if !ok || named.name() == "" {
+			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
+		}
+
+		relos := relosByID[id]
+		targets := target.namedTypes[named.essentialName()]
+		fixups, err := coreCalculateFixups(localType, targets, relos)
+		if err != nil {
+			return nil, fmt.Errorf("relocate %s: %w", localType, err)
+		}
+
+		for i, relo := range relos {
+			result[uint64(relo.insnOff)] = fixups[i]
+		}
+	}
+
+	return result, nil
 }
 
 var errAmbiguousRelocation = errors.New("ambiguous relocation")
+var errImpossibleRelocation = errors.New("impossible relocation")
 
-func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
-	var relos []Relocation
-	var matches []Type
-	for _, target := range targets {
-		switch kind {
-		case reloTypeIDTarget:
-			if localAccessor[0] != 0 {
-				return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
-			}
-
-			if compat, err := coreAreTypesCompatible(local, target); err != nil {
-				return Relocation{}, fmt.Errorf("%s: %s", kind, err)
-			} else if !compat {
-				continue
-			}
-
-			relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})
-
-		default:
-			return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
-		}
-		matches = append(matches, target)
-	}
-
-	if len(relos) == 0 {
-		// TODO: Add switch for existence checks like reloEnumvalExists here.
-		// TODO: This might have to be poisoned.
-		return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
-	}
-
-	relo := relos[0]
-	for _, altRelo := range relos[1:] {
-		if !altRelo.equal(relo) {
-			return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
-		}
-	}
-
-	return relo, nil
-}
+// coreCalculateFixups calculates the fixups for the given relocations using
+// the "best" target.
+//
+// The best target is determined by scoring: the less poisoning we have to do
+// the better the target is.
+func coreCalculateFixups(local Type, targets []namedType, relos coreRelos) ([]COREFixup, error) {
+	localID := local.ID()
+	local, err := copyType(local, skipQualifierAndTypedef)
+	if err != nil {
+		return nil, err
+	}
+
+	bestScore := len(relos)
+	var bestFixups []COREFixup
+	for i := range targets {
+		targetID := targets[i].ID()
+		target, err := copyType(targets[i], skipQualifierAndTypedef)
+		if err != nil {
+			return nil, err
+		}
+
+		score := 0 // lower is better
+		fixups := make([]COREFixup, 0, len(relos))
+		for _, relo := range relos {
+			fixup, err := coreCalculateFixup(local, localID, target, targetID, relo)
+			if err != nil {
+				return nil, fmt.Errorf("target %s: %w", target, err)
+			}
+			if fixup.Poison || fixup.isNonExistant() {
+				score++
+			}
+			fixups = append(fixups, fixup)
+		}
+
+		if score > bestScore {
+			// We have a better target already, ignore this one.
+			continue
+		}
+
+		if score < bestScore {
+			// This is the best target yet, use it.
+			bestScore = score
+			bestFixups = fixups
+			continue
+		}
+
+		// Some other target has the same score as the current one. Make sure
+		// the fixups agree with each other.
+		for i, fixup := range bestFixups {
+			if !fixup.equal(fixups[i]) {
+				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation)
+			}
+		}
+	}
+
+	if bestFixups == nil {
+		// Nothing at all matched, probably because there are no suitable
+		// targets at all. Poison everything!
+		bestFixups = make([]COREFixup, len(relos))
+		for i, relo := range relos {
+			bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true}
+		}
+	}
+
+	return bestFixups, nil
+}
+
+// coreCalculateFixup calculates the fixup for a single local type, target type
+// and relocation.
+func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo coreRelo) (COREFixup, error) {
+	fixup := func(local, target uint32) (COREFixup, error) {
+		return COREFixup{relo.kind, local, target, false}, nil
+	}
+	poison := func() (COREFixup, error) {
+		if relo.kind.checksForExistence() {
+			return fixup(1, 0)
+		}
+		return COREFixup{relo.kind, 0, 0, true}, nil
+	}
+	zero := COREFixup{}
+
+	switch relo.kind {
+	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
+		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+			return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+		}
+
+		err := coreAreTypesCompatible(local, target)
+		if errors.Is(err, errImpossibleRelocation) {
+			return poison()
+		}
+		if err != nil {
+			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+		}
+
+		switch relo.kind {
+		case reloTypeExists:
+			return fixup(1, 1)
+
+		case reloTypeIDTarget:
+			return fixup(uint32(localID), uint32(targetID))
+
+		case reloTypeSize:
+			localSize, err := Sizeof(local)
+			if err != nil {
+				return zero, err
+			}
+
+			targetSize, err := Sizeof(target)
+			if err != nil {
+				return zero, err
+			}
+
+			return fixup(uint32(localSize), uint32(targetSize))
+		}
+
+	case reloEnumvalValue, reloEnumvalExists:
+		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
+		if errors.Is(err, errImpossibleRelocation) {
+			return poison()
+		}
+		if err != nil {
+			return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+		}
+
+		switch relo.kind {
+		case reloEnumvalExists:
+			return fixup(1, 1)
+
+		case reloEnumvalValue:
+			return fixup(uint32(localValue.Value), uint32(targetValue.Value))
+		}
+
+	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists:
+		if _, ok := target.(*Fwd); ok {
+			// We can't relocate fields using a forward declaration, so
+			// skip it. If a non-forward declaration is present in the BTF
+			// we'll find it in one of the other iterations.
+			return poison()
+		}
+
+		localField, targetField, err := coreFindField(local, relo.accessor, target)
+		if errors.Is(err, errImpossibleRelocation) {
+			return poison()
+		}
+		if err != nil {
+			return zero, fmt.Errorf("target %s: %w", target, err)
+		}
+
+		switch relo.kind {
+		case reloFieldExists:
+			return fixup(1, 1)
+
+		case reloFieldByteOffset:
+			return fixup(localField.offset/8, targetField.offset/8)
+
+		case reloFieldByteSize:
+			localSize, err := Sizeof(localField.Type)
+			if err != nil {
+				return zero, err
+			}
+
+			targetSize, err := Sizeof(targetField.Type)
+			if err != nil {
+				return zero, err
+			}
+
+			return fixup(uint32(localSize), uint32(targetSize))
+		}
+	}
+
+	return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
+}
@@ -219,6 +482,240 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) {
 	return result, nil
 }
 
+func (ca coreAccessor) String() string {
+	strs := make([]string, 0, len(ca))
+	for _, i := range ca {
+		strs = append(strs, strconv.Itoa(i))
+	}
+	return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+	e, ok := t.(*Enum)
+	if !ok {
+		return nil, fmt.Errorf("not an enum: %s", t)
+	}
+
+	if len(ca) > 1 {
+		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+	}
+
+	i := ca[0]
+	if i >= len(e.Values) {
+		return nil, fmt.Errorf("invalid index %d for %s", i, e)
+	}
+
+	return &e.Values[i], nil
+}
+
+type coreField struct {
+	Type   Type
+	offset uint32
+}
+
+func adjustOffset(base uint32, t Type, n int) (uint32, error) {
+	size, err := Sizeof(t)
+	if err != nil {
+		return 0, err
+	}
+
+	return base + (uint32(n) * uint32(size) * 8), nil
+}
+
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the field and the offset of the field from the start of
+// target in bits.
+func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) {
+	// The first index is used to offset a pointer of the base type like
+	// when accessing an array.
+	localOffset, err := adjustOffset(0, local, localAcc[0])
+	if err != nil {
+		return coreField{}, coreField{}, err
+	}
+
+	targetOffset, err := adjustOffset(0, target, localAcc[0])
+	if err != nil {
+		return coreField{}, coreField{}, err
+	}
+
+	if err := coreAreMembersCompatible(local, target); err != nil {
+		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+	}
+
+	var localMaybeFlex, targetMaybeFlex bool
+	for _, acc := range localAcc[1:] {
+		switch localType := local.(type) {
+		case composite:
+			// For composite types acc is used to find the field in the local type,
+			// and then we try to find a field in target with the same name.
+			localMembers := localType.members()
+			if acc >= len(localMembers) {
+				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local)
+			}
+
+			localMember := localMembers[acc]
+			if localMember.Name == "" {
+				_, ok := localMember.Type.(composite)
+				if !ok {
+					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+				}
+
+				// This is an anonymous struct or union, ignore it.
+				local = localMember.Type
+				localOffset += localMember.Offset
+				localMaybeFlex = false
+				continue
+			}
+
+			targetType, ok := target.(composite)
+			if !ok {
+				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
+			}
+
+			targetMember, last, err := coreFindMember(targetType, localMember.Name)
+			if err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+			if targetMember.BitfieldSize > 0 {
+				return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported)
+			}
+
+			local = localMember.Type
+			localMaybeFlex = acc == len(localMembers)-1
+			localOffset += localMember.Offset
+			target = targetMember.Type
+			targetMaybeFlex = last
+			targetOffset += targetMember.Offset
+
+		case *Array:
+			// For arrays, acc is the index in the target.
+			targetType, ok := target.(*Array)
+			if !ok {
+				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
+			}
+
+			if localType.Nelems == 0 && !localMaybeFlex {
+				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
+			}
+			if targetType.Nelems == 0 && !targetMaybeFlex {
+				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
+			}
+
+			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
+				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
+			}
+			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
+				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
+			}
+
+			local = localType.Type
+			localMaybeFlex = false
+			localOffset, err = adjustOffset(localOffset, local, acc)
+			if err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+			target = targetType.Type
+			targetMaybeFlex = false
+			targetOffset, err = adjustOffset(targetOffset, target, acc)
+			if err != nil {
+				return coreField{}, coreField{}, err
+			}
+
+		default:
+			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
+		}
+
+		if err := coreAreMembersCompatible(local, target); err != nil {
+			return coreField{}, coreField{}, err
+		}
+	}
+
+	return coreField{local, localOffset}, coreField{target, targetOffset}, nil
+}
+
+// coreFindMember finds a member in a composite type while handling anonymous
+// structs and unions.
+func coreFindMember(typ composite, name Name) (Member, bool, error) {
+	if name == "" {
+		return Member{}, false, errors.New("can't search for anonymous member")
+	}
+
+	type offsetTarget struct {
+		composite
+		offset uint32
+	}
+
+	targets := []offsetTarget{{typ, 0}}
+	visited := make(map[composite]bool)
+
+	for i := 0; i < len(targets); i++ {
+		target := targets[i]
+
+		// Only visit targets once to prevent infinite recursion.
+		if visited[target] {
+			continue
+		}
+		if len(visited) >= maxTypeDepth {
+			// This check is different than libbpf, which restricts the entire
+			// path to BPF_CORE_SPEC_MAX_LEN items.
+			return Member{}, false, fmt.Errorf("type is nested too deep")
+		}
+		visited[target] = true
+
+		members := target.members()
+		for j, member := range members {
+			if member.Name == name {
+				// NB: This is safe because member is a copy.
+				member.Offset += target.offset
+				return member, j == len(members)-1, nil
+			}
+
+			// The names don't match, but this member could be an anonymous struct
+			// or union.
+			if member.Name != "" {
+				continue
+			}
+
+			comp, ok := member.Type.(composite)
+			if !ok {
+				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
+			}
+
+			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
+		}
+	}
+
+	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
+}
+
+// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+	localValue, err := localAcc.enumValue(local)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	targetEnum, ok := target.(*Enum)
+	if !ok {
+		return nil, nil, errImpossibleRelocation
+	}
+
+	localName := localValue.Name.essentialName()
+	for i, targetValue := range targetEnum.Values {
+		if targetValue.Name.essentialName() != localName {
+			continue
+		}
+
+		return localValue, &targetEnum.Values[i], nil
+	}
+
+	return nil, nil, errImpossibleRelocation
+}
+
 /* The comment below is from bpf_core_types_are_compat in libbpf.c:
  *
  * Check local and target types for compatibility. This check is used for
@@ -239,8 +736,10 @@ func parseCoreAccessor(accessor string) (coreAccessor, error) {
  * number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if types are not compatible.
 */
-func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
+func coreAreTypesCompatible(localType Type, targetType Type) error {
 	var (
 		localTs, targetTs typeDeque
 		l, t              = &localType, &targetType
@@ -249,14 +748,14 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
 
 	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
 		if depth >= maxTypeDepth {
-			return false, errors.New("types are nested too deep")
+			return errors.New("types are nested too deep")
 		}
 
-		localType = skipQualifierAndTypedef(*l)
-		targetType = skipQualifierAndTypedef(*t)
+		localType = *l
+		targetType = *t
 
 		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return false, nil
+			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
 		}
 
 		switch lv := (localType).(type) {
@@ -266,7 +765,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
 	case *Int:
 		tv := targetType.(*Int)
 		if lv.isBitfield() || tv.isBitfield() {
-			return false, nil
|
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
|
||||||
}
|
}
|
||||||
|
|
||||||
case *Pointer, *Array:
|
case *Pointer, *Array:
|
||||||
@@ -277,7 +776,7 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
|
|||||||
case *FuncProto:
|
case *FuncProto:
|
||||||
tv := targetType.(*FuncProto)
|
tv := targetType.(*FuncProto)
|
||||||
if len(lv.Params) != len(tv.Params) {
|
if len(lv.Params) != len(tv.Params) {
|
||||||
return false, nil
|
return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
|
||||||
}
|
}
|
||||||
|
|
||||||
depth++
|
depth++
|
||||||
@@ -285,22 +784,24 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
|
|||||||
targetType.walk(&targetTs)
|
targetType.walk(&targetTs)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return false, fmt.Errorf("unsupported type %T", localType)
|
return fmt.Errorf("unsupported type %T", localType)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if l != nil {
|
if l != nil {
|
||||||
return false, fmt.Errorf("dangling local type %T", *l)
|
return fmt.Errorf("dangling local type %T", *l)
|
||||||
}
|
}
|
||||||
|
|
||||||
if t != nil {
|
if t != nil {
|
||||||
return false, fmt.Errorf("dangling target type %T", *t)
|
return fmt.Errorf("dangling target type %T", *t)
|
||||||
}
|
}
|
||||||
|
|
||||||
return true, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
|
/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
|
||||||
|
*
|
||||||
|
* The comment below is from bpf_core_fields_are_compat in libbpf.c:
|
||||||
*
|
*
|
||||||
* Check two types for compatibility for the purpose of field access
|
* Check two types for compatibility for the purpose of field access
|
||||||
* relocation. const/volatile/restrict and typedefs are skipped to ensure we
|
* relocation. const/volatile/restrict and typedefs are skipped to ensure we
|
||||||
@@ -314,65 +815,63 @@ func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
|
|||||||
* - for INT, size and signedness are ignored;
|
* - for INT, size and signedness are ignored;
|
||||||
* - for ARRAY, dimensionality is ignored, element types are checked for
|
* - for ARRAY, dimensionality is ignored, element types are checked for
|
||||||
* compatibility recursively;
|
* compatibility recursively;
|
||||||
|
* [ NB: coreAreMembersCompatible doesn't recurse, this check is done
|
||||||
|
* by coreFindField. ]
|
||||||
* - everything else shouldn't be ever a target of relocation.
|
* - everything else shouldn't be ever a target of relocation.
|
||||||
* These rules are not set in stone and probably will be adjusted as we get
|
* These rules are not set in stone and probably will be adjusted as we get
|
||||||
* more experience with using BPF CO-RE relocations.
|
* more experience with using BPF CO-RE relocations.
|
||||||
|
*
|
||||||
|
* Returns errImpossibleRelocation if the members are not compatible.
|
||||||
*/
|
*/
|
||||||
func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
|
func coreAreMembersCompatible(localType Type, targetType Type) error {
|
||||||
doNamesMatch := func(a, b string) bool {
|
doNamesMatch := func(a, b string) error {
|
||||||
if a == "" || b == "" {
|
if a == "" || b == "" {
|
||||||
// allow anonymous and named type to match
|
// allow anonymous and named type to match
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return essentialName(a) == essentialName(b)
|
if essentialName(a) == essentialName(b) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
|
||||||
}
|
}
|
||||||
|
|
||||||
for depth := 0; depth <= maxTypeDepth; depth++ {
|
_, lok := localType.(composite)
|
||||||
localType = skipQualifierAndTypedef(localType)
|
_, tok := targetType.(composite)
|
||||||
targetType = skipQualifierAndTypedef(targetType)
|
if lok && tok {
|
||||||
|
return nil
|
||||||
_, lok := localType.(composite)
|
|
||||||
_, tok := targetType.(composite)
|
|
||||||
if lok && tok {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch lv := localType.(type) {
|
|
||||||
case *Pointer:
|
|
||||||
return true, nil
|
|
||||||
|
|
||||||
case *Enum:
|
|
||||||
tv := targetType.(*Enum)
|
|
||||||
return doNamesMatch(lv.name(), tv.name()), nil
|
|
||||||
|
|
||||||
case *Fwd:
|
|
||||||
tv := targetType.(*Fwd)
|
|
||||||
return doNamesMatch(lv.name(), tv.name()), nil
|
|
||||||
|
|
||||||
case *Int:
|
|
||||||
tv := targetType.(*Int)
|
|
||||||
return !lv.isBitfield() && !tv.isBitfield(), nil
|
|
||||||
|
|
||||||
case *Array:
|
|
||||||
tv := targetType.(*Array)
|
|
||||||
|
|
||||||
localType = lv.Type
|
|
||||||
targetType = tv.Type
|
|
||||||
|
|
||||||
default:
|
|
||||||
return false, fmt.Errorf("unsupported type %T", localType)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return false, errors.New("types are nested too deep")
|
if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
|
||||||
|
return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch lv := localType.(type) {
|
||||||
|
case *Array, *Pointer:
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case *Enum:
|
||||||
|
tv := targetType.(*Enum)
|
||||||
|
return doNamesMatch(lv.name(), tv.name())
|
||||||
|
|
||||||
|
case *Fwd:
|
||||||
|
tv := targetType.(*Fwd)
|
||||||
|
return doNamesMatch(lv.name(), tv.name())
|
||||||
|
|
||||||
|
case *Int:
|
||||||
|
tv := targetType.(*Int)
|
||||||
|
if lv.isBitfield() || tv.isBitfield() {
|
||||||
|
return fmt.Errorf("bitfield: %w", errImpossibleRelocation)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func skipQualifierAndTypedef(typ Type) Type {
|
func skipQualifierAndTypedef(typ Type) (Type, error) {
|
||||||
result := typ
|
result := typ
|
||||||
for depth := 0; depth <= maxTypeDepth; depth++ {
|
for depth := 0; depth <= maxTypeDepth; depth++ {
|
||||||
switch v := (result).(type) {
|
switch v := (result).(type) {
|
||||||
@@ -381,8 +880,8 @@ func skipQualifierAndTypedef(typ Type) Type {
|
|||||||
case *Typedef:
|
case *Typedef:
|
||||||
result = v.Type
|
result = v.Type
|
||||||
default:
|
default:
|
||||||
return result
|
return result, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return typ
|
return nil, errors.New("exceeded type depth")
|
||||||
}
|
}
|
||||||
|
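A note on the enum matching above: coreFindEnumValue compares essential names, i.e. a BTF "flavour" suffix (everything from the final "___") is ignored, so a flavoured local type or enum value can relocate against the plain kernel name. A minimal, self-contained sketch of that convention — illustrative only, not the vendored helper:

    package main

    import (
        "fmt"
        "strings"
    )

    // essentialName strips a CO-RE "flavour" suffix: anything from the
    // last "___" onwards is ignored when comparing type or enum names.
    func essentialName(name string) string {
        if idx := strings.LastIndex(name, "___"); idx > 0 {
            return name[:idx]
        }
        return name
    }

    func main() {
        // A flavoured local type matches the unflavoured target type.
        fmt.Println(essentialName("task_struct___v510") == essentialName("task_struct")) // true
    }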
46 vendor/github.com/cilium/ebpf/internal/btf/ext_info.go generated vendored

@@ -30,7 +30,7 @@ type btfExtCoreHeader struct {
 	CoreReloLen uint32
 }

-func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
+func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, relos map[string]coreRelos, err error) {
 	var header btfExtHeader
 	var coreHeader btfExtCoreHeader
 	if err := binary.Read(r, bo, &header); err != nil {
@@ -94,13 +94,13 @@ func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (f
 			return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
 		}

-		coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
+		relos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
 		if err != nil {
 			return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
 		}
 	}

-	return funcInfo, lineInfo, coreRelos, nil
+	return funcInfo, lineInfo, relos, nil
 }

 type btfExtInfoSec struct {
@@ -208,18 +208,25 @@ type bpfCoreRelo struct {
 	InsnOff      uint32
 	TypeID       TypeID
 	AccessStrOff uint32
-	ReloKind     coreReloKind
+	Kind         COREKind
 }

-type bpfCoreRelos []bpfCoreRelo
+type coreRelo struct {
+	insnOff  uint32
+	typeID   TypeID
+	accessor coreAccessor
+	kind     COREKind
+}
+
+type coreRelos []coreRelo

 // append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
 // by offset.
-func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
-	result := make([]bpfCoreRelo, 0, len(r)+len(other))
+func (r coreRelos) append(other coreRelos, offset uint64) coreRelos {
+	result := make([]coreRelo, 0, len(r)+len(other))
 	result = append(result, r...)
 	for _, relo := range other {
-		relo.InsnOff += uint32(offset)
+		relo.insnOff += uint32(offset)
 		result = append(result, relo)
 	}
 	return result
@@ -227,7 +234,7 @@ func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {

 var extInfoReloSize = binary.Size(bpfCoreRelo{})

-func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
+func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]coreRelos, error) {
 	var recordSize uint32
 	if err := binary.Read(r, bo, &recordSize); err != nil {
 		return nil, fmt.Errorf("read record size: %v", err)
@@ -237,14 +244,14 @@ func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (m
 		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
 	}

-	result := make(map[string]bpfCoreRelos)
+	result := make(map[string]coreRelos)
 	for {
 		secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
 		if errors.Is(err, io.EOF) {
 			return result, nil
 		}

-		var relos []bpfCoreRelo
+		var relos coreRelos
 		for i := uint32(0); i < infoHeader.NumInfo; i++ {
 			var relo bpfCoreRelo
 			if err := binary.Read(r, bo, &relo); err != nil {
@@ -255,7 +262,22 @@ func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (m
 				return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
 			}

-			relos = append(relos, relo)
+			accessorStr, err := strings.Lookup(relo.AccessStrOff)
+			if err != nil {
+				return nil, err
+			}
+
+			accessor, err := parseCoreAccessor(accessorStr)
+			if err != nil {
+				return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+			}
+
+			relos = append(relos, coreRelo{
+				relo.InsnOff,
+				relo.TypeID,
+				accessor,
+				relo.Kind,
+			})
 		}

 		result[secName] = relos
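Each relocation record now carries a parsed coreAccessor instead of a raw string-table offset. A CO-RE accessor string is a colon-separated list of indices (for example "0:1:4") describing a path through the local type. A runnable sketch of that kind of parsing, assuming the colon-separated decimal format; the vendored parseCoreAccessor adds validation beyond this:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseAccessor splits an accessor string such as "0:1:4" into
    // integer indices, in the spirit of the vendored parseCoreAccessor.
    func parseAccessor(s string) ([]int, error) {
        parts := strings.Split(s, ":")
        acc := make([]int, 0, len(parts))
        for _, p := range parts {
            n, err := strconv.Atoi(p)
            if err != nil {
                return nil, fmt.Errorf("accessor %q: %w", s, err)
            }
            acc = append(acc, n)
        }
        return acc, nil
    }

    func main() {
        acc, err := parseAccessor("0:1:4")
        fmt.Println(acc, err) // [0 1 4] <nil>
    }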
46 vendor/github.com/cilium/ebpf/internal/btf/types.go generated vendored

@@ -1,7 +1,6 @@
 package btf

 import (
-	"errors"
 	"fmt"
 	"math"
 	"strings"
@@ -37,6 +36,7 @@ type Type interface {
 type namedType interface {
 	Type
 	name() string
+	essentialName() string
 }

 // Name identifies a type.
@@ -48,6 +48,10 @@ func (n Name) name() string {
 	return string(n)
 }

+func (n Name) essentialName() string {
+	return essentialName(string(n))
+}
+
 // Void is the unit type of BTF.
 type Void struct{}

@@ -174,8 +178,7 @@ func (s *Struct) walk(tdq *typeDeque) {

 func (s *Struct) copy() Type {
 	cpy := *s
-	cpy.Members = make([]Member, len(s.Members))
-	copy(cpy.Members, s.Members)
+	cpy.Members = copyMembers(s.Members)
 	return &cpy
 }

@@ -206,8 +209,7 @@ func (u *Union) walk(tdq *typeDeque) {

 func (u *Union) copy() Type {
 	cpy := *u
-	cpy.Members = make([]Member, len(u.Members))
-	copy(cpy.Members, u.Members)
+	cpy.Members = copyMembers(u.Members)
 	return &cpy
 }

@@ -215,6 +217,12 @@ func (u *Union) members() []Member {
 	return u.Members
 }

+func copyMembers(orig []Member) []Member {
+	cpy := make([]Member, len(orig))
+	copy(cpy, orig)
+	return cpy
+}
+
 type composite interface {
 	members() []Member
 }
@@ -511,7 +519,7 @@ func Sizeof(typ Type) (int, error) {
 		switch v := typ.(type) {
 		case *Array:
 			if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
-				return 0, errors.New("overflow")
+				return 0, fmt.Errorf("type %s: overflow", typ)
 			}

 			// Arrays may be of zero length, which allows
@@ -532,28 +540,30 @@ func Sizeof(typ Type) (int, error) {
 			continue

 		default:
-			return 0, fmt.Errorf("unrecognized type %T", typ)
+			return 0, fmt.Errorf("unsized type %T", typ)
 		}

 		if n > 0 && elem > math.MaxInt64/n {
-			return 0, errors.New("overflow")
+			return 0, fmt.Errorf("type %s: overflow", typ)
 		}

 		size := n * elem
 		if int64(int(size)) != size {
-			return 0, errors.New("overflow")
+			return 0, fmt.Errorf("type %s: overflow", typ)
 		}

 		return int(size), nil
 	}

-	return 0, errors.New("exceeded type depth")
+	return 0, fmt.Errorf("type %s: exceeded type depth", typ)
 }

 // copy a Type recursively.
 //
 // typ may form a cycle.
-func copyType(typ Type) Type {
+//
+// Returns any errors from transform verbatim.
+func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) {
 	var (
 		copies = make(map[Type]Type)
 		work   typeDeque
@@ -566,7 +576,17 @@ func copyType(typ Type) Type {
 			continue
 		}

-		cpy := (*t).copy()
+		var cpy Type
+		if transform != nil {
+			tf, err := transform(*t)
+			if err != nil {
+				return nil, fmt.Errorf("copy %s: %w", typ, err)
+			}
+			cpy = tf.copy()
+		} else {
+			cpy = (*t).copy()
+		}
+
 		copies[*t] = cpy
 		*t = cpy

@@ -574,7 +594,7 @@ func copyType(typ Type) Type {
 		cpy.walk(&work)
 	}

-	return typ, nil
+	return typ, nil
 }

 // typeDeque keeps track of pointers to types which still
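The reworked copyType memoizes copies before walking children — which is what lets cyclic type graphs terminate — and now applies an optional transform to each node first. The same pattern on a toy, self-referential type, as a hedged sketch (not the vendored code):

    package main

    import "fmt"

    type node struct {
        name string
        next *node
    }

    // copyGraph deep-copies a possibly cyclic graph, applying an optional
    // transform to each node before copying it, with memoization so cycles
    // terminate - the same shape as the new copyType.
    func copyGraph(n *node, transform func(*node) (*node, error)) (*node, error) {
        copies := make(map[*node]*node)
        var rec func(*node) (*node, error)
        rec = func(n *node) (*node, error) {
            if n == nil {
                return nil, nil
            }
            if c, ok := copies[n]; ok {
                return c, nil // already copied: cycles stop here
            }
            src := n
            if transform != nil {
                t, err := transform(n)
                if err != nil {
                    return nil, err
                }
                src = t
            }
            c := &node{name: src.name}
            copies[n] = c // memoize before recursing into children
            next, err := rec(src.next)
            if err != nil {
                return nil, err
            }
            c.next = next
            return c, nil
        }
        return rec(n)
    }

    func main() {
        a := &node{name: "a"}
        a.next = a // cycle
        b, _ := copyGraph(a, nil)
        fmt.Println(b.name, b.next == b) // a true
    }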
16 vendor/github.com/cilium/ebpf/internal/elf.go generated vendored

@@ -50,3 +50,19 @@ func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
 	syms, err = se.File.Symbols()
 	return
 }
+
+// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
+func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
+	defer func() {
+		r := recover()
+		if r == nil {
+			return
+		}
+
+		syms = nil
+		err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
+	}()
+
+	syms, err = se.File.DynamicSymbols()
+	return
+}
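DynamicSymbols follows the SafeELFFile pattern shown above: debug/elf can panic on malformed input, so the panic is converted into an ordinary error via a deferred recover into named return values. The pattern in isolation, runnable:

    package main

    import "fmt"

    // parseUntrusted converts a panic inside an untrusted parser into an
    // ordinary error, using named returns plus a deferred recover.
    func parseUntrusted(parse func() ([]string, error)) (syms []string, err error) {
        defer func() {
            if r := recover(); r != nil {
                syms = nil
                err = fmt.Errorf("parsing panicked: %s", r)
            }
        }()
        return parse()
    }

    func main() {
        _, err := parseUntrusted(func() ([]string, error) { panic("malformed input") })
        fmt.Println(err) // parsing panicked: malformed input
    }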
5 vendor/github.com/cilium/ebpf/internal/endian.go generated vendored

@@ -9,11 +9,16 @@ import (
 // depending on the host's endianness.
 var NativeEndian binary.ByteOrder

+// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
+var ClangEndian string
+
 func init() {
 	if isBigEndian() {
 		NativeEndian = binary.BigEndian
+		ClangEndian = "eb"
 	} else {
 		NativeEndian = binary.LittleEndian
+		ClangEndian = "el"
 	}
 }
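ClangEndian feeds clang's BPF target suffix selection (bpfel vs bpfeb) when BPF C is compiled for the host. A sketch of how a host-endianness probe of this kind can work — the vendored isBigEndian may be implemented differently:

    package main

    import (
        "encoding/binary"
        "fmt"
        "unsafe"
    )

    // hostEndianness stores a known 16-bit value and inspects its first
    // byte in memory to decide the byte order and clang suffix.
    func hostEndianness() (binary.ByteOrder, string) {
        v := uint16(0x0102)
        if *(*byte)(unsafe.Pointer(&v)) == 0x01 {
            return binary.BigEndian, "eb" // e.g. -target bpfeb
        }
        return binary.LittleEndian, "el" // e.g. -target bpfel
    }

    func main() {
        bo, suffix := hostEndianness()
        fmt.Println(bo, suffix)
    }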
4 vendor/github.com/cilium/ebpf/internal/errors.go generated vendored

@@ -29,6 +29,10 @@ type VerifierError struct {
 	log string
 }

+func (le *VerifierError) Unwrap() error {
+	return le.cause
+}
+
 func (le *VerifierError) Error() string {
 	if le.log == "" {
 		return le.cause.Error()
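With Unwrap in place, errors.Is and errors.As can see through a VerifierError to its cause, even across further wrapping. A self-contained illustration using a look-alike type (not the vendored one):

    package main

    import (
        "errors"
        "fmt"
    )

    var errPerm = errors.New("operation not permitted")

    // verifierError mimics the shape of the vendored VerifierError:
    // a cause plus a verifier log. Unwrap exposes the cause to errors.Is.
    type verifierError struct {
        cause error
        log   string
    }

    func (e *verifierError) Error() string { return e.cause.Error() + ": " + e.log }
    func (e *verifierError) Unwrap() error { return e.cause }

    func main() {
        err := fmt.Errorf("loading program: %w", &verifierError{errPerm, "R1 invalid mem access"})
        fmt.Println(errors.Is(err, errPerm)) // true, thanks to Unwrap
    }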
4 vendor/github.com/cilium/ebpf/internal/ptr.go generated vendored

@@ -22,10 +22,6 @@ func NewSlicePointer(buf []byte) Pointer {

 // NewStringPointer creates a 64-bit pointer from a string.
 func NewStringPointer(str string) Pointer {
-	if str == "" {
-		return Pointer{}
-	}
-
 	p, err := unix.BytePtrFromString(str)
 	if err != nil {
 		return Pointer{}
1 vendor/github.com/cilium/ebpf/internal/unix/types_linux.go generated vendored

@@ -42,6 +42,7 @@ const (
 	PROT_READ                = linux.PROT_READ
 	PROT_WRITE               = linux.PROT_WRITE
 	MAP_SHARED               = linux.MAP_SHARED
+	PERF_ATTR_SIZE_VER1      = linux.PERF_ATTR_SIZE_VER1
 	PERF_TYPE_SOFTWARE       = linux.PERF_TYPE_SOFTWARE
 	PERF_TYPE_TRACEPOINT     = linux.PERF_TYPE_TRACEPOINT
 	PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
1 vendor/github.com/cilium/ebpf/internal/unix/types_other.go generated vendored

@@ -43,6 +43,7 @@ const (
 	PROT_READ                = 0x1
 	PROT_WRITE               = 0x2
 	MAP_SHARED               = 0x1
+	PERF_ATTR_SIZE_VER1      = 0
 	PERF_TYPE_SOFTWARE       = 0x1
 	PERF_TYPE_TRACEPOINT     = 0
 	PERF_COUNT_SW_BPF_OUTPUT = 0xa
274 vendor/github.com/cilium/ebpf/link/kprobe.go generated vendored

@@ -1,12 +1,16 @@
 package link

 import (
+	"bytes"
 	"crypto/rand"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
+	"sync"
+	"unsafe"

 	"github.com/cilium/ebpf"
 	"github.com/cilium/ebpf/internal"
@@ -15,13 +19,60 @@ import (

 var (
 	kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
+
+	kprobeRetprobeBit = struct {
+		once  sync.Once
+		value uint64
+		err   error
+	}{}
 )

+type probeType uint8
+
+const (
+	kprobeType probeType = iota
+	uprobeType
+)
+
+func (pt probeType) String() string {
+	if pt == kprobeType {
+		return "kprobe"
+	}
+	return "uprobe"
+}
+
+func (pt probeType) EventsPath() string {
+	if pt == kprobeType {
+		return kprobeEventsPath
+	}
+	return uprobeEventsPath
+}
+
+func (pt probeType) PerfEventType(ret bool) perfEventType {
+	if pt == kprobeType {
+		if ret {
+			return kretprobeEvent
+		}
+		return kprobeEvent
+	}
+	if ret {
+		return uretprobeEvent
+	}
+	return uprobeEvent
+}
+
+func (pt probeType) RetprobeBit() (uint64, error) {
+	if pt == kprobeType {
+		return kretprobeBit()
+	}
+	return uretprobeBit()
+}
+
 // Kprobe attaches the given eBPF program to a perf event that fires when the
 // given kernel symbol starts executing. See /proc/kallsyms for available
 // symbols. For example, printk():
 //
-//	Kprobe("printk")
+//	Kprobe("printk", prog)
 //
 // The resulting Link must be Closed during program shutdown to avoid leaking
 // system resources.
@@ -44,7 +95,7 @@ func Kprobe(symbol string, prog *ebpf.Program) (Link, error) {
 // before the given kernel symbol exits, with the function stack left intact.
 // See /proc/kallsyms for available symbols. For example, printk():
 //
-//	Kretprobe("printk")
+//	Kretprobe("printk", prog)
 //
 // The resulting Link must be Closed during program shutdown to avoid leaking
 // system resources.
@@ -97,36 +148,70 @@ func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
 	return tp, nil
 }

-// pmuKprobe opens a perf event based on a Performance Monitoring Unit.
-// Requires at least 4.17 (e12f03d7031a "perf/core: Implement the
-// 'perf_kprobe' PMU").
-// Returns ErrNotSupported if the kernel doesn't support perf_kprobe PMU,
-// or os.ErrNotExist if the given symbol does not exist in the kernel.
+// pmuKprobe opens a perf event based on the kprobe PMU.
+// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
 func pmuKprobe(symbol string, ret bool) (*perfEvent, error) {
+	return pmuProbe(kprobeType, symbol, "", 0, ret)
+}
+
+// pmuProbe opens a perf event based on a Performance Monitoring Unit.
+//
+// Requires at least a 4.17 kernel.
+// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
+// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
+//
+// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
+func pmuProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
 	// Getting the PMU type will fail if the kernel doesn't support
-	// the perf_kprobe PMU.
-	et, err := getPMUEventType("kprobe")
+	// the perf_[k,u]probe PMU.
+	et, err := getPMUEventType(typ)
 	if err != nil {
 		return nil, err
 	}

-	// Create a pointer to a NUL-terminated string for the kernel.
-	sp, err := unsafeStringPtr(symbol)
-	if err != nil {
-		return nil, err
-	}
-
-	// TODO: Parse the position of the bit from /sys/bus/event_source/devices/%s/format/retprobe.
-	config := 0
+	var config uint64
 	if ret {
-		config = 1
+		bit, err := typ.RetprobeBit()
+		if err != nil {
+			return nil, err
+		}
+		config |= 1 << bit
 	}

-	attr := unix.PerfEventAttr{
-		Type:   uint32(et),          // PMU event type read from sysfs
-		Ext1:   uint64(uintptr(sp)), // Kernel symbol to trace
-		Config: uint64(config),      // perf_kprobe PMU treats config as flags
-	}
+	var (
+		attr unix.PerfEventAttr
+		sp   unsafe.Pointer
+	)
+	switch typ {
+	case kprobeType:
+		// Create a pointer to a NUL-terminated string for the kernel.
+		sp, err := unsafeStringPtr(symbol)
+		if err != nil {
+			return nil, err
+		}
+
+		attr = unix.PerfEventAttr{
+			Type:   uint32(et),          // PMU event type read from sysfs
+			Ext1:   uint64(uintptr(sp)), // Kernel symbol to trace
+			Config: config,              // Retprobe flag
+		}
+	case uprobeType:
+		sp, err := unsafeStringPtr(path)
+		if err != nil {
+			return nil, err
+		}
+
+		attr = unix.PerfEventAttr{
+			// The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
+			// since it added the config2 (Ext2) field. The Size field controls the
+			// size of the internal buffer the kernel allocates for reading the
+			// perf_event_attr argument from userspace.
+			Size:   unix.PERF_ATTR_SIZE_VER1,
+			Type:   uint32(et),          // PMU event type read from sysfs
+			Ext1:   uint64(uintptr(sp)), // Uprobe path
+			Ext2:   offset,              // Uprobe offset
+			Config: config,              // Retprobe flag
+		}
+	}

 	fd, err := unix.PerfEventOpen(&attr, perfAllThreads, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
@@ -144,22 +229,27 @@ func pmuKprobe(symbol string, ret bool) (*perfEvent, error) {
 	// Ensure the string pointer is not collected before PerfEventOpen returns.
 	runtime.KeepAlive(sp)

-	// Kernel has perf_kprobe PMU available, initialize perf event.
+	// Kernel has perf_[k,u]probe PMU available, initialize perf event.
 	return &perfEvent{
 		fd:    internal.NewFD(uint32(fd)),
 		pmuID: et,
 		name:  symbol,
-		ret:   ret,
-		progType: ebpf.Kprobe,
+		typ:   typ.PerfEventType(ret),
 	}, nil
 }

-// tracefsKprobe creates a trace event by writing an entry to <tracefs>/kprobe_events.
-// A new trace event group name is generated on every call to support creating
-// multiple trace events for the same kernel symbol. A perf event is then opened
-// on the newly-created trace event and returned to the caller.
+// tracefsKprobe creates a Kprobe tracefs entry.
 func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
+	return tracefsProbe(kprobeType, symbol, "", 0, ret)
+}
+
+// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
+// A new trace event group name is generated on every call to support creating
+// multiple trace events for the same kernel or userspace symbol.
+// Path and offset are only set in the case of uprobe(s) and are used to set
+// the executable/library path on the filesystem and the offset where the probe is inserted.
+// A perf event is then opened on the newly-created trace event and returned to the caller.
+func tracefsProbe(typ probeType, symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
 	// Generate a random string for each trace event we attempt to create.
 	// This value is used as the 'group' token in tracefs to allow creating
 	// multiple kprobe trace events with the same name.
@@ -176,14 +266,13 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
 	if err == nil {
 		return nil, fmt.Errorf("trace event already exists: %s/%s", group, symbol)
 	}
-	// The read is expected to fail with ErrNotSupported due to a non-existing event.
-	if err != nil && !errors.Is(err, ErrNotSupported) {
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
 		return nil, fmt.Errorf("checking trace event %s/%s: %w", group, symbol, err)
 	}

-	// Create the kprobe trace event using tracefs.
-	if err := createTraceFSKprobeEvent(group, symbol, ret); err != nil {
-		return nil, fmt.Errorf("creating kprobe event on tracefs: %w", err)
+	// Create the [k,u]probe trace event using tracefs.
+	if err := createTraceFSProbeEvent(typ, group, symbol, path, offset, ret); err != nil {
+		return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
 	}

 	// Get the newly-created trace event's id.
@@ -202,65 +291,83 @@ func tracefsKprobe(symbol string, ret bool) (*perfEvent, error) {
 		fd:        fd,
 		group:     group,
 		name:      symbol,
-		ret:       ret,
 		tracefsID: tid,
-		progType:  ebpf.Kprobe, // kernel only allows attaching kprobe programs to kprobe events
+		typ:       typ.PerfEventType(ret),
 	}, nil
 }

-// createTraceFSKprobeEvent creates a new ephemeral trace event by writing to
-// <tracefs>/kprobe_events. Returns ErrNotSupported if symbol is not a valid
-// kernel symbol, or if it is not traceable with kprobes.
-func createTraceFSKprobeEvent(group, symbol string, ret bool) error {
+// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
+// <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists.
+func createTraceFSProbeEvent(typ probeType, group, symbol, path string, offset uint64, ret bool) error {
 	// Open the kprobe_events file in tracefs.
-	f, err := os.OpenFile(kprobeEventsPath, os.O_APPEND|os.O_WRONLY, 0666)
+	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
 	if err != nil {
-		return fmt.Errorf("error opening kprobe_events: %w", err)
+		return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
 	}
 	defer f.Close()

-	// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
-	// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
-	// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
-	// -:[GRP/]EVENT : Clear a probe
-	//
-	// Some examples:
-	// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
-	// p:ebpf_5678/p_my_kprobe __x64_sys_execve
-	//
-	// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
-	// kernel default to NR_CPUS. This is desired in most eBPF cases since
-	// subsampling or rate limiting logic can be more accurately implemented in
-	// the eBPF program itself. See Documentation/kprobes.txt for more details.
-	pe := fmt.Sprintf("%s:%s/%s %s", kprobePrefix(ret), group, symbol, symbol)
+	var pe string
+	switch typ {
+	case kprobeType:
+		// The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+		// p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+		// r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+		// -:[GRP/]EVENT : Clear a probe
+		//
+		// Some examples:
+		// r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+		// p:ebpf_5678/p_my_kprobe __x64_sys_execve
+		//
+		// Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+		// kernel default to NR_CPUS. This is desired in most eBPF cases since
+		// subsampling or rate limiting logic can be more accurately implemented in
+		// the eBPF program itself.
+		// See Documentation/kprobes.txt for more details.
+		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, symbol)
+	case uprobeType:
+		// The uprobe_events syntax is as follows:
+		// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+		// r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+		// -:[GRP/]EVENT                           : Clear a probe
+		//
+		// Some examples:
+		// r:ebpf_1234/readline /bin/bash:0x12345
+		// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345
+		//
+		// See Documentation/trace/uprobetracer.txt for more details.
+		pathOffset := uprobePathOffset(path, offset)
+		pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(ret), group, symbol, pathOffset)
+	}
 	_, err = f.WriteString(pe)
 	// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
 	// when trying to create a kretprobe for a missing symbol. Make sure ENOENT
 	// is returned to the caller.
 	if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
-		return fmt.Errorf("kernel symbol %s not found: %w", symbol, os.ErrNotExist)
+		return fmt.Errorf("symbol %s not found: %w", symbol, os.ErrNotExist)
 	}
 	if err != nil {
-		return fmt.Errorf("writing '%s' to kprobe_events: %w", pe, err)
+		return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
 	}

 	return nil
 }

-// closeTraceFSKprobeEvent removes the kprobe with the given group, symbol and kind
-// from <tracefs>/kprobe_events.
-func closeTraceFSKprobeEvent(group, symbol string) error {
-	f, err := os.OpenFile(kprobeEventsPath, os.O_APPEND|os.O_WRONLY, 0666)
+// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
+// from <tracefs>/[k,u]probe_events.
+func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
+	f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
 	if err != nil {
-		return fmt.Errorf("error opening kprobe_events: %w", err)
+		return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
 	}
 	defer f.Close()

-	// See kprobe_events syntax above. Kprobe type does not need to be specified
+	// See [k,u]probe_events syntax above. The probe type does not need to be specified
 	// for removals.
 	pe := fmt.Sprintf("-:%s/%s", group, symbol)
 	if _, err = f.WriteString(pe); err != nil {
-		return fmt.Errorf("writing '%s' to kprobe_events: %w", pe, err)
+		return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
 	}

 	return nil
@@ -288,9 +395,38 @@ func randomGroup(prefix string) (string, error) {
 	return group, nil
 }

-func kprobePrefix(ret bool) string {
+func probePrefix(ret bool) string {
 	if ret {
 		return "r"
 	}
 	return "p"
 }
+
+// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit
+// from /sys/bus/event_source/devices/<pmu>/format/retprobe.
+func determineRetprobeBit(typ probeType) (uint64, error) {
+	p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe")
+
+	data, err := ioutil.ReadFile(p)
+	if err != nil {
+		return 0, err
+	}
+
+	var rp uint64
+	n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
+	if err != nil {
+		return 0, fmt.Errorf("parse retprobe bit: %w", err)
+	}
+	if n != 1 {
+		return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
+	}
+
+	return rp, nil
+}
+
+func kretprobeBit() (uint64, error) {
+	kprobeRetprobeBit.once.Do(func() {
+		kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType)
+	})
+	return kprobeRetprobeBit.value, kprobeRetprobeBit.err
+}
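The hard-coded config = 1 is gone: the retprobe flag position is now read from the PMU's format/retprobe file, which holds a string like "config:0", and the corresponding bit is set in the perf_event_attr config. That parsing and bit-setting in isolation, as a runnable sketch:

    package main

    import "fmt"

    // retprobeConfig reproduces what the new code does with the contents
    // of format/retprobe: parse the bit index out of "config:N" and set
    // that bit when a return probe is requested.
    func retprobeConfig(format string, ret bool) (uint64, error) {
        var bit uint64
        if _, err := fmt.Sscanf(format, "config:%d", &bit); err != nil {
            return 0, fmt.Errorf("parse retprobe bit: %w", err)
        }
        var config uint64
        if ret {
            config |= 1 << bit
        }
        return config, nil
    }

    func main() {
        cfg, err := retprobeConfig("config:0", true)
        fmt.Println(cfg, err) // 1 <nil>
    }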
78
vendor/github.com/cilium/ebpf/link/perf_event.go
generated
vendored
78
vendor/github.com/cilium/ebpf/link/perf_event.go
generated
vendored
@@ -31,6 +31,10 @@ import (
|
|||||||
// exported kernel symbols. kprobe-based (tracefs) trace events can be
|
// exported kernel symbols. kprobe-based (tracefs) trace events can be
|
||||||
// created system-wide by writing to the <tracefs>/kprobe_events file, or
|
// created system-wide by writing to the <tracefs>/kprobe_events file, or
|
||||||
// they can be scoped to the current process by creating PMU perf events.
|
// they can be scoped to the current process by creating PMU perf events.
|
||||||
|
// - u(ret)probe: Ephemeral trace events based on user provides ELF binaries
|
||||||
|
// and offsets. uprobe-based (tracefs) trace events can be
|
||||||
|
// created system-wide by writing to the <tracefs>/uprobe_events file, or
|
||||||
|
// they can be scoped to the current process by creating PMU perf events.
|
||||||
// - perf event: An object instantiated based on an existing trace event or
|
// - perf event: An object instantiated based on an existing trace event or
|
||||||
// kernel symbol. Referred to by fd in userspace.
|
// kernel symbol. Referred to by fd in userspace.
|
||||||
// Exactly one eBPF program can be attached to a perf event. Multiple perf
|
// Exactly one eBPF program can be attached to a perf event. Multiple perf
|
||||||
@@ -52,6 +56,16 @@ const (
|
|||||||
perfAllThreads = -1
|
perfAllThreads = -1
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type perfEventType uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
tracepointEvent perfEventType = iota
|
||||||
|
kprobeEvent
|
||||||
|
kretprobeEvent
|
||||||
|
uprobeEvent
|
||||||
|
uretprobeEvent
|
||||||
|
)
|
||||||
|
|
||||||
// A perfEvent represents a perf event kernel object. Exactly one eBPF program
|
// A perfEvent represents a perf event kernel object. Exactly one eBPF program
|
||||||
// can be attached to it. It is created based on a tracefs trace event or a
|
// can be attached to it. It is created based on a tracefs trace event or a
|
||||||
// Performance Monitoring Unit (PMU).
|
// Performance Monitoring Unit (PMU).
|
||||||
@@ -66,11 +80,10 @@ type perfEvent struct {
|
|||||||
// ID of the trace event read from tracefs. Valid IDs are non-zero.
|
// ID of the trace event read from tracefs. Valid IDs are non-zero.
|
||||||
tracefsID uint64
|
tracefsID uint64
|
||||||
|
|
||||||
// True for kretprobes/uretprobes.
|
// The event type determines the types of programs that can be attached.
|
||||||
ret bool
|
typ perfEventType
|
||||||
|
|
||||||
fd *internal.FD
|
fd *internal.FD
|
||||||
progType ebpf.ProgramType
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pe *perfEvent) isLink() {}
|
func (pe *perfEvent) isLink() {}
|
||||||
@@ -117,13 +130,18 @@ func (pe *perfEvent) Close() error {
|
|||||||
return fmt.Errorf("closing perf event fd: %w", err)
|
return fmt.Errorf("closing perf event fd: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch t := pe.progType; t {
|
switch pe.typ {
|
||||||
case ebpf.Kprobe:
|
case kprobeEvent, kretprobeEvent:
|
||||||
// For kprobes created using tracefs, clean up the <tracefs>/kprobe_events entry.
|
// Clean up kprobe tracefs entry.
|
||||||
if pe.tracefsID != 0 {
|
if pe.tracefsID != 0 {
|
||||||
return closeTraceFSKprobeEvent(pe.group, pe.name)
|
return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
|
||||||
}
|
}
|
||||||
case ebpf.TracePoint:
|
case uprobeEvent, uretprobeEvent:
|
||||||
|
// Clean up uprobe tracefs entry.
|
||||||
|
if pe.tracefsID != 0 {
|
||||||
|
return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
|
||||||
|
}
|
||||||
|
case tracepointEvent:
|
||||||
// Tracepoint trace events don't hold any extra resources.
|
// Tracepoint trace events don't hold any extra resources.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -141,12 +159,21 @@ func (pe *perfEvent) attach(prog *ebpf.Program) error {
|
|||||||
if pe.fd == nil {
|
if pe.fd == nil {
|
||||||
return errors.New("cannot attach to nil perf event")
|
return errors.New("cannot attach to nil perf event")
|
||||||
}
|
}
|
||||||
if t := prog.Type(); t != pe.progType {
|
|
||||||
return fmt.Errorf("invalid program type (expected %s): %s", pe.progType, t)
|
|
||||||
}
|
|
||||||
if prog.FD() < 0 {
|
if prog.FD() < 0 {
|
||||||
return fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
|
return fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
|
||||||
}
|
}
|
||||||
|
switch pe.typ {
|
||||||
|
case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
|
||||||
|
if t := prog.Type(); t != ebpf.Kprobe {
|
||||||
|
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
|
||||||
|
}
|
||||||
|
case tracepointEvent:
|
||||||
|
if t := prog.Type(); t != ebpf.TracePoint {
|
||||||
|
return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown perf event type: %d", pe.typ)
|
||||||
|
}
|
||||||
|
|
||||||
// The ioctl below will fail when the fd is invalid.
|
// The ioctl below will fail when the fd is invalid.
|
||||||
kfd, _ := pe.fd.Value()
|
kfd, _ := pe.fd.Value()
|
||||||
@@ -180,8 +207,8 @@ func unsafeStringPtr(str string) (unsafe.Pointer, error) {
|
|||||||
// group and name must be alphanumeric or underscore, as required by the kernel.
|
// group and name must be alphanumeric or underscore, as required by the kernel.
|
||||||
func getTraceEventID(group, name string) (uint64, error) {
|
func getTraceEventID(group, name string) (uint64, error) {
|
||||||
tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
|
tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
|
||||||
if errors.Is(err, ErrNotSupported) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, ErrNotSupported)
|
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
|
return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
|
||||||
@@ -192,20 +219,22 @@ func getTraceEventID(group, name string) (uint64, error) {
|
|||||||
|
|
||||||
// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier)
|
// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier)
|
||||||
// from /sys/bus/event_source/devices/<pmu>/type.
|
// from /sys/bus/event_source/devices/<pmu>/type.
|
||||||
func getPMUEventType(pmu string) (uint64, error) {
|
//
|
||||||
et, err := uint64FromFile("/sys/bus/event_source/devices", pmu, "type")
|
// Returns ErrNotSupported if the pmu type is not supported.
|
||||||
if errors.Is(err, ErrNotSupported) {
|
func getPMUEventType(typ probeType) (uint64, error) {
|
||||||
return 0, fmt.Errorf("pmu type %s: %w", pmu, ErrNotSupported)
|
et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type")
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("reading pmu type %s: %w", pmu, err)
|
return 0, fmt.Errorf("reading pmu type %s: %w", typ, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return et, nil
|
return et, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
|
// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
|
||||||
// kprobes created by writing to <tracefs>/kprobe_events are tracepoints
|
// [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
|
||||||
// behind the scenes, and can be attached to using these perf events.
|
// behind the scenes, and can be attached to using these perf events.
|
||||||
func openTracepointPerfEvent(tid uint64) (*internal.FD, error) {
|
func openTracepointPerfEvent(tid uint64) (*internal.FD, error) {
|
||||||
attr := unix.PerfEventAttr{
|
attr := unix.PerfEventAttr{
|
||||||
@@ -228,22 +257,13 @@ func openTracepointPerfEvent(tid uint64) (*internal.FD, error) {
|
|||||||
// and joined onto base. Returns error if base no longer prefixes the path after
|
// and joined onto base. Returns error if base no longer prefixes the path after
|
||||||
// joining all components.
|
// joining all components.
|
||||||
func uint64FromFile(base string, path ...string) (uint64, error) {
|
func uint64FromFile(base string, path ...string) (uint64, error) {
|
||||||
|
|
||||||
// Resolve leaf path separately for error feedback. Makes the join onto
|
|
||||||
// base more readable (can't mix with variadic args).
|
|
||||||
l := filepath.Join(path...)
|
l := filepath.Join(path...)
|
||||||
|
|
||||||
p := filepath.Join(base, l)
|
p := filepath.Join(base, l)
|
||||||
if !strings.HasPrefix(p, base) {
|
if !strings.HasPrefix(p, base) {
|
||||||
return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
|
return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := ioutil.ReadFile(p)
|
data, err := ioutil.ReadFile(p)
|
||||||
if os.IsNotExist(err) {
|
|
||||||
// Only echo leaf path, the base path can be prepended at the call site
|
|
||||||
// if more verbosity is required.
|
|
||||||
return 0, fmt.Errorf("symbol %s: %w", l, ErrNotSupported)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("reading file %s: %w", p, err)
|
return 0, fmt.Errorf("reading file %s: %w", p, err)
|
||||||
}
|
}
|
||||||
|
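attach() no longer stores an ebpf.ProgramType per event; it derives the single allowed program type from the perfEventType. That mapping in isolation, as a sketch (strings stand in for the ebpf program types; not the vendored code):

    package main

    import "fmt"

    type perfEventType uint8

    const (
        tracepointEvent perfEventType = iota
        kprobeEvent
        kretprobeEvent
        uprobeEvent
        uretprobeEvent
    )

    // expectedProgType mirrors the switch in the new attach(): every probe
    // flavour accepts exactly one eBPF program type.
    func expectedProgType(t perfEventType) (string, error) {
        switch t {
        case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
            return "Kprobe", nil
        case tracepointEvent:
            return "TracePoint", nil
        default:
            return "", fmt.Errorf("unknown perf event type: %d", t)
        }
    }

    func main() {
        pt, _ := expectedProgType(uretprobeEvent)
        fmt.Println(pt) // Kprobe
    }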
4 vendor/github.com/cilium/ebpf/link/tracepoint.go generated vendored

@@ -11,7 +11,7 @@ import (
 // tracepoints. The top-level directory is the group, the event's subdirectory
 // is the name. Example:
 //
-//	Tracepoint("syscalls", "sys_enter_fork")
+//	Tracepoint("syscalls", "sys_enter_fork", prog)
 //
 // Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
 // only possible as of kernel 4.14 (commit cf5f5ce).
@@ -44,7 +44,7 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) {
 		tracefsID: tid,
 		group:     group,
 		name:      name,
-		progType:  ebpf.TracePoint,
+		typ:       tracepointEvent,
 	}

 	if err := pe.attach(prog); err != nil {
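Usage with the corrected doc string above, assuming prog is an ebpf.TracePoint program loaded elsewhere (a sketch; requires a Linux kernel with tracefs):

    package main

    import (
        "log"

        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/link"
    )

    // attachForkTracepoint wires a TracePoint-type program to
    // syscalls/sys_enter_fork using the API shown in the diff.
    func attachForkTracepoint(prog *ebpf.Program) {
        tp, err := link.Tracepoint("syscalls", "sys_enter_fork", prog)
        if err != nil {
            log.Fatalf("attaching tracepoint: %v", err)
        }
        defer tp.Close() // release the perf event when done
    }

    func main() {}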
|||||||
207
vendor/github.com/cilium/ebpf/link/uprobe.go
generated
vendored
Normal file
207
vendor/github.com/cilium/ebpf/link/uprobe.go
generated
vendored
Normal file
@@ -0,0 +1,207 @@
+package link
+
+import (
+	"debug/elf"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sync"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal"
+)
+
+var (
+	uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
+
+	// rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol
+	// as they are not allowed to be used as the EVENT token in tracefs.
+	rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+")
+
+	uprobeRetprobeBit = struct {
+		once  sync.Once
+		value uint64
+		err   error
+	}{}
+)
+
+// Executable defines an executable program on the filesystem.
+type Executable struct {
+	// Path of the executable on the filesystem.
+	path string
+	// Parsed ELF symbols and dynamic symbols.
+	symbols map[string]elf.Symbol
+}
+
+// To open a new Executable, use:
+//
+//	OpenExecutable("/bin/bash")
+//
+// The returned value can then be used to open Uprobe(s).
+func OpenExecutable(path string) (*Executable, error) {
+	if path == "" {
+		return nil, fmt.Errorf("path cannot be empty")
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("open file '%s': %w", path, err)
+	}
+	defer f.Close()
+
+	se, err := internal.NewSafeELFFile(f)
+	if err != nil {
+		return nil, fmt.Errorf("parse ELF file: %w", err)
+	}
+
+	var ex = Executable{
+		path:    path,
+		symbols: make(map[string]elf.Symbol),
+	}
+	if err := ex.addSymbols(se.Symbols); err != nil {
+		return nil, err
+	}
+
+	if err := ex.addSymbols(se.DynamicSymbols); err != nil {
+		return nil, err
+	}
+
+	return &ex, nil
+}
+
+func (ex *Executable) addSymbols(f func() ([]elf.Symbol, error)) error {
+	// elf.Symbols and elf.DynamicSymbols return ErrNoSymbols if the section is not found.
+	syms, err := f()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+	for _, s := range syms {
+		ex.symbols[s.Name] = s
+	}
+	return nil
+}
+
+func (ex *Executable) symbol(symbol string) (*elf.Symbol, error) {
+	if s, ok := ex.symbols[symbol]; ok {
+		return &s, nil
+	}
+	return nil, fmt.Errorf("symbol %s not found", symbol)
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uprobe("main", prog)
+//
+// The resulting Link must be Closed during program shutdown to avoid leaking
+// system resources. Functions provided by shared libraries can currently not
+// be traced and will result in an ErrNotSupported.
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, false)
+	if err != nil {
+		return nil, err
+	}
+
+	err = u.attach(prog)
+	if err != nil {
+		u.Close()
+		return nil, err
+	}
+
+	return u, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uretprobe("main", prog)
+//
+// The resulting Link must be Closed during program shutdown to avoid leaking
+// system resources. Functions provided by shared libraries can currently not
+// be traced and will result in an ErrNotSupported.
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, true)
+	if err != nil {
+		return nil, err
+	}
+
+	err = u.attach(prog)
+	if err != nil {
+		u.Close()
+		return nil, err
+	}
+
+	return u, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) {
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	if prog.Type() != ebpf.Kprobe {
+		return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+	}
+
+	sym, err := ex.symbol(symbol)
+	if err != nil {
+		return nil, fmt.Errorf("symbol '%s' not found in '%s': %w", symbol, ex.path, err)
+	}
+
+	// Symbols with location 0 from section undef are shared library calls and
+	// are relocated before the binary is executed. Dynamic linking is not
+	// implemented by the library, so mark this as unsupported for now.
+	if sym.Section == elf.SHN_UNDEF && sym.Value == 0 {
+		return nil, fmt.Errorf("cannot resolve %s library call '%s': %w", ex.path, symbol, ErrNotSupported)
+	}
+
+	// Use uprobe PMU if the kernel has it available.
+	tp, err := pmuUprobe(sym.Name, ex.path, sym.Value, ret)
+	if err == nil {
+		return tp, nil
+	}
+	if err != nil && !errors.Is(err, ErrNotSupported) {
+		return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+	}
+
+	// Use tracefs if uprobe PMU is missing.
+	tp, err = tracefsUprobe(uprobeSanitizedSymbol(sym.Name), ex.path, sym.Value, ret)
+	if err != nil {
+		return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
+	}
+
+	return tp, nil
+}
+
+// pmuUprobe opens a perf event based on the uprobe PMU.
+func pmuUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
+	return pmuProbe(uprobeType, symbol, path, offset, ret)
+}
+
+// tracefsUprobe creates a Uprobe tracefs entry.
+func tracefsUprobe(symbol, path string, offset uint64, ret bool) (*perfEvent, error) {
+	return tracefsProbe(uprobeType, symbol, path, offset, ret)
+}
+
+// uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore.
+func uprobeSanitizedSymbol(symbol string) string {
+	return rgxUprobeSymbol.ReplaceAllString(symbol, "_")
+}
+
+// uprobePathOffset creates the PATH:OFFSET token for the tracefs api.
+func uprobePathOffset(path string, offset uint64) string {
+	return fmt.Sprintf("%s:%#x", path, offset)
+}
+
+func uretprobeBit() (uint64, error) {
+	uprobeRetprobeBit.once.Do(func() {
+		uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType)
+	})
+	return uprobeRetprobeBit.value, uprobeRetprobeBit.err
+}
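The new file gives user-space symbols the same two-step flow as kprobes: resolve symbols once with OpenExecutable, then attach per symbol. A minimal sketch combining the doc-comment examples above, assuming prog is a loaded program of type ebpf.Kprobe (the type the uprobe helper checks for); loading it is elided:

package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// traceBashMain mirrors the doc-comment examples above: it attaches prog at
// the entry and the return of /bin/bash's main. Loading prog is elided.
func traceBashMain(prog *ebpf.Program) (entry, ret link.Link, err error) {
	ex, err := link.OpenExecutable("/bin/bash")
	if err != nil {
		return nil, nil, err
	}

	entry, err = ex.Uprobe("main", prog)
	if err != nil {
		return nil, nil, err
	}

	ret, err = ex.Uretprobe("main", prog)
	if err != nil {
		entry.Close()
		return nil, nil, err
	}

	// Both links must be Closed during shutdown to avoid leaking the
	// underlying perf events and tracefs entries.
	return entry, ret, nil
}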
11 vendor/github.com/cilium/ebpf/linker.go generated vendored
@@ -108,12 +108,16 @@ func fixupJumpsAndCalls(insns asm.Instructions) error {
 		offset := iter.Offset
 		ins := iter.Ins
 
+		if ins.Reference == "" {
+			continue
+		}
+
 		switch {
 		case ins.IsFunctionCall() && ins.Constant == -1:
 			// Rewrite bpf to bpf call
 			callOffset, ok := symbolOffsets[ins.Reference]
 			if !ok {
-				return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
+				return fmt.Errorf("call at %d: reference to missing symbol %q", i, ins.Reference)
 			}
 
 			ins.Constant = int64(callOffset - offset - 1)
@@ -122,10 +126,13 @@ func fixupJumpsAndCalls(insns asm.Instructions) error {
 			// Rewrite jump to label
 			jumpOffset, ok := symbolOffsets[ins.Reference]
 			if !ok {
-				return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
+				return fmt.Errorf("jump at %d: reference to missing symbol %q", i, ins.Reference)
 			}
 
 			ins.Offset = int16(jumpOffset - offset - 1)
+
+		case ins.IsLoadFromMap() && ins.MapPtr() == -1:
+			return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedReference)
 		}
 	}
 
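The new IsLoadFromMap case surfaces as the explicit errUnsatisfiedReference added in prog.go further down: a program whose instructions still reference a map by name cannot be loaded on its own, because only collection loading rewrites those references into map file descriptors. A sketch of the collection path; demo.o and count_packets are hypothetical names:

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Hypothetical object file and program name, for illustration only.
	spec, err := ebpf.LoadCollectionSpec("demo.o")
	if err != nil {
		log.Fatal(err)
	}

	// NewCollection creates all maps first and rewrites map references in
	// each program, so no instruction reaches fixupJumpsAndCalls with
	// MapPtr() == -1.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	prog := coll.Programs["count_packets"] // hypothetical program name
	_ = prog
}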
50 vendor/github.com/cilium/ebpf/map.go generated vendored
@@ -18,6 +18,7 @@ var (
 	ErrKeyNotExist      = errors.New("key does not exist")
 	ErrKeyExist         = errors.New("key already exists")
 	ErrIterationAborted = errors.New("iteration aborted")
+	ErrMapIncompatible  = errors.New("map's spec is incompatible with pinned map")
 )
 
 // MapOptions control loading a map into the kernel.
@@ -87,6 +88,23 @@ func (ms *MapSpec) Copy() *MapSpec {
 	return &cpy
 }
 
+func (ms *MapSpec) clampPerfEventArraySize() error {
+	if ms.Type != PerfEventArray {
+		return nil
+	}
+
+	n, err := internal.PossibleCPUs()
+	if err != nil {
+		return fmt.Errorf("perf event array: %w", err)
+	}
+
+	if n := uint32(n); ms.MaxEntries > n {
+		ms.MaxEntries = n
+	}
+
+	return nil
+}
+
 // MapKV is used to initialize the contents of a Map.
 type MapKV struct {
 	Key   interface{}
@@ -96,19 +114,19 @@ type MapKV struct {
 func (ms *MapSpec) checkCompatibility(m *Map) error {
 	switch {
 	case m.typ != ms.Type:
-		return fmt.Errorf("expected type %v, got %v", ms.Type, m.typ)
+		return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible)
 
 	case m.keySize != ms.KeySize:
-		return fmt.Errorf("expected key size %v, got %v", ms.KeySize, m.keySize)
+		return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible)
 
 	case m.valueSize != ms.ValueSize:
-		return fmt.Errorf("expected value size %v, got %v", ms.ValueSize, m.valueSize)
+		return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible)
 
 	case m.maxEntries != ms.MaxEntries:
-		return fmt.Errorf("expected max entries %v, got %v", ms.MaxEntries, m.maxEntries)
+		return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible)
 
 	case m.flags != ms.Flags:
-		return fmt.Errorf("expected flags %v, got %v", ms.Flags, m.flags)
+		return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible)
 	}
 	return nil
 }
@@ -171,14 +189,16 @@ func NewMap(spec *MapSpec) (*Map, error) {
 // The caller is responsible for ensuring the process' rlimit is set
 // sufficiently high for locking memory during map creation. This can be done
 // by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMapWithOptions.
+//
+// May return an error wrapping ErrMapIncompatible.
 func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
-	btfs := make(btfHandleCache)
-	defer btfs.close()
+	handles := newHandleCache()
+	defer handles.close()
 
-	return newMapWithOptions(spec, opts, btfs)
+	return newMapWithOptions(spec, opts, handles)
 }
 
-func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *Map, err error) {
+func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) {
 	closeOnError := func(c io.Closer) {
 		if err != nil {
 			c.Close()
@@ -202,7 +222,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
 	defer closeOnError(m)
 
 	if err := spec.checkCompatibility(m); err != nil {
-		return nil, fmt.Errorf("use pinned map %s: %s", spec.Name, err)
+		return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
 	}
 
 	return m, nil
@@ -211,7 +231,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
 		// Nothing to do here
 
 	default:
-		return nil, fmt.Errorf("unsupported pin type %d", int(spec.Pinning))
+		return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported)
 	}
 
 	var innerFd *internal.FD
@@ -224,7 +244,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
 		return nil, errors.New("inner maps cannot be pinned")
 	}
 
-	template, err := createMap(spec.InnerMap, nil, opts, btfs)
+	template, err := createMap(spec.InnerMap, nil, opts, handles)
 	if err != nil {
 		return nil, err
 	}
@@ -233,7 +253,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
 		innerFd = template.fd
 	}
 
-	m, err := createMap(spec, innerFd, opts, btfs)
+	m, err := createMap(spec, innerFd, opts, handles)
 	if err != nil {
 		return nil, err
 	}
@@ -249,7 +269,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (_ *
 	return m, nil
 }
 
-func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandleCache) (_ *Map, err error) {
+func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) {
 	closeOnError := func(closer io.Closer) {
 		if err != nil {
 			closer.Close()
@@ -320,7 +340,7 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandl
 
 	var btfDisabled bool
 	if spec.BTF != nil {
-		handle, err := btfs.load(btf.MapSpec(spec.BTF))
+		handle, err := handles.btfHandle(btf.MapSpec(spec.BTF))
 		btfDisabled = errors.Is(err, btf.ErrNotSupported)
 		if err != nil && !btfDisabled {
 			return nil, fmt.Errorf("load BTF: %w", err)
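Because checkCompatibility now wraps ErrMapIncompatible (and the "use pinned map" hunk switches %s to %w), a pinned-map mismatch can be detected with errors.Is instead of string matching. A minimal sketch, assuming a bpffs mount at /sys/fs/bpf; the map name is hypothetical:

package main

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec := &ebpf.MapSpec{
		Name:       "counters", // hypothetical name
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1024,
		Pinning:    ebpf.PinByName, // reuse the map pinned under PinPath
	}

	m, err := ebpf.NewMapWithOptions(spec, ebpf.MapOptions{PinPath: "/sys/fs/bpf"})
	if errors.Is(err, ebpf.ErrMapIncompatible) {
		// A map with this name is already pinned, but its type, key/value
		// sizes, flags or max entries differ from spec.
		log.Fatal("pinned map exists with a different shape: ", err)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}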
149 vendor/github.com/cilium/ebpf/prog.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"io"
 	"math"
 	"path/filepath"
 	"strings"
@@ -19,6 +20,8 @@ import (
 // ErrNotSupported is returned whenever the kernel doesn't support a feature.
 var ErrNotSupported = internal.ErrNotSupported
 
+var errUnsatisfiedReference = errors.New("unsatisfied reference")
+
 // ProgramID represents the unique ID of an eBPF program.
 type ProgramID uint32
 
@@ -41,6 +44,12 @@ type ProgramOptions struct {
 	// Controls the output buffer size for the verifier. Defaults to
 	// DefaultVerifierLogSize.
 	LogSize int
+	// An ELF containing the target BTF for this program. It is used both to
+	// find the correct function to trace and to apply CO-RE relocations.
+	// This is useful in environments where the kernel BTF is not available
+	// (containers) or where it is in a non-standard location. Defaults to
+	// use the kernel BTF from a well-known location.
+	TargetBTF io.ReaderAt
 }
 
 // ProgramSpec defines a Program.
@@ -125,21 +134,21 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
 // Loading a program for the first time will perform
 // feature detection by loading small, temporary programs.
 func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
-	btfs := make(btfHandleCache)
-	defer btfs.close()
+	handles := newHandleCache()
+	defer handles.close()
 
-	return newProgramWithOptions(spec, opts, btfs)
+	prog, err := newProgramWithOptions(spec, opts, handles)
+	if errors.Is(err, errUnsatisfiedReference) {
+		return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
+	}
+	return prog, err
 }
 
-func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandleCache) (*Program, error) {
+func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) {
 	if len(spec.Instructions) == 0 {
 		return nil, errors.New("Instructions cannot be empty")
 	}
 
-	if len(spec.License) == 0 {
-		return nil, errors.New("License cannot be empty")
-	}
-
 	if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
 		return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
 	}
@@ -157,27 +166,10 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
 		kv = v.Kernel()
 	}
 
-	insns := make(asm.Instructions, len(spec.Instructions))
-	copy(insns, spec.Instructions)
-
-	if err := fixupJumpsAndCalls(insns); err != nil {
-		return nil, err
-	}
-
-	buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
-	err := insns.Marshal(buf, internal.NativeEndian)
-	if err != nil {
-		return nil, err
-	}
-
-	bytecode := buf.Bytes()
-	insCount := uint32(len(bytecode) / asm.InstructionSize)
 	attr := &bpfProgLoadAttr{
 		progType:           spec.Type,
 		progFlags:          spec.Flags,
 		expectedAttachType: spec.AttachType,
-		insCount:           insCount,
-		instructions:       internal.NewSlicePointer(bytecode),
 		license:            internal.NewStringPointer(spec.License),
 		kernelVersion:      kv,
 	}
@@ -186,15 +178,24 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
 		attr.progName = newBPFObjName(spec.Name)
 	}
 
+	var err error
+	var targetBTF *btf.Spec
+	if opts.TargetBTF != nil {
+		targetBTF, err = handles.btfSpec(opts.TargetBTF)
+		if err != nil {
+			return nil, fmt.Errorf("load target BTF: %w", err)
+		}
+	}
+
 	var btfDisabled bool
+	var core btf.COREFixups
 	if spec.BTF != nil {
-		if relos, err := btf.ProgramRelocations(spec.BTF, nil); err != nil {
-			return nil, fmt.Errorf("CO-RE relocations: %s", err)
-		} else if len(relos) > 0 {
-			return nil, fmt.Errorf("applying CO-RE relocations: %w", ErrNotSupported)
+		core, err = btf.ProgramFixups(spec.BTF, targetBTF)
+		if err != nil {
+			return nil, fmt.Errorf("CO-RE relocations: %w", err)
 		}
 
-		handle, err := btfs.load(btf.ProgramSpec(spec.BTF))
+		handle, err := handles.btfHandle(btf.ProgramSpec(spec.BTF))
 		btfDisabled = errors.Is(err, btf.ErrNotSupported)
 		if err != nil && !btfDisabled {
 			return nil, fmt.Errorf("load BTF: %w", err)
@@ -221,8 +222,27 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
 		}
 	}
 
+	insns, err := core.Apply(spec.Instructions)
+	if err != nil {
+		return nil, fmt.Errorf("CO-RE fixup: %w", err)
+	}
+
+	if err := fixupJumpsAndCalls(insns); err != nil {
+		return nil, err
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
+	err = insns.Marshal(buf, internal.NativeEndian)
+	if err != nil {
+		return nil, err
+	}
+
+	bytecode := buf.Bytes()
+	attr.instructions = internal.NewSlicePointer(bytecode)
+	attr.insCount = uint32(len(bytecode) / asm.InstructionSize)
+
 	if spec.AttachTo != "" {
-		target, err := resolveBTFType(spec.AttachTo, spec.Type, spec.AttachType)
+		target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType)
 		if err != nil {
 			return nil, err
 		}
@@ -250,7 +270,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
 	}
 
 	logErr := err
-	if opts.LogLevel == 0 {
+	if opts.LogLevel == 0 && opts.LogSize >= 0 {
 		// Re-run with the verifier enabled to get better error messages.
 		logBuf = make([]byte, logSize)
 		attr.logLevel = 1
@@ -664,52 +684,45 @@ func (p *Program) ID() (ProgramID, error) {
 	return ProgramID(info.id), nil
 }
 
-func findKernelType(name string, typ btf.Type) error {
-	kernel, err := btf.LoadKernelSpec()
-	if err != nil {
-		return fmt.Errorf("can't load kernel spec: %w", err)
-	}
-
-	return kernel.FindType(name, typ)
-}
-
-func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
+func resolveBTFType(kernel *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
 	type match struct {
 		p ProgramType
 		a AttachType
 	}
 
-	target := match{progType, attachType}
-	switch target {
+	var target btf.Type
+	var typeName, featureName string
+	switch (match{progType, attachType}) {
 	case match{LSM, AttachLSMMac}:
-		var target btf.Func
-		err := findKernelType("bpf_lsm_"+name, &target)
-		if errors.Is(err, btf.ErrNotFound) {
-			return nil, &internal.UnsupportedFeatureError{
-				Name: name + " LSM hook",
-			}
-		}
-		if err != nil {
-			return nil, fmt.Errorf("resolve BTF for LSM hook %s: %w", name, err)
-		}
-
-		return &target, nil
+		target = new(btf.Func)
+		typeName = "bpf_lsm_" + name
+		featureName = name + " LSM hook"
 
 	case match{Tracing, AttachTraceIter}:
-		var target btf.Func
-		err := findKernelType("bpf_iter_"+name, &target)
-		if errors.Is(err, btf.ErrNotFound) {
-			return nil, &internal.UnsupportedFeatureError{
-				Name: name + " iterator",
-			}
-		}
-		if err != nil {
-			return nil, fmt.Errorf("resolve BTF for iterator %s: %w", name, err)
-		}
-
-		return &target, nil
+		target = new(btf.Func)
+		typeName = "bpf_iter_" + name
+		featureName = name + " iterator"
 
 	default:
 		return nil, nil
 	}
 
+	if kernel == nil {
+		var err error
+		kernel, err = btf.LoadKernelSpec()
+		if err != nil {
+			return nil, fmt.Errorf("load kernel spec: %w", err)
+		}
+	}
+
+	err := kernel.FindType(typeName, target)
+	if errors.Is(err, btf.ErrNotFound) {
+		return nil, &internal.UnsupportedFeatureError{
+			Name: featureName,
+		}
+	}
+	if err != nil {
+		return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err)
	}
+	return target, nil
 }
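The new TargetBTF option feeds both the kernel parameter of resolveBTFType and the CO-RE fixups above, so programs can be loaded on kernels that do not expose BTF at the well-known location. A sketch assuming a BTF-carrying ELF shipped with the application; all paths and names are hypothetical:

package main

import (
	"log"
	"os"

	"github.com/cilium/ebpf"
)

func main() {
	// Hypothetical path to an ELF carrying the target kernel's BTF, shipped
	// with the application for kernels that lack /sys/kernel/btf/vmlinux.
	f, err := os.Open("/etc/myapp/vmlinux-5.4.btf")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	spec, err := ebpf.LoadCollectionSpec("demo.o") // hypothetical object file
	if err != nil {
		log.Fatal(err)
	}

	// os.File satisfies io.ReaderAt, which is all TargetBTF requires.
	coll, err := ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		Programs: ebpf.ProgramOptions{TargetBTF: f},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()
}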
125 vendor/github.com/cilium/ebpf/run-tests.sh generated vendored
@@ -1,56 +1,95 @@
 #!/bin/bash
 # Test the current package under a different kernel.
 # Requires virtme and qemu to be installed.
+# Examples:
+#     Run all tests on a 5.4 kernel
+#     $ ./run-tests.sh 5.4
+#     Run a subset of tests:
+#     $ ./run-tests.sh 5.4 go test ./link
 
-set -eu
-set -o pipefail
+set -euo pipefail
 
-if [[ "${1:-}" = "--in-vm" ]]; then
+script="$(realpath "$0")"
+readonly script
+
+# This script is a bit like a Matryoshka doll since it keeps re-executing itself
+# in various different contexts:
+#
+#   1. invoked by the user like run-tests.sh 5.4
+#   2. invoked by go test like run-tests.sh --exec-vm
+#   3. invoked by init in the vm like run-tests.sh --exec-test
+#
+# This allows us to use all available CPU on the host machine to compile our
+# code, and then only use the VM to execute the test. This is because the VM
+# is usually slower at compiling than the host.
+if [[ "${1:-}" = "--exec-vm" ]]; then
+	shift
+
+	input="$1"
+	shift
+
+	# Use sudo if /dev/kvm isn't accessible by the current user.
+	sudo=""
+	if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
+		sudo="sudo"
+	fi
+	readonly sudo
+
+	testdir="$(dirname "$1")"
+	output="$(mktemp -d)"
+	printf -v cmd "%q " "$@"
+
+	if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then
+		# stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a
+		# blocking substitute.
+		mkfifo "${output}/fake-stdin"
+		# Open for reading and writing to avoid blocking.
+		exec 0<> "${output}/fake-stdin"
+		rm "${output}/fake-stdin"
+	fi
+
+	$sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \
+		--rwdir="${testdir}=${testdir}" \
+		--rodir=/run/input="${input}" \
+		--rwdir=/run/output="${output}" \
+		--script-sh "PATH=\"$PATH\" \"$script\" --exec-test $cmd" \
+		--qemu-opts -smp 2 # need at least two CPUs for some tests
+
+	if [[ ! -e "${output}/success" ]]; then
+		exit 1
+	fi
+
+	$sudo rm -r "$output"
+	exit 0
+elif [[ "${1:-}" = "--exec-test" ]]; then
 	shift
 
 	mount -t bpf bpf /sys/fs/bpf
 	mount -t tracefs tracefs /sys/kernel/debug/tracing
-	export CGO_ENABLED=0
-	export GOFLAGS=-mod=readonly
-	export GOPATH=/run/go-path
-	export GOPROXY=file:///run/go-path/pkg/mod/cache/download
-	export GOSUMDB=off
-	export GOCACHE=/run/go-cache
 
 	if [[ -d "/run/input/bpf" ]]; then
 		export KERNEL_SELFTESTS="/run/input/bpf"
 	fi
 
-	readonly output="${1}"
-	shift
-
-	echo Running tests...
-	go test -v -coverpkg=./... -coverprofile="$output/coverage.txt" -count 1 ./...
-	touch "$output/success"
+	dmesg -C
+	if ! "$@"; then
+		dmesg
+		exit 1
+	fi
+	touch "/run/output/success"
 	exit 0
 fi
 
-# Pull all dependencies, so that we can run tests without the
-# vm having network access.
-go mod download
-
-# Use sudo if /dev/kvm isn't accessible by the current user.
-sudo=""
-if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
-	sudo="sudo"
-fi
-readonly sudo
-
 readonly kernel_version="${1:-}"
 if [[ -z "${kernel_version}" ]]; then
 	echo "Expecting kernel version as first argument"
 	exit 1
 fi
+shift
 
 readonly kernel="linux-${kernel_version}.bz"
 readonly selftests="linux-${kernel_version}-selftests-bpf.bz"
 readonly input="$(mktemp -d)"
-readonly output="$(mktemp -d)"
 readonly tmp_dir="${TMPDIR:-/tmp}"
 readonly branch="${BRANCH:-master}"
@@ -60,6 +99,7 @@ fetch() {
 }
 
 fetch "${kernel}"
+cp "${tmp_dir}/${kernel}" "${input}/bzImage"
 
 if fetch "${selftests}"; then
 	mkdir "${input}/bpf"
@@ -68,25 +108,16 @@ else
 	echo "No selftests found, disabling"
 fi
 
-echo Testing on "${kernel_version}"
-$sudo virtme-run --kimg "${tmp_dir}/${kernel}" --memory 512M --pwd \
-	--rw \
-	--rwdir=/run/input="${input}" \
-	--rwdir=/run/output="${output}" \
-	--rodir=/run/go-path="$(go env GOPATH)" \
-	--rwdir=/run/go-cache="$(go env GOCACHE)" \
-	--script-sh "PATH=\"$PATH\" $(realpath "$0") --in-vm /run/output" \
-	--qemu-opts -smp 2 # need at least two CPUs for some tests
-
-if [[ ! -e "${output}/success" ]]; then
-	echo "Test failed on ${kernel_version}"
-	exit 1
-else
-	echo "Test successful on ${kernel_version}"
-	if [[ -v COVERALLS_TOKEN ]]; then
-		goveralls -coverprofile="${output}/coverage.txt" -service=semaphore -repotoken "$COVERALLS_TOKEN"
-	fi
+args=(-v -short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...)
+if (( $# > 0 )); then
+	args=("$@")
 fi
 
-$sudo rm -r "${input}"
-$sudo rm -r "${output}"
+export GOFLAGS=-mod=readonly
+export CGO_ENABLED=0
+
+echo Testing on "${kernel_version}"
+go test -exec "$script --exec-vm $input" "${args[@]}"
+echo "Test successful on ${kernel_version}"
+
+rm -r "${input}"
2 vendor/modules.txt vendored
@@ -1,7 +1,7 @@
 # github.com/checkpoint-restore/go-criu/v5 v5.0.0
 github.com/checkpoint-restore/go-criu/v5
 github.com/checkpoint-restore/go-criu/v5/rpc
-# github.com/cilium/ebpf v0.5.0
+# github.com/cilium/ebpf v0.6.0
 github.com/cilium/ebpf
 github.com/cilium/ebpf/asm
 github.com/cilium/ebpf/internal