build(deps): bump github.com/cilium/ebpf from 0.9.3 to 0.10.0

Bumps [github.com/cilium/ebpf](https://github.com/cilium/ebpf) from 0.9.3 to 0.10.0.
- [Release notes](https://github.com/cilium/ebpf/releases)
- [Commits](https://github.com/cilium/ebpf/compare/v0.9.3...v0.10.0)

---
updated-dependencies:
- dependency-name: github.com/cilium/ebpf
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot]
Date: 2023-01-17 23:24:59 +00:00
Committed by: GitHub
Parent: 3c12cbda39
Commit: cc63d074e6
42 changed files with 1769 additions and 728 deletions

go.mod

@@ -4,7 +4,7 @@ go 1.18
require (
github.com/checkpoint-restore/go-criu/v6 v6.3.0
github.com/cilium/ebpf v0.9.3
github.com/cilium/ebpf v0.10.0
github.com/containerd/console v1.0.3
github.com/coreos/go-systemd/v22 v22.5.0
github.com/cyphar/filepath-securejoin v0.2.3

go.sum

@@ -1,8 +1,8 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA=
github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI=
github.com/cilium/ebpf v0.9.3 h1:5KtxXZU+scyERvkJMEm16TbScVvuuMrlhPly78ZMbSc=
github.com/cilium/ebpf v0.9.3/go.mod h1:w27N4UjpaQ9X/DGrSugxUG+H+NhgntDuPb5lCzxCn8A=
github.com/cilium/ebpf v0.10.0 h1:nk5HPMeoBXtOzbkZBWym+ZWq1GIiHUsBFXxwewXAHLQ=
github.com/cilium/ebpf v0.10.0/go.mod h1:DPiVdY/kT534dgc9ERmvP8mWA+9gvwgKfRvk4nNWnoE=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -17,15 +17,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
@@ -37,7 +37,7 @@ github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP
github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -67,7 +67,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=

vendor/github.com/cilium/ebpf/Makefile

@@ -77,9 +77,7 @@ all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) gene
generate: export BPF_CLANG := $(CLANG)
generate: export BPF_CFLAGS := $(CFLAGS)
generate:
go generate ./cmd/bpf2go/test
go generate ./internal/sys
go generate ./examples/...
go generate ./...
testdata/loader-%-el.elf: testdata/loader.c
$* $(CFLAGS) -target bpfel -c $< -o $@

vendor/github.com/cilium/ebpf/CONTRIBUTING.md

@@ -21,7 +21,7 @@ eBPF and the library, and help shape the future of the project.
## Getting Help
The community actively monitors our [GitHub Discussions](discussions/) page.
The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page.
Please search for existing threads before starting a new one. Refrain from
opening issues on the bug tracker if you're just starting out or if you're not
sure if something is a bug in the library code.

vendor/github.com/cilium/ebpf/asm/instruction.go

@@ -354,6 +354,13 @@ func (ins Instruction) Size() uint64 {
return uint64(InstructionSize * ins.OpCode.rawInstructions())
}
// WithMetadata sets the given Metadata on the Instruction. e.g. to copy
// Metadata from another Instruction when replacing it.
func (ins Instruction) WithMetadata(meta Metadata) Instruction {
ins.Metadata = meta
return ins
}
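A minimal caller-side sketch (not part of the diff; it assumes only the asm API shown above plus the existing WithSymbol helper) of what WithMetadata is for, copying Metadata when swapping an instruction:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 1).WithSymbol("entry"),
		asm.Return(),
	}

	// Replace the first instruction but carry its Metadata over, so the
	// "entry" symbol still points at the replacement.
	insns[0] = asm.Mov.Imm(asm.R0, 0).WithMetadata(insns[0].Metadata)

	fmt.Println(insns)
}
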
type symbolMeta struct{}
// WithSymbol marks the Instruction as a Symbol, which other Instructions

vendor/github.com/cilium/ebpf/btf/btf.go

@@ -11,6 +11,7 @@ import (
"math"
"os"
"reflect"
"sync"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
@@ -21,9 +22,10 @@ const btfMagic = 0xeB9F
// Errors returned by BTF functions.
var (
ErrNotSupported = internal.ErrNotSupported
ErrNotFound = errors.New("not found")
ErrNoExtendedInfo = errors.New("no extended info")
ErrNotSupported = internal.ErrNotSupported
ErrNotFound = errors.New("not found")
ErrNoExtendedInfo = errors.New("no extended info")
ErrMultipleMatches = errors.New("multiple matching types")
)
// ID represents the unique ID of a BTF object.
@@ -32,12 +34,11 @@ type ID = sys.BTFID
// Spec represents decoded BTF.
type Spec struct {
// Data from .BTF.
rawTypes []rawType
strings *stringTable
strings *stringTable
// All types contained by the spec. For the base type, the position of
// a type in the slice is its ID.
types types
// All types contained by the spec, not including types from the base in
// case the spec was parsed from split BTF.
types []Type
// Type IDs indexed by type.
typeIDs map[Type]TypeID
@@ -49,6 +50,8 @@ type Spec struct {
byteOrder binary.ByteOrder
}
var btfHeaderLen = binary.Size(&btfHeader{})
type btfHeader struct {
Magic uint16
Version uint8
@@ -92,10 +95,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
file, err := internal.NewSafeELFFile(rd)
if err != nil {
if bo := guessRawBTFByteOrder(rd); bo != nil {
// Try to parse a naked BTF blob. This will return an error if
// we encounter a Datasec, since we can't fix it up.
spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
return spec, err
return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
}
return nil, err
@@ -106,7 +106,7 @@ func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
// LoadSpecAndExtInfosFromReader reads from an ELF.
//
// ExtInfos may be nil if the ELF doesn't contain section metadta.
// ExtInfos may be nil if the ELF doesn't contain section metadata.
// Returns ErrNotFound if the ELF contains no BTF.
func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
file, err := internal.NewSafeELFFile(rd)
@@ -127,40 +127,40 @@ func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
return spec, extInfos, nil
}
// variableOffsets extracts all symbol offsets from an ELF and indexes them by
// symbolOffsets extracts all symbol offsets from an ELF and indexes them by
// section and variable name.
//
// References to variables in BTF data sections carry unsigned 32-bit offsets.
// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
// beyond this range. Since these symbols cannot be described by BTF info,
// ignore them here.
func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
symbols, err := file.Symbols()
if err != nil {
return nil, fmt.Errorf("can't read symbols: %v", err)
}
variableOffsets := make(map[variable]uint32)
for _, symbol := range symbols {
if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
offsets := make(map[symbol]uint32)
for _, sym := range symbols {
if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
// Ignore things like SHN_ABS
continue
}
if symbol.Value > math.MaxUint32 {
if sym.Value > math.MaxUint32 {
// VarSecinfo offset is u32, cannot reference symbols in higher regions.
continue
}
if int(symbol.Section) >= len(file.Sections) {
return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
if int(sym.Section) >= len(file.Sections) {
return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
}
secName := file.Sections[symbol.Section].Name
variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
secName := file.Sections[sym.Section].Name
offsets[symbol{secName, sym.Name}] = uint32(sym.Value)
}
return variableOffsets, nil
return offsets, nil
}
func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
@@ -190,7 +190,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("btf: %w", ErrNotFound)
}
vars, err := variableOffsets(file)
offsets, err := symbolOffsets(file)
if err != nil {
return nil, err
}
@@ -199,17 +199,17 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
return nil, fmt.Errorf("compressed BTF is not supported")
}
rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil, nil)
if err != nil {
return nil, err
}
err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
err = fixupDatasec(spec.types, sectionSizes, offsets)
if err != nil {
return nil, err
}
return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
return spec, nil
}
func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
@@ -220,12 +220,6 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
return nil, err
}
return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
}
func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
baseTypes types) (*Spec, error) {
types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
if err != nil {
return nil, err
@@ -234,7 +228,6 @@ func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrde
typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
return &Spec{
rawTypes: rawTypes,
namedTypes: typesByName,
typeIDs: typeIDs,
types: types,
@@ -272,20 +265,67 @@ func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essenti
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
spec, _, err := kernelSpec()
return spec, err
}
var kernelBTF struct {
sync.RWMutex
spec *Spec
// True if the spec was read from an ELF instead of raw BTF in /sys.
fallback bool
}
// FlushKernelSpec removes any cached kernel type information.
func FlushKernelSpec() {
kernelBTF.Lock()
defer kernelBTF.Unlock()
kernelBTF.spec, kernelBTF.fallback = nil, false
}
func kernelSpec() (*Spec, bool, error) {
kernelBTF.RLock()
spec, fallback := kernelBTF.spec, kernelBTF.fallback
kernelBTF.RUnlock()
if spec == nil {
kernelBTF.Lock()
defer kernelBTF.Unlock()
spec, fallback = kernelBTF.spec, kernelBTF.fallback
}
if spec != nil {
return spec.Copy(), fallback, nil
}
spec, fallback, err := loadKernelSpec()
if err != nil {
return nil, false, err
}
kernelBTF.spec, kernelBTF.fallback = spec, fallback
return spec.Copy(), fallback, nil
}
func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()
return loadRawSpec(fh, internal.NativeEndian, nil, nil)
spec, err := loadRawSpec(fh, internal.NativeEndian, nil, nil)
return spec, false, err
}
file, err := findVMLinux()
if err != nil {
return nil, err
return nil, false, err
}
defer file.Close()
return loadSpecFromELF(file)
spec, err := loadSpecFromELF(file)
return spec, true, err
}
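The effect of the caching above, sketched against the public API (hedged; assumes a kernel with BTF enabled):

package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// The first call parses /sys/kernel/btf/vmlinux (or falls back to a
	// vmlinux ELF); later calls return a copy of the cached Spec.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		fmt.Println("kernel BTF unavailable:", err)
		return
	}
	_ = spec

	// Drop the cache, e.g. in tests that must re-read kernel BTF.
	btf.FlushKernelSpec()
}
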
// findVMLinux scans multiple well-known paths for vmlinux kernel images.
@@ -388,55 +428,38 @@ func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([
return rawTypes, rawStrings, nil
}
type variable struct {
type symbol struct {
section string
name string
}
func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
for i, rawType := range rawTypes {
if rawType.Kind() != kindDatasec {
func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
for _, typ := range types {
ds, ok := typ.(*Datasec)
if !ok {
continue
}
name, err := rawStrings.Lookup(rawType.NameOff)
if err != nil {
return err
}
name := ds.Name
if name == ".kconfig" || name == ".ksyms" {
return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
}
if rawTypes[i].SizeType != 0 {
if ds.Size != 0 {
continue
}
size, ok := sectionSizes[name]
ds.Size, ok = sectionSizes[name]
if !ok {
return fmt.Errorf("data section %s: missing size", name)
}
rawTypes[i].SizeType = size
secinfos := rawType.data.([]btfVarSecinfo)
for j, secInfo := range secinfos {
id := int(secInfo.Type - 1)
if id >= len(rawTypes) {
return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
}
varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
if err != nil {
return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
}
offset, ok := variableOffsets[variable{name, varName}]
for i := range ds.Vars {
symName := ds.Vars[i].Type.TypeName()
ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
if !ok {
return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
}
secinfos[j].Offset = offset
}
}
@@ -447,15 +470,10 @@ func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[
func (s *Spec) Copy() *Spec {
types := copyTypes(s.types, nil)
typeIDOffset := TypeID(0)
if len(s.types) != 0 {
typeIDOffset = s.typeIDs[s.types[0]]
}
typeIDs, typesByName := indexTypes(types, typeIDOffset)
typeIDs, typesByName := indexTypes(types, s.firstTypeID())
// NB: Other parts of spec are not copied since they are immutable.
return &Spec{
s.rawTypes,
s.strings,
types,
typeIDs,
@@ -464,67 +482,6 @@ func (s *Spec) Copy() *Spec {
}
}
type marshalOpts struct {
ByteOrder binary.ByteOrder
StripFuncLinkage bool
}
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
var (
buf bytes.Buffer
header = new(btfHeader)
headerLen = binary.Size(header)
stringsLen int
)
// Reserve space for the header. We have to write it last since
// we don't know the size of the type section yet.
_, _ = buf.Write(make([]byte, headerLen))
// Write type section, just after the header.
for _, raw := range s.rawTypes {
switch {
case opts.StripFuncLinkage && raw.Kind() == kindFunc:
raw.SetLinkage(StaticFunc)
}
if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
}
typeLen := uint32(buf.Len() - headerLen)
// Write string section after type section.
if s.strings != nil {
stringsLen = s.strings.Length()
buf.Grow(stringsLen)
if err := s.strings.Marshal(&buf); err != nil {
return nil, err
}
}
// Fill out the header, and write it out.
header = &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(headerLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(stringsLen),
}
raw := buf.Bytes()
err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
if err != nil {
return nil, fmt.Errorf("can't write header: %v", err)
}
return raw, nil
}
type sliceWriter []byte
func (sw sliceWriter) Write(p []byte) (int, error) {
@@ -540,7 +497,14 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
// Returns an error wrapping ErrNotFound if a Type with the given ID
// does not exist in the Spec.
func (s *Spec) TypeByID(id TypeID) (Type, error) {
return s.types.ByID(id)
firstID := s.firstTypeID()
lastID := firstID + TypeID(len(s.types))
if id < firstID || id >= lastID {
return nil, fmt.Errorf("expected type ID between %d and %d, got %d: %w", firstID, lastID, id, ErrNotFound)
}
return s.types[id-firstID], nil
}
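A sketch of the new bounds check from the caller's perspective (assumes kernel BTF; for split BTF, valid IDs only start after the base Spec's types):

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		return
	}

	typ, err := spec.TypeByID(1)
	if errors.Is(err, btf.ErrNotFound) {
		// ID falls outside [firstTypeID, firstTypeID+len(types)).
		fmt.Println(err)
		return
	}
	fmt.Println(typ)
}
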
// TypeID returns the ID for a given Type.
@@ -601,16 +565,15 @@ func (s *Spec) AnyTypeByName(name string) (Type, error) {
return types[0], nil
}
// TypeByName searches for a Type with a specific name. Since multiple
// Types with the same name can exist, the parameter typ is taken to
// narrow down the search in case of a clash.
// TypeByName searches for a Type with a specific name. Since multiple Types
// with the same name can exist, the parameter typ is taken to narrow down the
// search in case of a clash.
//
// typ must be a non-nil pointer to an implementation of a Type.
// On success, the address of the found Type will be copied to typ.
// typ must be a non-nil pointer to an implementation of a Type. On success, the
// address of the found Type will be copied to typ.
//
// Returns an error wrapping ErrNotFound if no matching
// Type exists in the Spec. If multiple candidates are found,
// an error is returned.
// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
// Returns an error wrapping ErrMultipleMatches if multiple candidates are found.
func (s *Spec) TypeByName(name string, typ interface{}) error {
typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
@@ -647,7 +610,7 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
}
if candidate != nil {
return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
}
candidate = typ
@@ -662,6 +625,14 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
return nil
}
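Caller-side sketch of the new ErrMultipleMatches contract (task_struct is just an illustrative name; it exists in most kernel BTF):

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		return
	}

	// typ must be a non-nil pointer to a Type implementation.
	var task *btf.Struct
	err = spec.TypeByName("task_struct", &task)
	switch {
	case errors.Is(err, btf.ErrNotFound):
		fmt.Println("no such struct")
	case errors.Is(err, btf.ErrMultipleMatches):
		fmt.Println("ambiguous name")
	case err == nil:
		fmt.Println("found", task.Name)
	}
}
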
// firstTypeID returns the first type ID or zero.
func (s *Spec) firstTypeID() TypeID {
if len(s.types) > 0 {
return s.typeIDs[s.types[0]]
}
return 0
}
// LoadSplitSpecFromReader loads split BTF from a reader.
//
// Types from base are used to resolve references in the split BTF.
@@ -694,128 +665,6 @@ func (iter *TypesIterator) Next() bool {
return true
}
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
fd *sys.FD
// Size of the raw BTF in bytes.
size uint32
}
// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
if err := haveBTF(); err != nil {
return nil, err
}
if spec.byteOrder != nil && spec.byteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
}
btf, err := spec.marshal(marshalOpts{
ByteOrder: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
})
if err != nil {
return nil, fmt.Errorf("can't marshal BTF: %w", err)
}
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
}
fd, err := sys.BtfLoad(attr)
if err != nil {
logBuf := make([]byte, 64*1024)
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1
// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
// if there are other verification errors. ENOSPC is only returned when
// the BTF blob is correct, a log was requested, and the provided buffer
// is too small.
_, ve := sys.BtfLoad(attr)
return nil, internal.ErrorWithLog(err, logBuf, errors.Is(ve, unix.ENOSPC))
}
return &Handle{fd, attr.BtfSize}, nil
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
Id: uint32(id),
})
if err != nil {
return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
}
info, err := newHandleInfoFromFD(fd)
if err != nil {
_ = fd.Close()
return nil, err
}
return &Handle{fd, info.size}, nil
}
// Spec parses the kernel BTF into Go types.
//
// base is used to decode split BTF and may be nil.
func (h *Handle) Spec(base *Spec) (*Spec, error) {
var btfInfo sys.BtfInfo
btfBuffer := make([]byte, h.size)
btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
return nil, err
}
var baseTypes types
var baseStrings *stringTable
if base != nil {
baseTypes = base.types
baseStrings = base.strings
}
return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
}
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
if h == nil {
return nil
}
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
return h.fd.Int()
}
// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
return newHandleInfoFromFD(h.fd)
}
func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
const minHeaderLength = 24
@@ -838,19 +687,52 @@ func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
return buf.Bytes()
}
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
// kernel that supports BPF_BTF_LOAD.
var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
var (
types struct {
Integer btfType
btfInt
}
strings = []byte{0}
)
types.Integer.SetKind(kindInt) // 0-length anonymous integer
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
})
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
return internal.ErrNotSupported
}
if err != nil {
return err
}
fd.Close()
return nil
})
// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
// used as a proxy for .bss, .data and .rodata map support, which generally
// come with a Var and Datasec. These were introduced in Linux 5.2.
var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error {
if err := haveBTF(); err != nil {
return err
}
var (
types struct {
Integer btfType
Var btfType
btfVar struct{ Linkage uint32 }
btfVariable
}
strings = []byte{0, 'a', 0}
)
// We use a BTF_KIND_VAR here, to make sure that
// the kernel understands BTF at least as well as we
// do. BTF_KIND_VAR was introduced ~5.1.
types.Integer.SetKind(kindPointer)
types.Var.NameOff = 1
types.Var.SetKind(kindVar)
@@ -863,8 +745,8 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
BtfSize: uint32(len(btf)),
})
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
// Treat both EINVAL and EPERM as not supported: loading the program
// might still succeed without BTF.
// Treat both EINVAL and EPERM as not supported: creating the map may still
// succeed without Btf* attrs.
return internal.ErrNotSupported
}
if err != nil {
@@ -875,7 +757,10 @@ var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
return nil
})
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
// is used as a proxy for ext_info (func_info) support, which depends on
// Func(Proto) by definition.
var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error {
if err := haveBTF(); err != nil {
return err
}
@@ -888,6 +773,41 @@ var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() err
strings = []byte{0, 'a', 0}
)
types.FuncProto.SetKind(kindFuncProto)
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto
types.Func.NameOff = 1
btf := marshalBTF(&types, strings, internal.NativeEndian)
fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
})
if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
return internal.ErrNotSupported
}
if err != nil {
return err
}
fd.Close()
return nil
})
var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
if err := haveProgBTF(); err != nil {
return err
}
var (
types struct {
FuncProto btfType
Func btfType
}
strings = []byte{0, 'a', 0}
)
types.FuncProto.SetKind(kindFuncProto)
types.Func.SetKind(kindFunc)
types.Func.SizeType = 1 // aka FuncProto

vendor/github.com/cilium/ebpf/btf/btf_types.go

@@ -36,6 +36,8 @@ const (
// Added 5.16
kindDeclTag // DeclTag
kindTypeTag // TypeTag
// Added 6.0
kindEnum64 // Enum64
)
// FuncLinkage describes BTF function linkage metadata.
@@ -66,6 +68,8 @@ const (
btfTypeKindFlagMask = 1
)
var btfTypeLen = binary.Size(btfType{})
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
NameOff uint32
@@ -126,10 +130,43 @@ func (bt *btfType) SetVlen(vlen int) {
bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}
func (bt *btfType) KindFlag() bool {
func (bt *btfType) kindFlagBool() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
func (bt *btfType) setKindFlagBool(set bool) {
var value uint32
if set {
value = 1
}
bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift)
}
// Bitfield returns true if the struct or union contain a bitfield.
func (bt *btfType) Bitfield() bool {
return bt.kindFlagBool()
}
func (bt *btfType) SetBitfield(isBitfield bool) {
bt.setKindFlagBool(isBitfield)
}
func (bt *btfType) FwdKind() FwdKind {
return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift))
}
func (bt *btfType) SetFwdKind(kind FwdKind) {
bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift)
}
func (bt *btfType) Signed() bool {
return bt.kindFlagBool()
}
func (bt *btfType) SetSigned(signed bool) {
bt.setKindFlagBool(signed)
}
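The typed accessors exist because the single kind_flag bit means something different per kind; a hypothetical illustration from within package btf (unexported API as introduced above):

func exampleKindFlag() {
	var bt btfType

	bt.SetKind(kindEnum)
	bt.SetSigned(true) // enums: signedness

	bt.SetKind(kindForward)
	bt.SetFwdKind(FwdUnion) // forwards: struct vs. union

	bt.SetKind(kindStruct)
	bt.SetBitfield(true) // structs/unions: members contain a bitfield
}
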
func (bt *btfType) Linkage() FuncLinkage {
return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
@@ -143,6 +180,10 @@ func (bt *btfType) Type() TypeID {
return TypeID(bt.SizeType)
}
func (bt *btfType) SetType(id TypeID) {
bt.SizeType = uint32(id)
}
func (bt *btfType) Size() uint32 {
// TODO: Panic here if wrong kind?
return bt.SizeType
@@ -240,6 +281,12 @@ type btfEnum struct {
Val uint32
}
type btfEnum64 struct {
NameOff uint32
ValLo32 uint32
ValHi32 uint32
}
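How a 64-bit enumerator value maps onto the two 32-bit halves (this mirrors deflateEnum64Values in the new marshal.go below):

package main

import "fmt"

func main() {
	v := uint64(0x1122334455667788)
	lo := uint32(v)       // ValLo32: 0x55667788
	hi := uint32(v >> 32) // ValHi32: 0x11223344
	fmt.Printf("roundtrip: %#x\n", uint64(hi)<<32|uint64(lo))
}
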
type btfParam struct {
NameOff uint32
Type TypeID
@@ -254,7 +301,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, err
// because of the interleaving between types and struct members it is difficult to
// precompute the number of raw types this will parse
// this "guess" is a good first estimation
sizeOfbtfType := uintptr(binary.Size(btfType{}))
sizeOfbtfType := uintptr(btfTypeLen)
tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
types := make([]rawType, 0, tyMaxCount)
@@ -294,6 +341,8 @@ func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, err
case kindDeclTag:
data = new(btfDeclTag)
case kindTypeTag:
case kindEnum64:
data = make([]btfEnum64, header.Vlen())
default:
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
}

vendor/github.com/cilium/ebpf/btf/btf_types_string.go

@@ -65,11 +65,12 @@ func _() {
_ = x[kindFloat-16]
_ = x[kindDeclTag-17]
_ = x[kindTypeTag-18]
_ = x[kindEnum64-19]
}
const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTag"
const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114}
var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
func (i btfKind) String() string {
if i >= btfKind(len(_btfKind_index)-1) {

vendor/github.com/cilium/ebpf/btf/core.go

@@ -251,7 +251,7 @@ func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Ty
for _, relo := range relos {
fixup, err := coreCalculateFixup(relo, target, targetID, bo)
if err != nil {
return nil, fmt.Errorf("target %s: %w", target, err)
return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
}
if fixup.poison || fixup.isNonExistant() {
score++
@@ -320,7 +320,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
switch relo.kind {
case reloTypeIDTarget, reloTypeSize, reloTypeExists:
if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
}
err := coreAreTypesCompatible(local, target)
@@ -328,7 +328,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
return zero, err
}
switch relo.kind {
@@ -358,7 +358,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
return poison()
}
if err != nil {
return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
return zero, err
}
switch relo.kind {
@@ -395,7 +395,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
return poison()
}
if err != nil {
return zero, fmt.Errorf("target %s: %w", target, err)
return zero, err
}
maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
@@ -451,7 +451,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
}
}
return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
return zero, ErrNotSupported
}
/* coreAccessor contains a path through a struct. It contains at least one index.
@@ -552,6 +552,10 @@ type coreField struct {
}
func (cf *coreField) adjustOffsetToNthElement(n int) error {
if n == 0 {
return nil
}
size, err := Sizeof(cf.Type)
if err != nil {
return err
@@ -608,6 +612,10 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
local := coreField{Type: localT}
target := coreField{Type: targetT}
if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
// The first index is used to offset a pointer of the base type like
// when accessing an array.
if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
@@ -618,10 +626,6 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
return coreField{}, coreField{}, err
}
if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
}
var localMaybeFlex, targetMaybeFlex bool
for i, acc := range localAcc[1:] {
switch localType := local.Type.(type) {

vendor/github.com/cilium/ebpf/btf/ext_info.go

@@ -8,6 +8,7 @@ import (
"io"
"math"
"sort"
"sync"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
@@ -114,7 +115,7 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
iter := insns.Iterate()
for iter.Next() {
if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
*iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].fn)
funcInfos = funcInfos[1:]
}
@@ -130,19 +131,50 @@ func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
}
}
var nativeEncoderPool = sync.Pool{
New: func() any {
return newEncoder(kernelEncoderOptions, nil)
},
}
// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
//
// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
// Bail out early if the kernel doesn't support Func(Proto). If this is the
// case, func_info will also be unsupported.
if err := haveProgBTF(); err != nil {
return nil, nil, nil, err
}
iter := insns.Iterate()
var fiBuf, liBuf bytes.Buffer
for iter.Next() {
_, ok := iter.Ins.Source().(*Line)
fn := FuncMetadata(iter.Ins)
if ok || fn != nil {
goto marshal
}
}
// Avoid allocating encoder, etc. if there is no BTF at all.
return nil, nil, nil, nil
marshal:
enc := nativeEncoderPool.Get().(*encoder)
defer nativeEncoderPool.Put(enc)
enc.Reset()
var fiBuf, liBuf bytes.Buffer
for {
if fn := FuncMetadata(iter.Ins); fn != nil {
fi := &funcInfo{
fn: fn,
offset: iter.Offset,
}
if err := fi.marshal(&fiBuf, typeID); err != nil {
return nil, nil, fmt.Errorf("write func info: %w", err)
if err := fi.marshal(&fiBuf, enc); err != nil {
return nil, nil, nil, fmt.Errorf("write func info: %w", err)
}
}
@@ -151,12 +183,23 @@ func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error))
line: line,
offset: iter.Offset,
}
if err := li.marshal(&liBuf); err != nil {
return nil, nil, fmt.Errorf("write line info: %w", err)
if err := li.marshal(&liBuf, enc.strings); err != nil {
return nil, nil, nil, fmt.Errorf("write line info: %w", err)
}
}
if !iter.Next() {
break
}
}
return fiBuf.Bytes(), liBuf.Bytes(), nil
btf, err := enc.Encode()
if err != nil {
return nil, nil, nil, err
}
handle, err := newHandleFromRawBTF(btf)
return handle, fiBuf.Bytes(), liBuf.Bytes(), err
}
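A hedged caller-side sketch of the new signature (requires a BTF-capable kernel and sufficient privileges, since the Handle is loaded via BPF_BTF_LOAD; the function name is illustrative):

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/btf"
)

func main() {
	insns := asm.Instructions{
		btf.WithFuncMetadata(asm.Mov.Imm(asm.R0, 0), &btf.Func{
			Name:    "example",
			Type:    &btf.FuncProto{Return: &btf.Void{}},
			Linkage: btf.GlobalFunc,
		}),
		asm.Return(),
	}

	// One call now yields the loaded Handle plus func/line info blobs.
	handle, funcInfos, lineInfos, err := btf.MarshalExtInfos(insns)
	if errors.Is(err, btf.ErrNotSupported) {
		return
	}
	if err != nil {
		fmt.Println(err)
		return
	}
	defer handle.Close()
	fmt.Println(len(funcInfos), len(lineInfos))
}
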
// btfExtHeader is found at the start of the .BTF.ext section.
@@ -349,8 +392,8 @@ func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
}
// marshal into the BTF wire format.
func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
id, err := typeID(fi.fn)
func (fi *funcInfo) marshal(w *bytes.Buffer, enc *encoder) error {
id, err := enc.Add(fi.fn)
if err != nil {
return err
}
@@ -358,7 +401,11 @@ func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) erro
InsnOff: uint32(fi.offset),
TypeID: id,
}
return binary.Write(w, internal.NativeEndian, &bfi)
buf := make([]byte, FuncInfoSize)
internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
_, err = w.Write(buf)
return err
}
// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
@@ -428,12 +475,6 @@ type Line struct {
line string
lineNumber uint32
lineColumn uint32
// TODO: We should get rid of the fields below, but for that we need to be
// able to write BTF.
fileNameOff uint32
lineOff uint32
}
func (li *Line) FileName() string {
@@ -496,8 +537,6 @@ func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
line,
lineNumber,
lineColumn,
li.FileNameOff,
li.LineOff,
},
asm.RawInstructionOffset(li.InsnOff),
}, nil
@@ -519,7 +558,7 @@ func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error)
}
// marshal writes the binary representation of the LineInfo to w.
func (li *lineInfo) marshal(w io.Writer) error {
func (li *lineInfo) marshal(w *bytes.Buffer, stb *stringTableBuilder) error {
line := li.line
if line.lineNumber > bpfLineMax {
return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
@@ -529,13 +568,30 @@ func (li *lineInfo) marshal(w io.Writer) error {
return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
}
fileNameOff, err := stb.Add(line.fileName)
if err != nil {
return fmt.Errorf("file name %q: %w", line.fileName, err)
}
lineOff, err := stb.Add(line.line)
if err != nil {
return fmt.Errorf("line %q: %w", line.line, err)
}
bli := bpfLineInfo{
uint32(li.offset),
line.fileNameOff,
line.lineOff,
fileNameOff,
lineOff,
(line.lineNumber << bpfLineShift) | line.lineColumn,
}
return binary.Write(w, internal.NativeEndian, &bli)
buf := make([]byte, LineInfoSize)
internal.NativeEndian.PutUint32(buf, bli.InsnOff)
internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
_, err = w.Write(buf)
return err
}
// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of

vendor/github.com/cilium/ebpf/btf/format.go

@@ -293,7 +293,11 @@ func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
prevOffset := uint32(0)
for i, vsi := range ds.Vars {
v := vsi.Type.(*Var)
v, ok := vsi.Type.(*Var)
if !ok {
return fmt.Errorf("can't format %s as part of data section", vsi.Type)
}
if v.Linkage != GlobalVar {
// Ignore static, extern, etc. for now.
continue

vendor/github.com/cilium/ebpf/btf/handle.go

@@ -1,14 +1,155 @@
package btf
import (
"bytes"
"errors"
"fmt"
"math"
"os"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
fd *sys.FD
// Size of the raw BTF in bytes.
size uint32
needsKernelBase bool
}
// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
if spec.byteOrder != nil && spec.byteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
}
enc := newEncoder(kernelEncoderOptions, newStringTableBuilderFromTable(spec.strings))
for _, typ := range spec.types {
_, err := enc.Add(typ)
if err != nil {
return nil, fmt.Errorf("add %s: %w", typ, err)
}
}
btf, err := enc.Encode()
if err != nil {
return nil, fmt.Errorf("marshal BTF: %w", err)
}
return newHandleFromRawBTF(btf)
}
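Caller-side sketch (hedged: needs privileges and a BTF-capable kernel; round-tripping the full kernel Spec is only for illustration):

package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		return
	}

	// Marshals the Spec with the new encoder and loads it via BPF_BTF_LOAD.
	handle, err := btf.NewHandle(spec)
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	defer handle.Close()

	fmt.Println("BTF loaded, fd:", handle.FD())
}
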
func newHandleFromRawBTF(btf []byte) (*Handle, error) {
if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
attr := &sys.BtfLoadAttr{
Btf: sys.NewSlicePointer(btf),
BtfSize: uint32(len(btf)),
}
fd, err := sys.BtfLoad(attr)
if err == nil {
return &Handle{fd, attr.BtfSize, false}, nil
}
if err := haveBTF(); err != nil {
return nil, err
}
logBuf := make([]byte, 64*1024)
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1
// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
// if there are other verification errors. ENOSPC is only returned when
// the BTF blob is correct, a log was requested, and the provided buffer
// is too small.
_, ve := sys.BtfLoad(attr)
return nil, internal.ErrorWithLog("load btf", err, logBuf, errors.Is(ve, unix.ENOSPC))
}
// NewHandleFromID returns the BTF handle for a given id.
//
// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
//
// Returns ErrNotExist, if there is no BTF with the given id.
//
// Requires CAP_SYS_ADMIN.
func NewHandleFromID(id ID) (*Handle, error) {
fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
Id: uint32(id),
})
if err != nil {
return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
}
info, err := newHandleInfoFromFD(fd)
if err != nil {
_ = fd.Close()
return nil, err
}
return &Handle{fd, info.size, info.IsModule()}, nil
}
// Spec parses the kernel BTF into Go types.
func (h *Handle) Spec() (*Spec, error) {
var btfInfo sys.BtfInfo
btfBuffer := make([]byte, h.size)
btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
return nil, err
}
if !h.needsKernelBase {
return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil)
}
base, fallback, err := kernelSpec()
if err != nil {
return nil, fmt.Errorf("load BTF base: %w", err)
}
if fallback {
return nil, fmt.Errorf("can't load split BTF without access to /sys")
}
return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base.types, base.strings)
}
// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
if h == nil {
return nil
}
return h.fd.Close()
}
// FD returns the file descriptor for the handle.
func (h *Handle) FD() int {
return h.fd.Int()
}
// Info returns metadata about the handle.
func (h *Handle) Info() (*HandleInfo, error) {
return newHandleInfoFromFD(h.fd)
}
// HandleInfo describes a Handle.
type HandleInfo struct {
// ID of this handle in the kernel. The ID is only valid as long as the

vendor/github.com/cilium/ebpf/btf/marshal.go (new vendored file, 422 lines)

@@ -0,0 +1,422 @@
package btf
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/cilium/ebpf/internal"
)
type encoderOptions struct {
ByteOrder binary.ByteOrder
// Remove function linkage information for compatibility with <5.6 kernels.
StripFuncLinkage bool
}
// kernelEncoderOptions will generate BTF suitable for the current kernel.
var kernelEncoderOptions encoderOptions
func init() {
kernelEncoderOptions = encoderOptions{
ByteOrder: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
}
}
// encoder turns Types into raw BTF.
type encoder struct {
opts encoderOptions
buf *bytes.Buffer
strings *stringTableBuilder
allocatedIDs map[Type]TypeID
nextID TypeID
// Temporary storage for Add.
pending internal.Deque[Type]
// Temporary storage for deflateType.
raw rawType
}
// newEncoder returns a new builder for the given byte order.
//
// See [kernelEncoderOptions] to build BTF for the current system.
func newEncoder(opts encoderOptions, strings *stringTableBuilder) *encoder {
enc := &encoder{
opts: opts,
buf: bytes.NewBuffer(make([]byte, btfHeaderLen)),
}
enc.reset(strings)
return enc
}
// Reset internal state to be able to reuse the Encoder.
func (e *encoder) Reset() {
e.reset(nil)
}
func (e *encoder) reset(strings *stringTableBuilder) {
if strings == nil {
strings = newStringTableBuilder()
}
e.buf.Truncate(btfHeaderLen)
e.strings = strings
e.allocatedIDs = make(map[Type]TypeID)
e.nextID = 1
}
// Add a Type.
//
// Adding the same Type multiple times is valid and will return a stable ID.
//
// Calling the method has undefined behaviour if it previously returned an error.
func (e *encoder) Add(typ Type) (TypeID, error) {
if typ == nil {
return 0, errors.New("cannot Add a nil Type")
}
hasID := func(t Type) (skip bool) {
_, isVoid := t.(*Void)
_, alreadyEncoded := e.allocatedIDs[t]
return isVoid || alreadyEncoded
}
e.pending.Reset()
allocateID := func(typ Type) {
e.pending.Push(typ)
e.allocatedIDs[typ] = e.nextID
e.nextID++
}
iter := postorderTraversal(typ, hasID)
for iter.Next() {
if hasID(iter.Type) {
// This type is part of a cycle and we've already deflated it.
continue
}
// Allocate an ID for the next type.
allocateID(iter.Type)
for !e.pending.Empty() {
t := e.pending.Shift()
// Ensure that all direct descendants have been allocated an ID
// before calling deflateType.
walkType(t, func(child *Type) {
if !hasID(*child) {
// t refers to a type which hasn't been allocated an ID
// yet, which only happens for circular types.
allocateID(*child)
}
})
if err := e.deflateType(t); err != nil {
return 0, fmt.Errorf("deflate %s: %w", t, err)
}
}
}
return e.allocatedIDs[typ], nil
}
// Encode the raw BTF blob.
//
// The returned slice is valid until the next call to Add.
func (e *encoder) Encode() ([]byte, error) {
length := e.buf.Len()
// Truncate the string table on return to allow adding more types.
defer e.buf.Truncate(length)
typeLen := uint32(length - btfHeaderLen)
// Reserve space for the string table.
stringLen := e.strings.Length()
e.buf.Grow(stringLen)
buf := e.buf.Bytes()[:length+stringLen]
e.strings.MarshalBuffer(buf[length:])
// Fill out the header, and write it out.
header := &btfHeader{
Magic: btfMagic,
Version: 1,
Flags: 0,
HdrLen: uint32(btfHeaderLen),
TypeOff: 0,
TypeLen: typeLen,
StringOff: typeLen,
StringLen: uint32(stringLen),
}
err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.opts.ByteOrder, header)
if err != nil {
return nil, fmt.Errorf("can't write header: %v", err)
}
return buf, nil
}
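Hypothetical use of the unexported encoder from within package btf: Add deflates a type graph in postorder, Encode appends the string table and fills in the header.

func exampleEncode() ([]byte, error) {
	enc := newEncoder(kernelEncoderOptions, nil)

	u32 := &Int{Size: 4}
	if _, err := enc.Add(u32); err != nil {
		return nil, err
	}
	if _, err := enc.Add(&Pointer{Target: u32}); err != nil {
		return nil, err
	}

	return enc.Encode()
}
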
func (e *encoder) deflateType(typ Type) (err error) {
raw := &e.raw
*raw = rawType{}
raw.NameOff, err = e.strings.Add(typ.TypeName())
if err != nil {
return err
}
switch v := typ.(type) {
case *Int:
raw.SetKind(kindInt)
raw.SetSize(v.Size)
var bi btfInt
bi.SetEncoding(v.Encoding)
// We need to set bits in addition to size, since btf_type_int_is_regular
// otherwise flags this as a bitfield.
bi.SetBits(byte(v.Size) * 8)
raw.data = bi
case *Pointer:
raw.SetKind(kindPointer)
raw.SetType(e.allocatedIDs[v.Target])
case *Array:
raw.SetKind(kindArray)
raw.data = &btfArray{
e.allocatedIDs[v.Type],
e.allocatedIDs[v.Index],
v.Nelems,
}
case *Struct:
raw.SetKind(kindStruct)
raw.SetSize(v.Size)
raw.data, err = e.convertMembers(&raw.btfType, v.Members)
case *Union:
raw.SetKind(kindUnion)
raw.SetSize(v.Size)
raw.data, err = e.convertMembers(&raw.btfType, v.Members)
case *Enum:
raw.SetSize(v.size())
raw.SetVlen(len(v.Values))
raw.SetSigned(v.Signed)
if v.has64BitValues() {
raw.SetKind(kindEnum64)
raw.data, err = e.deflateEnum64Values(v.Values)
} else {
raw.SetKind(kindEnum)
raw.data, err = e.deflateEnumValues(v.Values)
}
case *Fwd:
raw.SetKind(kindForward)
raw.SetFwdKind(v.Kind)
case *Typedef:
raw.SetKind(kindTypedef)
raw.SetType(e.allocatedIDs[v.Type])
case *Volatile:
raw.SetKind(kindVolatile)
raw.SetType(e.allocatedIDs[v.Type])
case *Const:
raw.SetKind(kindConst)
raw.SetType(e.allocatedIDs[v.Type])
case *Restrict:
raw.SetKind(kindRestrict)
raw.SetType(e.allocatedIDs[v.Type])
case *Func:
raw.SetKind(kindFunc)
raw.SetType(e.allocatedIDs[v.Type])
if !e.opts.StripFuncLinkage {
raw.SetLinkage(v.Linkage)
}
case *FuncProto:
raw.SetKind(kindFuncProto)
raw.SetType(e.allocatedIDs[v.Return])
raw.SetVlen(len(v.Params))
raw.data, err = e.deflateFuncParams(v.Params)
case *Var:
raw.SetKind(kindVar)
raw.SetType(e.allocatedIDs[v.Type])
raw.data = btfVariable{uint32(v.Linkage)}
case *Datasec:
raw.SetKind(kindDatasec)
raw.SetSize(v.Size)
raw.SetVlen(len(v.Vars))
raw.data = e.deflateVarSecinfos(v.Vars)
case *Float:
raw.SetKind(kindFloat)
raw.SetSize(v.Size)
case *declTag:
raw.SetKind(kindDeclTag)
raw.data = &btfDeclTag{uint32(v.Index)}
case *typeTag:
raw.SetKind(kindTypeTag)
raw.NameOff, err = e.strings.Add(v.Value)
default:
return fmt.Errorf("don't know how to deflate %T", v)
}
if err != nil {
return err
}
return raw.Marshal(e.buf, e.opts.ByteOrder)
}
func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) {
bms := make([]btfMember, 0, len(members))
isBitfield := false
for _, member := range members {
isBitfield = isBitfield || member.BitfieldSize > 0
offset := member.Offset
if isBitfield {
offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff)
}
nameOff, err := e.strings.Add(member.Name)
if err != nil {
return nil, err
}
bms = append(bms, btfMember{
nameOff,
e.allocatedIDs[member.Type],
uint32(offset),
})
}
header.SetVlen(len(members))
header.SetBitfield(isBitfield)
return bms, nil
}
func (e *encoder) deflateEnumValues(values []EnumValue) ([]btfEnum, error) {
bes := make([]btfEnum, 0, len(values))
for _, value := range values {
nameOff, err := e.strings.Add(value.Name)
if err != nil {
return nil, err
}
if value.Value > math.MaxUint32 {
return nil, fmt.Errorf("value of enum %q exceeds 32 bits", value.Name)
}
bes = append(bes, btfEnum{
nameOff,
uint32(value.Value),
})
}
return bes, nil
}
func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) {
bes := make([]btfEnum64, 0, len(values))
for _, value := range values {
nameOff, err := e.strings.Add(value.Name)
if err != nil {
return nil, err
}
bes = append(bes, btfEnum64{
nameOff,
uint32(value.Value),
uint32(value.Value >> 32),
})
}
return bes, nil
}
func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) {
bps := make([]btfParam, 0, len(params))
for _, param := range params {
nameOff, err := e.strings.Add(param.Name)
if err != nil {
return nil, err
}
bps = append(bps, btfParam{
nameOff,
e.allocatedIDs[param.Type],
})
}
return bps, nil
}
func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
vsis := make([]btfVarSecinfo, 0, len(vars))
for _, v := range vars {
vsis = append(vsis, btfVarSecinfo{
e.allocatedIDs[v.Type],
v.Offset,
v.Size,
})
}
return vsis
}
// MarshalMapKV creates a BTF object containing a map key and value.
//
// The function is intended for the use of the ebpf package and may be removed
// at any point in time.
func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, _ error) {
enc := nativeEncoderPool.Get().(*encoder)
defer nativeEncoderPool.Put(enc)
enc.Reset()
var err error
if key != nil {
keyID, err = enc.Add(key)
if err != nil {
return nil, 0, 0, fmt.Errorf("adding map key to BTF encoder: %w", err)
}
}
if value != nil {
valueID, err = enc.Add(value)
if err != nil {
return nil, 0, 0, fmt.Errorf("adding map value to BTF encoder: %w", err)
}
}
btf, err := enc.Encode()
if err != nil {
return nil, 0, 0, fmt.Errorf("marshal BTF: %w", err)
}
handle, err := newHandleFromRawBTF(btf)
if err != nil {
// Check for 'full' map BTF support, since kernels between 4.18 and 5.2
// already support BTF blobs for maps without Var or Datasec just fine.
if err := haveMapBTF(); err != nil {
return nil, 0, 0, err
}
}
return handle, keyID, valueID, err
}
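A sketch of the intended call from the ebpf package (the key/value types are illustrative):

func exampleMapBTF() error {
	key := &Int{Size: 4}
	value := &Int{Size: 8}

	handle, keyID, valueID, err := MarshalMapKV(key, value)
	if err != nil {
		return err
	}
	defer handle.Close()

	// keyID/valueID belong in BPF_MAP_CREATE's btf_key_type_id and
	// btf_value_type_id attributes, alongside handle.FD().
	_, _ = keyID, valueID
	return nil
}
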

vendor/github.com/cilium/ebpf/btf/strings.go

@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"strings"
)
type stringTable struct {
@@ -130,3 +131,91 @@ func search(ints []uint32, needle uint32) int {
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
return i
}
// stringTableBuilder builds BTF string tables.
type stringTableBuilder struct {
length uint32
strings map[string]uint32
}
// newStringTableBuilder creates a builder with the given capacity.
//
// capacity may be zero.
func newStringTableBuilder() *stringTableBuilder {
stb := &stringTableBuilder{0, make(map[string]uint32)}
// Ensure that the empty string is at index 0.
stb.append("")
return stb
}
// newStringTableBuilderFromTable creates a new builder from an existing string table.
func newStringTableBuilderFromTable(contents *stringTable) *stringTableBuilder {
stb := &stringTableBuilder{0, make(map[string]uint32, len(contents.strings)+1)}
stb.append("")
for _, str := range contents.strings {
if str != "" {
stb.append(str)
}
}
return stb
}
// Add a string to the table.
//
// Adding the same string multiple times will only store it once.
func (stb *stringTableBuilder) Add(str string) (uint32, error) {
if strings.IndexByte(str, 0) != -1 {
return 0, fmt.Errorf("string contains null: %q", str)
}
offset, ok := stb.strings[str]
if ok {
return offset, nil
}
return stb.append(str), nil
}
func (stb *stringTableBuilder) append(str string) uint32 {
offset := stb.length
stb.length += uint32(len(str)) + 1
stb.strings[str] = offset
return offset
}
// Lookup finds the offset of a string in the table.
//
// Returns an error if str hasn't been added yet.
func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
offset, ok := stb.strings[str]
if !ok {
return 0, fmt.Errorf("string %q is not in table", str)
}
return offset, nil
}
// Length returns the length in bytes.
func (stb *stringTableBuilder) Length() int {
return int(stb.length)
}
// Marshal a string table into its binary representation.
func (stb *stringTableBuilder) Marshal() []byte {
buf := make([]byte, stb.Length())
stb.MarshalBuffer(buf)
return buf
}
// Marshal a string table into a pre-allocated buffer.
//
// The buffer must be at least of size Length().
func (stb *stringTableBuilder) MarshalBuffer(buf []byte) {
for str, offset := range stb.strings {
n := copy(buf[offset:], str)
buf[offset+uint32(n)] = 0
}
}
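Hypothetical use from within package btf; offset 0 is always the empty string and duplicate strings dedupe to one offset:

func exampleStringTable() []byte {
	stb := newStringTableBuilder()

	off1, _ := stb.Add("task_struct")
	off2, _ := stb.Add("task_struct") // same offset as off1
	_ = off1 == off2

	return stb.Marshal()
}
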

vendor/github.com/cilium/ebpf/btf/traversal.go

@@ -1,6 +1,92 @@
package btf
import "fmt"
import (
"fmt"
"github.com/cilium/ebpf/internal"
)
// Functions to traverse a cyclic graph of types. The below was very useful:
// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
type postorderIterator struct {
// Iteration skips types for which this function returns true.
skip func(Type) bool
// The root type. May be nil if skip(root) is true.
root Type
// Contains types which need to be either walked or passed to the callback.
types typeDeque
// Contains a boolean whether the type has been walked or not.
walked internal.Deque[bool]
// The set of types which has been pushed onto types.
pushed map[Type]struct{}
// The current type. Only valid after a call to Next().
Type Type
}
// postorderTraversal calls fn for all types reachable from root.
//
// fn is invoked on children of root before root itself.
//
// Types for which skip returns true are ignored. skip may be nil.
func postorderTraversal(root Type, skip func(Type) (skip bool)) postorderIterator {
// Avoid allocations for the common case of a skipped root.
if skip != nil && skip(root) {
return postorderIterator{}
}
po := postorderIterator{root: root, skip: skip}
walkType(root, po.push)
return po
}
func (po *postorderIterator) push(t *Type) {
if _, ok := po.pushed[*t]; ok || *t == po.root {
return
}
if po.skip != nil && po.skip(*t) {
return
}
if po.pushed == nil {
// Lazily allocate pushed to avoid an allocation for Types without children.
po.pushed = make(map[Type]struct{})
}
po.pushed[*t] = struct{}{}
po.types.Push(t)
po.walked.Push(false)
}
// Next returns true if there is another Type to traverse.
func (po *postorderIterator) Next() bool {
for !po.types.Empty() {
t := po.types.Pop()
if !po.walked.Pop() {
// Push the type again, so that we re-evaluate it in done state
// after all children have been handled.
po.types.Push(t)
po.walked.Push(true)
// Add all direct children to todo.
walkType(*t, po.push)
} else {
// We've walked this type previously, so we now know that all
// children have been handled.
po.Type = *t
return true
}
}
// Only return root once.
po.Type, po.root = po.root, nil
return po.Type != nil
}
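Consumed from within package btf like so (a sketch; a nil skip visits everything reachable from root):

func examplePostorder(root Type) []Type {
	var order []Type
	iter := postorderTraversal(root, nil)
	for iter.Next() {
		// Children are yielded before their parents, except where a
		// cycle forces a back-edge.
		order = append(order, iter.Type)
	}
	return order
}
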
// walkType calls fn on each child of typ.
func walkType(typ Type, fn func(*Type)) {

vendor/github.com/cilium/ebpf/btf/types.go

@@ -276,6 +276,21 @@ func (e *Enum) copy() Type {
return &cpy
}
// has64BitValues returns true if the Enum contains a value larger than 32 bits.
// Kernels before 6.0 have enum values that overrun u32 replaced with zeroes.
//
// 64-bit enums have their Enum.Size attributes correctly set to 8, but if we
// use the size attribute as a heuristic during BTF marshaling, we'll emit
// ENUM64s to kernels that don't support them.
func (e *Enum) has64BitValues() bool {
for _, v := range e.Values {
if v.Value > math.MaxUint32 {
return true
}
}
return false
}
// FwdKind is the type of forward declaration.
type FwdKind int
@@ -393,6 +408,12 @@ func FuncMetadata(ins *asm.Instruction) *Func {
return fn
}
// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
ins.Metadata.Set(funcInfoMeta{}, fn)
return ins
}
func (f *Func) Format(fs fmt.State, verb rune) {
formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
}
@@ -472,6 +493,7 @@ func (ds *Datasec) copy() Type {
//
// It is not a valid Type.
type VarSecinfo struct {
// Var or Func.
Type Type
Offset uint32
Size uint32
@@ -723,10 +745,10 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
var fixups []fixupDef
fixup := func(id TypeID, typ *Type) {
fixup := func(id TypeID, typ *Type) bool {
if id < TypeID(len(baseTypes)) {
*typ = baseTypes[id]
return
return true
}
idx := id
@@ -736,26 +758,29 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
if idx < TypeID(len(types)) {
// We've already inflated this type, fix it up immediately.
*typ = types[idx]
return
return true
}
fixups = append(fixups, fixupDef{id, typ})
return false
}
type assertion struct {
id TypeID
typ *Type
want reflect.Type
}
var assertions []assertion
assert := func(typ *Type, want reflect.Type) error {
if *typ != nil {
// The type has already been fixed up, check the type immediately.
if reflect.TypeOf(*typ) != want {
return fmt.Errorf("expected %s, got %T", want, *typ)
}
fixupAndAssert := func(id TypeID, typ *Type, want reflect.Type) error {
if !fixup(id, typ) {
assertions = append(assertions, assertion{id, typ, want})
return nil
}
assertions = append(assertions, assertion{typ, want})
// The type has already been fixed up, check the type immediately.
if reflect.TypeOf(*typ) != want {
return fmt.Errorf("type ID %d: expected %s, got %T", id, want, *typ)
}
return nil
}
@@ -857,14 +882,14 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
typ = arr
case kindStruct:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
if err != nil {
return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
}
typ = &Struct{name, raw.Size(), members}
case kindUnion:
members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
if err != nil {
return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
}
@@ -873,7 +898,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
case kindEnum:
rawvals := raw.data.([]btfEnum)
vals := make([]EnumValue, 0, len(rawvals))
signed := raw.KindFlag()
signed := raw.Signed()
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
@@ -889,11 +914,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
typ = &Enum{name, raw.Size(), signed, vals}
case kindForward:
if raw.KindFlag() {
typ = &Fwd{name, FwdUnion}
} else {
typ = &Fwd{name, FwdStruct}
}
typ = &Fwd{name, raw.FwdKind()}
case kindTypedef:
typedef := &Typedef{name, nil}
@@ -917,8 +938,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
case kindFunc:
fn := &Func{name, nil, raw.Linkage()}
fixup(raw.Type(), &fn.Type)
if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
if err := fixupAndAssert(raw.Type(), &fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
return nil, err
}
typ = fn
@@ -960,11 +980,8 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
}
for i := range vars {
fixup(btfVars[i].Type, &vars[i].Type)
if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil {
return nil, err
}
}
typ = &Datasec{name, raw.SizeType, vars}
typ = &Datasec{name, raw.Size(), vars}
case kindFloat:
typ = &Float{name, raw.Size()}
@@ -975,12 +992,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
return nil, fmt.Errorf("type id %d: index exceeds int", id)
}
index := int(btfIndex)
if btfIndex == math.MaxUint32 {
index = -1
}
dt := &declTag{nil, name, index}
dt := &declTag{nil, name, int(int32(btfIndex))}
fixup(raw.Type(), &dt.Type)
typ = dt
@@ -991,6 +1003,19 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
fixup(raw.Type(), &tt.Type)
typ = tt
case kindEnum64:
rawvals := raw.data.([]btfEnum64)
vals := make([]EnumValue, 0, len(rawvals))
for i, btfVal := range rawvals {
name, err := rawStrings.Lookup(btfVal.NameOff)
if err != nil {
return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err)
}
value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32)
vals = append(vals, EnumValue{name, value})
}
typ = &Enum{name, raw.Size(), raw.Signed(), vals}
default:
return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
}
@@ -1025,7 +1050,7 @@ func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTabl
for _, assertion := range assertions {
if reflect.TypeOf(*assertion.typ) != assertion.want {
return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ)
return nil, fmt.Errorf("type ID %d: expected %s, got %T", assertion.id, assertion.want, *assertion.typ)
}
}


@@ -151,6 +151,10 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
continue
}
if _, ok := v.Type.(*btf.Var); !ok {
return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname)
}
if replaced[vname] {
return fmt.Errorf("section %s: duplicate variable %s", name, vname)
}
@@ -386,42 +390,11 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co
}, nil
}
type handleCache struct {
btfHandles map[*btf.Spec]*btf.Handle
}
func newHandleCache() *handleCache {
return &handleCache{
btfHandles: make(map[*btf.Spec]*btf.Handle),
}
}
func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
if hc.btfHandles[spec] != nil {
return hc.btfHandles[spec], nil
}
handle, err := btf.NewHandle(spec)
if err != nil {
return nil, err
}
hc.btfHandles[spec] = handle
return handle, nil
}
func (hc handleCache) close() {
for _, handle := range hc.btfHandles {
handle.Close()
}
}
type collectionLoader struct {
coll *CollectionSpec
opts *CollectionOptions
maps map[string]*Map
programs map[string]*Program
handles *handleCache
}
func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) {
@@ -436,7 +409,7 @@ func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collec
return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name)
}
if err := spec.checkCompatibility(m); err != nil {
if err := spec.Compatible(m); err != nil {
return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err)
}
}
@@ -446,13 +419,11 @@ func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collec
opts,
make(map[string]*Map),
make(map[string]*Program),
newHandleCache(),
}, nil
}
// close all resources left over in the collectionLoader.
func (cl *collectionLoader) close() {
cl.handles.close()
for _, m := range cl.maps {
m.Close()
}
@@ -471,10 +442,6 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
return nil, fmt.Errorf("missing map %s", mapName)
}
if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF {
return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName)
}
if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok {
// Clone the map to avoid closing user's map later on.
m, err := replaceMap.Clone()
@@ -486,7 +453,7 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
return m, nil
}
m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles)
m, err := newMapWithOptions(mapSpec, cl.opts.Maps)
if err != nil {
return nil, fmt.Errorf("map %s: %w", mapName, err)
}
@@ -511,10 +478,6 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName)
}
if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF {
return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName)
}
progSpec = progSpec.Copy()
// Rewrite any reference to a valid map in the program's instructions,
@@ -543,7 +506,7 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
}
}
prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles)
prog, err := newProgramWithOptions(progSpec, cl.opts.Programs)
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
@@ -559,17 +522,22 @@ func (cl *collectionLoader) populateMaps() error {
return fmt.Errorf("missing map spec %s", mapName)
}
mapSpec = mapSpec.Copy()
// MapSpecs that refer to inner maps or programs within the same
// CollectionSpec do so using strings. These strings are used as the key
// to look up the respective object in the Maps or Programs fields.
// Resolve those references to actual Map or Program resources that
// have been loaded into the kernel.
for i, kv := range mapSpec.Contents {
if objName, ok := kv.Value.(string); ok {
switch mapSpec.Type {
case ProgramArray:
if mapSpec.Type.canStoreMap() || mapSpec.Type.canStoreProgram() {
mapSpec = mapSpec.Copy()
for i, kv := range mapSpec.Contents {
objName, ok := kv.Value.(string)
if !ok {
continue
}
switch t := mapSpec.Type; {
case t.canStoreProgram():
// loadProgram is idempotent and could return an existing Program.
prog, err := cl.loadProgram(objName)
if err != nil {
@@ -577,7 +545,7 @@ func (cl *collectionLoader) populateMaps() error {
}
mapSpec.Contents[i] = MapKV{kv.Key, prog}
case ArrayOfMaps, HashOfMaps:
case t.canStoreMap():
// loadMap is idempotent and could return an existing Map.
innerMap, err := cl.loadMap(objName)
if err != nil {


@@ -51,6 +51,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, err
}
// Checks if the ELF file is for BPF data.
// Old LLVM versions set e_machine to EM_NONE.
if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF {
return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine)
}
var (
licenseSection *elf.Section
versionSection *elf.Section
@@ -308,7 +314,6 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
KernelVersion: ec.version,
Instructions: insns,
ByteOrder: ec.ByteOrder,
BTF: ec.btf,
}
// Function names must be unique within a single ELF blob.
@@ -897,13 +902,6 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b
}
}
if key == nil {
key = &btf.Void{}
}
if value == nil {
value = &btf.Void{}
}
return &MapSpec{
Name: SanitizeName(name, -1),
Type: MapType(mapType),
@@ -913,7 +911,6 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b
Flags: flags,
Key: key,
Value: value,
BTF: spec,
Pinning: pinType,
InnerMap: innerMapSpec,
Contents: contents,
@@ -1058,7 +1055,6 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
var ds *btf.Datasec
if ec.btf.TypeByName(sec.Name, &ds) == nil {
// Assign the spec's key and BTF only if the Datasec lookup was successful.
mapSpec.BTF = ec.btf
mapSpec.Key = &btf.Void{}
mapSpec.Value = ds
}


@@ -9,6 +9,17 @@ type Deque[T any] struct {
mask uint64
}
// Reset clears the contents of the deque while retaining the backing buffer.
func (dq *Deque[T]) Reset() {
var zero T
for i := dq.read; i < dq.write; i++ {
dq.elems[i&dq.mask] = zero
}
dq.read, dq.write = 0, 0
}
func (dq *Deque[T]) Empty() bool {
return dq.read == dq.write
}
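// Editor's sketch (not part of the upstream diff): Reset zeroes the elements
// but keeps the allocated ring buffer, so a deque can be reused across
// traversals. Push, Pop and Empty are the package's methods; Push and Pop are
// outside this hunk.
//
//	var dq Deque[int]
//	dq.Push(1)
//	dq.Push(2)
//	dq.Reset()              // elements zeroed, backing buffer retained
//	fmt.Println(dq.Empty()) // true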


@@ -12,7 +12,7 @@ import (
//
// The default error output is a summary of the full log. The latter can be
// accessed via VerifierError.Log or by formatting the error, see Format.
func ErrorWithLog(err error, log []byte, truncated bool) *VerifierError {
func ErrorWithLog(source string, err error, log []byte, truncated bool) *VerifierError {
const whitespace = "\t\r\v\n "
// Convert verifier log C string by truncating it on the first 0 byte
@@ -23,7 +23,7 @@ func ErrorWithLog(err error, log []byte, truncated bool) *VerifierError {
log = bytes.Trim(log, whitespace)
if len(log) == 0 {
return &VerifierError{err, nil, truncated}
return &VerifierError{source, err, nil, truncated}
}
logLines := bytes.Split(log, []byte{'\n'})
@@ -34,13 +34,14 @@ func ErrorWithLog(err error, log []byte, truncated bool) *VerifierError {
lines = append(lines, string(bytes.TrimRight(line, whitespace)))
}
return &VerifierError{err, lines, truncated}
return &VerifierError{source, err, lines, truncated}
}
// VerifierError includes information from the eBPF verifier.
//
// It summarises the log output, see Format if you want to output the full contents.
type VerifierError struct {
source string
// The error which caused this error.
Cause error
// The verifier output split into lines.
@@ -60,9 +61,12 @@ func (le *VerifierError) Error() string {
log = log[:n-1]
}
var b strings.Builder
fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error())
n := len(log)
if n == 0 {
return le.Cause.Error()
return b.String()
}
lines := log[n-1:]
@@ -71,14 +75,9 @@ func (le *VerifierError) Error() string {
lines = log[n-2:]
}
var b strings.Builder
fmt.Fprintf(&b, "%s: ", le.Cause.Error())
for i, line := range lines {
for _, line := range lines {
b.WriteString(": ")
b.WriteString(strings.TrimSpace(line))
if i != len(lines)-1 {
b.WriteString(": ")
}
}
omitted := len(le.Log) - len(lines)
@@ -167,7 +166,7 @@ func (le *VerifierError) Format(f fmt.State, verb rune) {
return
}
fmt.Fprintf(f, "%s:", le.Cause.Error())
fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error())
omitted := len(le.Log) - n
lines := le.Log[:n]


@@ -31,10 +31,20 @@ func (ufe *UnsupportedFeatureError) Is(target error) bool {
return target == ErrNotSupported
}
type featureTest struct {
sync.RWMutex
successful bool
result error
// FeatureTest caches the result of a [FeatureTestFn].
//
// Fields should not be modified after creation.
type FeatureTest struct {
// The name of the feature being detected.
Name string
// Version in the form Major.Minor[.Patch].
Version string
// The feature test itself.
Fn FeatureTestFn
mu sync.RWMutex
done bool
result error
}
// FeatureTestFn is used to determine whether the kernel supports
@@ -47,54 +57,128 @@ type featureTest struct {
// err != nil: the test couldn't be executed
type FeatureTestFn func() error
// FeatureTest wraps a function so that it is run at most once.
// NewFeatureTest is a convenient way to create a single [FeatureTest].
func NewFeatureTest(name, version string, fn FeatureTestFn) func() error {
ft := &FeatureTest{
Name: name,
Version: version,
Fn: fn,
}
return ft.execute
}
// execute the feature test.
//
// name should identify the tested feature, while version must be in the
// form Major.Minor[.Patch].
// The result is cached if the test is conclusive.
//
// Returns an error wrapping ErrNotSupported if the feature is not supported.
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
ft := new(featureTest)
return func() error {
ft.RLock()
if ft.successful {
defer ft.RUnlock()
return ft.result
}
ft.RUnlock()
ft.Lock()
defer ft.Unlock()
// Check one more time, on the off
// chance that two goroutines
// were able to call into the write
// lock.
if ft.successful {
return ft.result
}
err := fn()
switch {
case errors.Is(err, ErrNotSupported):
v, err := NewVersion(version)
// See [FeatureTestFn] for the meaning of the returned error.
func (ft *FeatureTest) execute() error {
ft.mu.RLock()
result, done := ft.result, ft.done
ft.mu.RUnlock()
if done {
return result
}
ft.mu.Lock()
defer ft.mu.Unlock()
// The test may have been executed by another caller while we were
// waiting to acquire ft.mu.
if ft.done {
return ft.result
}
err := ft.Fn()
if err == nil {
ft.done = true
return nil
}
if errors.Is(err, ErrNotSupported) {
var v Version
if ft.Version != "" {
v, err = NewVersion(ft.Version)
if err != nil {
return err
return fmt.Errorf("feature %s: %w", ft.Name, err)
}
}
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,
}
fallthrough
case err == nil:
ft.successful = true
default:
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return fmt.Errorf("detect support for %s: %w", name, err)
ft.done = true
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: ft.Name,
}
return ft.result
}
// We couldn't execute the feature test to a point
// where it could make a determination.
// Don't cache the result, just return it.
return fmt.Errorf("detect support for %s: %w", ft.Name, err)
}
// FeatureMatrix groups multiple related feature tests into a map.
//
// Useful when there is a small number of discrete features which are known
// at compile time.
//
// It must not be modified concurrently with calling [FeatureMatrix.Result].
type FeatureMatrix[K comparable] map[K]*FeatureTest
// Result returns the outcome of the feature test for the given key.
//
// It's safe to call this function concurrently.
func (fm FeatureMatrix[K]) Result(key K) error {
ft, ok := fm[key]
if !ok {
return fmt.Errorf("no feature probe for %v", key)
}
return ft.execute()
}
// FeatureCache caches a potentially unlimited number of feature probes.
//
// Useful when there is a high cardinality for a feature test.
type FeatureCache[K comparable] struct {
mu sync.RWMutex
newTest func(K) *FeatureTest
features map[K]*FeatureTest
}
func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] {
return &FeatureCache[K]{
newTest: newTest,
features: make(map[K]*FeatureTest),
}
}
func (fc *FeatureCache[K]) Result(key K) error {
// NB: Executing the feature test happens without fc.mu taken.
return fc.retrieve(key).execute()
}
func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest {
fc.mu.RLock()
ft := fc.features[key]
fc.mu.RUnlock()
if ft != nil {
return ft
}
fc.mu.Lock()
defer fc.mu.Unlock()
if ft := fc.features[key]; ft != nil {
return ft
}
ft = fc.newTest(key)
fc.features[key] = ft
return ft
}
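// Editor's sketch (not part of the upstream diff): NewFeatureTest is the
// replacement for the removed FeatureTest constructor. probeSomething is a
// hypothetical FeatureTestFn; the version string may be empty when no minimum
// kernel version applies.
//
//	var haveSomething = NewFeatureTest("something", "5.8", func() error {
//		// nil on success, ErrNotSupported when the probe is conclusive,
//		// any other error if the probe could not run.
//		return probeSomething()
//	})
//
//	func useFeature() error {
//		if err := haveSomething(); err != nil {
//			return err // wraps ErrNotSupported on old kernels
//		}
//		// ... use the feature ...
//		return nil
//	}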


@@ -0,0 +1,49 @@
// Code generated by "stringer -type MapFlags"; DO NOT EDIT.
package sys
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[BPF_F_NO_PREALLOC-1]
_ = x[BPF_F_NO_COMMON_LRU-2]
_ = x[BPF_F_NUMA_NODE-4]
_ = x[BPF_F_RDONLY-8]
_ = x[BPF_F_WRONLY-16]
_ = x[BPF_F_STACK_BUILD_ID-32]
_ = x[BPF_F_ZERO_SEED-64]
_ = x[BPF_F_RDONLY_PROG-128]
_ = x[BPF_F_WRONLY_PROG-256]
_ = x[BPF_F_CLONE-512]
_ = x[BPF_F_MMAPABLE-1024]
_ = x[BPF_F_PRESERVE_ELEMS-2048]
_ = x[BPF_F_INNER_MAP-4096]
}
const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAP"
var _MapFlags_map = map[MapFlags]string{
1: _MapFlags_name[0:17],
2: _MapFlags_name[17:36],
4: _MapFlags_name[36:51],
8: _MapFlags_name[51:63],
16: _MapFlags_name[63:75],
32: _MapFlags_name[75:95],
64: _MapFlags_name[95:110],
128: _MapFlags_name[110:127],
256: _MapFlags_name[127:144],
512: _MapFlags_name[144:155],
1024: _MapFlags_name[155:169],
2048: _MapFlags_name[169:189],
4096: _MapFlags_name[189:204],
}
func (i MapFlags) String() string {
if str, ok := _MapFlags_map[i]; ok {
return str
}
return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")"
}
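// Editor's sketch (not part of the upstream diff): single flag values now
// print symbolically, while combinations and unknown values fall back to the
// numeric form.
//
//	fmt.Println(BPF_F_NO_PREALLOC)           // BPF_F_NO_PREALLOC
//	fmt.Println(BPF_F_RDONLY | BPF_F_WRONLY) // MapFlags(24)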


@@ -19,7 +19,7 @@ var ENOTSUPP = syscall.Errno(524)
func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
// Prevent the Go profiler from repeatedly interrupting the verifier,
// which could otherwise lead to a livelock due to receiving EAGAIN.
if cmd == BPF_PROG_LOAD {
if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN {
maskProfilerSignal()
defer unmaskProfilerSignal()
}
@@ -120,6 +120,24 @@ type BTFID uint32
// MapFlags control map behaviour.
type MapFlags uint32
//go:generate stringer -type MapFlags
const (
BPF_F_NO_PREALLOC MapFlags = 1 << iota
BPF_F_NO_COMMON_LRU
BPF_F_NUMA_NODE
BPF_F_RDONLY
BPF_F_WRONLY
BPF_F_STACK_BUILD_ID
BPF_F_ZERO_SEED
BPF_F_RDONLY_PROG
BPF_F_WRONLY_PROG
BPF_F_CLONE
BPF_F_MMAPABLE
BPF_F_PRESERVE_ELEMS
BPF_F_INNER_MAP
)
// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
// syscall.E* or unix.E* constants.
//


@@ -1003,6 +1003,21 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
return NewFD(int(fd))
}
type ProgQueryAttr struct {
TargetFd uint32
AttachType AttachType
QueryFlags uint32
AttachFlags uint32
ProgIds Pointer
ProgCount uint32
_ [4]byte
}
func ProgQuery(attr *ProgQueryAttr) error {
_, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}
type ProgRunAttr struct {
ProgFd uint32
Retval uint32


@@ -9,21 +9,22 @@ import (
)
const (
ENOENT = linux.ENOENT
EEXIST = linux.EEXIST
EAGAIN = linux.EAGAIN
ENOSPC = linux.ENOSPC
EINVAL = linux.EINVAL
EPOLLIN = linux.EPOLLIN
EINTR = linux.EINTR
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
EBADF = linux.EBADF
E2BIG = linux.E2BIG
EFAULT = linux.EFAULT
EACCES = linux.EACCES
EILSEQ = linux.EILSEQ
ENOENT = linux.ENOENT
EEXIST = linux.EEXIST
EAGAIN = linux.EAGAIN
ENOSPC = linux.ENOSPC
EINVAL = linux.EINVAL
EPOLLIN = linux.EPOLLIN
EINTR = linux.EINTR
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
EBADF = linux.EBADF
E2BIG = linux.E2BIG
EFAULT = linux.EFAULT
EACCES = linux.EACCES
EILSEQ = linux.EILSEQ
EOPNOTSUPP = linux.EOPNOTSUPP
)
const (
@@ -74,6 +75,8 @@ const (
SIGPROF = linux.SIGPROF
SIG_BLOCK = linux.SIG_BLOCK
SIG_UNBLOCK = linux.SIG_UNBLOCK
EM_NONE = linux.EM_NONE
EM_BPF = linux.EM_BPF
)
type Statfs_t = linux.Statfs_t


@@ -26,6 +26,7 @@ const (
EFAULT
EACCES
EILSEQ
EOPNOTSUPP
)
// Constants are distinct to avoid breaking switch statements.
@@ -78,6 +79,8 @@ const (
SIGPROF
SIG_BLOCK
SIG_UNBLOCK
EM_NONE
EM_BPF
)
type Statfs_t struct {


@@ -23,6 +23,9 @@ func vdsoVersion() (uint32, error) {
// to the process. Go does not expose that data, so we must read it from procfs.
// https://man7.org/linux/man-pages/man3/getauxval.3.html
av, err := os.Open("/proc/self/auxv")
if errors.Is(err, unix.EACCES) {
return 0, fmt.Errorf("opening auxv: %w (process may not be dumpable due to file capabilities)", err)
}
if err != nil {
return 0, fmt.Errorf("opening auxv: %w", err)
}


@@ -25,7 +25,7 @@ type probeType uint8
type probeArgs struct {
symbol, group, path string
offset, refCtrOffset, cookie uint64
pid int
pid, retprobeMaxActive int
ret bool
}
@@ -41,6 +41,12 @@ type KprobeOptions struct {
// Can be used to insert kprobes at arbitrary offsets in kernel functions,
// e.g. in places where functions have been inlined.
Offset uint64
// Increase the maximum number of concurrent invocations of a kretprobe.
// Required when tracing some long-running functions in the kernel.
//
// Deprecated: this setting forces the use of an outdated kernel API and is not portable
// across kernel versions.
RetprobeMaxActive int
}
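// Editor's sketch (not part of the upstream diff): raising maxactive for a
// kretprobe on a long-running function. The symbol name is illustrative, and
// the field is deprecated because it forces the legacy tracefs API.
//
//	kp, err := link.Kretprobe("some_long_running_symbol", prog, &link.KprobeOptions{
//		RetprobeMaxActive: 64,
//	})
//	if err != nil {
//		// handle error
//	}
//	defer kp.Close()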
const (
@@ -176,6 +182,7 @@ func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*
}
if opts != nil {
args.retprobeMaxActive = opts.RetprobeMaxActive
args.cookie = opts.Cookie
args.offset = opts.Offset
}
@@ -231,6 +238,11 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
return nil, err
}
// Use tracefs if we want to set kretprobe's retprobeMaxActive.
if args.retprobeMaxActive != 0 {
return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported)
}
var config uint64
if args.ret {
bit, err := readUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", typ.String(), "/format/retprobe")
@@ -347,7 +359,7 @@ func tracefsKprobe(args probeArgs) (*perfEvent, error) {
// Path and offset are only set in the case of uprobe(s) and are used to set
// the executable/library path on the filesystem and the offset where the probe is inserted.
// A perf event is then opened on the newly-created trace event and returned to the caller.
func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) {
func tracefsProbe(typ probeType, args probeArgs) (*perfEvent, error) {
// Generate a random string for each trace event we attempt to create.
// This value is used as the 'group' token in tracefs to allow creating
// multiple kprobe trace events with the same name.
@@ -357,41 +369,20 @@ func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) {
}
args.group = group
// Before attempting to create a trace event through tracefs,
// check if an event with the same group and name already exists.
// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
// entry, so we need to rely on reads for detecting uniqueness.
_, err = getTraceEventID(group, args.symbol)
if err == nil {
return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol)
}
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err)
}
// Create the [k,u]probe trace event using tracefs.
if err := createTraceFSProbeEvent(typ, args); err != nil {
return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
}
defer func() {
if err != nil {
// Make sure we clean up the created tracefs event when we return error.
// If a livepatch handler is already active on the symbol, the write to
// tracefs will succeed, a trace event will show up, but creating the
// perf event will fail with EBUSY.
_ = closeTraceFSProbeEvent(typ, args.group, args.symbol)
}
}()
// Get the newly-created trace event's id.
tid, err := getTraceEventID(group, args.symbol)
tid, err := createTraceFSProbeEvent(typ, args)
if err != nil {
return nil, fmt.Errorf("getting trace event id: %w", err)
return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
}
// Kprobes are ephemeral tracepoints and share the same perf event type.
fd, err := openTracepointPerfEvent(tid, args.pid)
if err != nil {
// Make sure we clean up the created tracefs event when we return error.
// If a livepatch handler is already active on the symbol, the write to
// tracefs will succeed, a trace event will show up, but creating the
// perf event will fail with EBUSY.
_ = closeTraceFSProbeEvent(typ, args.group, args.symbol)
return nil, err
}
@@ -405,15 +396,32 @@ func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) {
}, nil
}
// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
// <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
var errInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
// createTraceFSProbeEvent creates a new ephemeral trace event.
//
// Returns os.ErrNotExist if symbol is not a valid
// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
// if a probe with the same group and symbol already exists.
func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
// if a probe with the same group and symbol already exists. Returns an error if
// args.retprobeMaxActive is used on non-kprobe types. Returns ErrNotSupported if
// the kernel is too old to support kretprobe maxactive.
func createTraceFSProbeEvent(typ probeType, args probeArgs) (uint64, error) {
// Before attempting to create a trace event through tracefs,
// check if an event with the same group and name already exists.
// Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
// entry, so we need to rely on reads for detecting uniqueness.
_, err := getTraceEventID(args.group, args.symbol)
if err == nil {
return 0, fmt.Errorf("trace event %s/%s: %w", args.group, args.symbol, os.ErrExist)
}
if err != nil && !errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("checking trace event %s/%s: %w", args.group, args.symbol, err)
}
// Open the kprobe_events file in tracefs.
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
return 0, fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
}
defer f.Close()
@@ -434,8 +442,11 @@ func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
// subsampling or rate limiting logic can be more accurately implemented in
// the eBPF program itself.
// See Documentation/kprobes.txt for more details.
if args.retprobeMaxActive != 0 && !args.ret {
return 0, errInvalidMaxActive
}
token = kprobeToken(args)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret, args.retprobeMaxActive), args.group, sanitizeSymbol(args.symbol), token)
case uprobeType:
// The uprobe_events syntax is as follows:
// p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
@@ -447,38 +458,63 @@ func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
// p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
//
// See Documentation/trace/uprobetracer.txt for more details.
if args.retprobeMaxActive != 0 {
return 0, errInvalidMaxActive
}
token = uprobeToken(args)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token)
pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret, 0), args.group, args.symbol, token)
}
_, err = f.WriteString(pe)
// Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
// when trying to create a retprobe for a missing symbol.
if errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("token %s: not found: %w", token, err)
return 0, fmt.Errorf("token %s: not found: %w", token, err)
}
// Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
// to an invalid insn boundary. The exact conditions that trigger this error are
// arch specific however.
if errors.Is(err, syscall.EILSEQ) {
return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
return 0, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
}
// ERANGE is returned when the `SYM[+offs]` token is too big and cannot
// be resolved.
if errors.Is(err, syscall.ERANGE) {
return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
return 0, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
}
if err != nil {
return fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
return 0, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
}
return nil
// Get the newly-created trace event's id.
tid, err := getTraceEventID(args.group, args.symbol)
if args.retprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
// Kernels < 4.12 don't support maxactive and therefore auto-generate
// group and event names from the symbol and offset. The symbol is used
// without any sanitization.
// See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
event := fmt.Sprintf("kprobes/r_%s_%d", args.symbol, args.offset)
if err := removeTraceFSProbeEvent(typ, event); err != nil {
return 0, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
}
return 0, fmt.Errorf("create trace event with non-default maxactive: %w", ErrNotSupported)
}
if err != nil {
return 0, fmt.Errorf("get trace event id: %w", err)
}
return tid, nil
}
// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
// from <tracefs>/[k,u]probe_events.
func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
pe := fmt.Sprintf("%s/%s", group, sanitizeSymbol(symbol))
return removeTraceFSProbeEvent(typ, pe)
}
func removeTraceFSProbeEvent(typ probeType, pe string) error {
f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
if err != nil {
return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
@@ -487,9 +523,8 @@ func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
// See [k,u]probe_events syntax above. The probe type does not need to be specified
// for removals.
pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol))
if _, err = f.WriteString(pe); err != nil {
return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
if _, err = f.WriteString("-:" + pe); err != nil {
return fmt.Errorf("remove event %q from %s: %w", pe, typ.EventsPath(), err)
}
return nil
@@ -517,8 +552,11 @@ func randomGroup(prefix string) (string, error) {
return group, nil
}
func probePrefix(ret bool) string {
func probePrefix(ret bool, maxActive int) string {
if ret {
if maxActive > 0 {
return fmt.Sprintf("r%d", maxActive)
}
return "r"
}
return "p"


@@ -138,7 +138,7 @@ func (kml *kprobeMultiLink) Unpin() error {
return fmt.Errorf("unpin kprobe_multi: %w", ErrNotSupported)
}
var haveBPFLinkKprobeMulti = internal.FeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Name: "probe_kpm_link",
Type: ebpf.Kprobe,
@@ -164,12 +164,16 @@ var haveBPFLinkKprobeMulti = internal.FeatureTest("bpf_link_kprobe_multi", "5.18
Count: 1,
Syms: sys.NewStringSlicePointer([]string{"vprintk"}),
})
if errors.Is(err, unix.EINVAL) {
switch {
case errors.Is(err, unix.EINVAL):
return internal.ErrNotSupported
}
if err != nil {
// If CONFIG_FPROBE isn't set.
case errors.Is(err, unix.EOPNOTSUPP):
return internal.ErrNotSupported
case err != nil:
return err
}
fd.Close()
return nil


@@ -230,6 +230,11 @@ func (l *RawLink) Unpin() error {
return nil
}
// IsPinned returns true if the Link has a non-empty pinned path.
func (l *RawLink) IsPinned() bool {
return l.pinnedPath != ""
}
// Update implements the Link interface.
func (l *RawLink) Update(new *ebpf.Program) error {
return l.UpdateArgs(RawLinkUpdateOptions{


@@ -276,7 +276,7 @@ func getTraceEventID(group, name string) (uint64, error) {
}
tid, err := readUint64FromFile("%d\n", path)
if errors.Is(err, os.ErrNotExist) {
return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
return 0, err
}
if err != nil {
return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
@@ -380,7 +380,7 @@ func readUint64FromFileOnce(format string, path ...string) (uint64, error) {
//
// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error {
var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Name: "probe_bpf_perf_link",
Type: ebpf.Kprobe,

vendor/github.com/cilium/ebpf/link/query.go

@@ -0,0 +1,63 @@
package link
import (
"fmt"
"os"
"unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal/sys"
)
// QueryOptions defines additional parameters when querying for programs.
type QueryOptions struct {
// Path can be a path to a cgroup, netns or LIRC2 device
Path string
// Attach specifies the AttachType of the programs queried for
Attach ebpf.AttachType
// QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
QueryFlags uint32
}
// QueryPrograms retrieves ProgramIDs associated with the AttachType.
//
// It only returns IDs of programs that were attached using PROG_ATTACH and not bpf_link.
// Returns (nil, nil) if there are no programs attached to the queried kernel resource.
// Calling QueryPrograms on a kernel missing PROG_QUERY will result in ErrNotSupported.
func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
if haveProgQuery() != nil {
return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
}
f, err := os.Open(opts.Path)
if err != nil {
return nil, fmt.Errorf("can't open file: %s", err)
}
defer f.Close()
// Query the number of attached programs in order to allocate a correctly sized slice.
attr := sys.ProgQueryAttr{
TargetFd: uint32(f.Fd()),
AttachType: sys.AttachType(opts.Attach),
QueryFlags: opts.QueryFlags,
}
if err := sys.ProgQuery(&attr); err != nil {
return nil, fmt.Errorf("can't query program count: %w", err)
}
// return nil if no progs are attached
if attr.ProgCount == 0 {
return nil, nil
}
// we have at least one prog, so we query again
progIds := make([]ebpf.ProgramID, attr.ProgCount)
attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0]))
attr.ProgCount = uint32(len(progIds))
if err := sys.ProgQuery(&attr); err != nil {
return nil, fmt.Errorf("can't query program IDs: %w", err)
}
return progIds, nil
}
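// Editor's sketch (not part of the upstream diff): listing programs attached
// to a cgroup with PROG_ATTACH. The cgroup path is illustrative.
//
//	ids, err := link.QueryPrograms(link.QueryOptions{
//		Path:   "/sys/fs/cgroup/unified",
//		Attach: ebpf.AttachCGroupInetIngress,
//	})
//	if err != nil {
//		// handle error
//	}
//	for _, id := range ids {
//		fmt.Println("attached program ID:", id)
//	}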


@@ -26,7 +26,7 @@ const (
KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI
)
var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
Type: ebpf.CGroupSKB,
License: "MIT",
@@ -46,7 +46,7 @@ var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() erro
return nil
})
var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error {
if err := haveProgAttach(); err != nil {
return err
}
@@ -86,7 +86,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace
return err
})
var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error {
attr := sys.LinkCreateAttr{
// This is a hopefully invalid file descriptor, which triggers EBADF.
TargetFd: ^uint32(0),
@@ -102,3 +102,22 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
}
return err
})
var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error {
attr := sys.ProgQueryAttr{
// We rely on this being checked during the syscall.
// With an otherwise correct payload we expect EBADF here
// as an indication that the feature is present.
TargetFd: ^uint32(0),
AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
}
err := sys.ProgQuery(&attr)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
if errors.Is(err, unix.EBADF) {
return nil
}
return err
})


@@ -48,7 +48,7 @@ func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (
}
defer btfHandle.Close()
spec, err := btfHandle.Spec(nil)
spec, err := btfHandle.Spec()
if err != nil {
return nil, err
}


@@ -18,7 +18,7 @@ var (
uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
uprobeRefCtrOffsetShift = 32
haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error {
haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error {
_, err := os.Stat(uprobeRefCtrOffsetPMUPath)
if err != nil {
return internal.ErrNotSupported


@@ -4,7 +4,6 @@ import (
"encoding/binary"
"errors"
"fmt"
"sync"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
@@ -88,9 +87,12 @@ func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOr
bo = internal.NativeEndian
}
target, err := maybeLoadKernelBTF(target)
if err != nil {
return err
if target == nil {
var err error
target, err = btf.LoadKernelSpec()
if err != nil {
return fmt.Errorf("load kernel spec: %w", err)
}
}
fixups, err := btf.CORERelocate(relos, target, bo)
@@ -216,29 +218,3 @@ func fixupProbeReadKernel(ins *asm.Instruction) {
ins.Constant = int64(asm.FnProbeReadStr)
}
}
var kernelBTF struct {
sync.Mutex
spec *btf.Spec
}
// maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise
// it returns spec unchanged.
//
// The kernel BTF is cached for the lifetime of the process.
func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) {
if spec != nil {
return spec, nil
}
kernelBTF.Lock()
defer kernelBTF.Unlock()
if kernelBTF.spec != nil {
return kernelBTF.spec, nil
}
var err error
kernelBTF.spec, err = btf.LoadKernelSpec()
return kernelBTF.spec, err
}

vendor/github.com/cilium/ebpf/map.go

@@ -77,9 +77,6 @@ type MapSpec struct {
// The key and value type of this map. May be nil.
Key, Value btf.Type
// The BTF associated with this map.
BTF *btf.Spec
}
func (ms *MapSpec) String() string {
@@ -104,12 +101,6 @@ func (ms *MapSpec) Copy() *MapSpec {
return &cpy
}
// hasBTF returns true if the MapSpec has a valid BTF spec and if its
// map type supports associated BTF metadata in the kernel.
func (ms *MapSpec) hasBTF() bool {
return ms.BTF != nil && ms.Type.hasBTF()
}
func (ms *MapSpec) clampPerfEventArraySize() error {
if ms.Type != PerfEventArray {
return nil
@@ -158,7 +149,11 @@ type MapKV struct {
Value interface{}
}
func (ms *MapSpec) checkCompatibility(m *Map) error {
// Compatible returns nil if an existing map may be used instead of creating
// one from the spec.
//
// Returns an error wrapping [ErrMapIncompatible] otherwise.
func (ms *MapSpec) Compatible(m *Map) error {
switch {
case m.typ != ms.Type:
return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible)
@@ -241,10 +236,7 @@ func NewMap(spec *MapSpec) (*Map, error) {
//
// May return an error wrapping ErrMapIncompatible.
func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
handles := newHandleCache()
defer handles.close()
m, err := newMapWithOptions(spec, opts, handles)
m, err := newMapWithOptions(spec, opts)
if err != nil {
return nil, fmt.Errorf("creating map: %w", err)
}
@@ -257,7 +249,7 @@ func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
return m, nil
}
func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) {
func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) {
closeOnError := func(c io.Closer) {
if err != nil {
c.Close()
@@ -284,7 +276,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
}
defer closeOnError(m)
if err := spec.checkCompatibility(m); err != nil {
if err := spec.Compatible(m); err != nil {
return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
}
@@ -307,7 +299,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
return nil, errors.New("inner maps cannot be pinned")
}
template, err := spec.InnerMap.createMap(nil, opts, handles)
template, err := spec.InnerMap.createMap(nil, opts)
if err != nil {
return nil, fmt.Errorf("inner map: %w", err)
}
@@ -319,7 +311,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
innerFd = template.fd
}
m, err := spec.createMap(innerFd, opts, handles)
m, err := spec.createMap(innerFd, opts)
if err != nil {
return nil, err
}
@@ -328,7 +320,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
if spec.Pinning == PinByName {
path := filepath.Join(opts.PinPath, spec.Name)
if err := m.Pin(path); err != nil {
return nil, fmt.Errorf("pin map: %w", err)
return nil, fmt.Errorf("pin map to %s: %w", path, err)
}
}
@@ -337,15 +329,13 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_
// createMap validates the spec's properties and creates the map in the kernel
// using the given opts. It does not populate or freeze the map.
func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) {
func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) {
closeOnError := func(closer io.Closer) {
if err != nil {
closer.Close()
}
}
spec = spec.Copy()
// Kernels 4.13 through 5.4 used a struct bpf_map_def that contained
// additional 'inner_map_idx' and later 'numa_node' fields.
// In order to support loading these definitions, tolerate the presence of
@@ -365,17 +355,21 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
if spec.ValueSize != 0 && spec.ValueSize != 4 {
return nil, errors.New("ValueSize must be zero or four for map of map")
}
spec = spec.Copy()
spec.ValueSize = 4
case PerfEventArray:
if spec.KeySize != 0 && spec.KeySize != 4 {
return nil, errors.New("KeySize must be zero or four for perf event array")
}
spec.KeySize = 4
if spec.ValueSize != 0 && spec.ValueSize != 4 {
return nil, errors.New("ValueSize must be zero or four for perf event array")
}
spec = spec.Copy()
spec.KeySize = 4
spec.ValueSize = 4
if spec.MaxEntries == 0 {
@@ -425,23 +419,16 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
attr.MapName = sys.NewObjName(spec.Name)
}
if spec.hasBTF() {
handle, err := handles.btfHandle(spec.BTF)
if spec.Key != nil || spec.Value != nil {
handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
keyTypeID, err := spec.BTF.TypeID(spec.Key)
if err != nil {
return nil, err
}
valueTypeID, err := spec.BTF.TypeID(spec.Value)
if err != nil {
return nil, err
}
defer handle.Close()
// Use BTF k/v during map creation.
attr.BtfFd = uint32(handle.FD())
attr.BtfKeyTypeId = uint32(keyTypeID)
attr.BtfValueTypeId = uint32(valueTypeID)
@@ -449,16 +436,23 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa
}
fd, err := sys.MapCreate(&attr)
// Some map types don't support BTF k/v in earlier kernel versions.
// Remove BTF metadata and retry map creation.
if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 {
attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0
fd, err = sys.MapCreate(&attr)
}
if err != nil {
if errors.Is(err, unix.EPERM) {
return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
}
if !spec.hasBTF() {
return nil, fmt.Errorf("map create without BTF: %w", err)
}
if errors.Is(err, unix.EINVAL) && attr.MaxEntries == 0 {
return nil, fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err)
}
if attr.BtfFd == 0 {
return nil, fmt.Errorf("map create: %w (without BTF k/v)", err)
}
return nil, fmt.Errorf("map create: %w", err)
}
defer closeOnError(fd)
@@ -1095,7 +1089,8 @@ func (m *Map) Clone() (*Map, error) {
// the new path already exists. Re-pinning across filesystems is not supported.
// You can Clone a map to pin it to a different path.
//
// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
// This requires bpffs to be mounted above fileName.
// See https://docs.cilium.io/en/stable/concepts/kubernetes/configuration/#mounting-bpffs-with-systemd
func (m *Map) Pin(fileName string) error {
if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
return err

vendor/github.com/cilium/ebpf/prog.go

@@ -123,11 +123,6 @@ type ProgramSpec struct {
// detect this value automatically.
KernelVersion uint32
// The BTF associated with this program. Changing Instructions
// will most likely invalidate the contained data, and may
// result in errors when attempting to load it into the kernel.
BTF *btf.Spec
// The byte order this program was compiled for, may be nil.
ByteOrder binary.ByteOrder
}
@@ -151,6 +146,10 @@ func (ps *ProgramSpec) Tag() (string, error) {
return ps.Instructions.Tag(internal.NativeEndian)
}
// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a
// program is rejected by the verifier.
//
// Use [errors.As] to access the error.
type VerifierError = internal.VerifierError
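// Editor's sketch (not part of the upstream diff): with the alias exported,
// callers can unwrap the error and print the full verifier log.
//
//	_, err := ebpf.NewProgram(spec)
//	var verr *ebpf.VerifierError
//	if errors.As(err, &verr) {
//		fmt.Printf("%+v\n", verr) // full log, line by line
//	}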
// Program represents BPF program loaded into the kernel.
@@ -169,7 +168,7 @@ type Program struct {
// NewProgram creates a new Program.
//
// See NewProgramWithOptions for details.
// See [NewProgramWithOptions] for details.
func NewProgram(spec *ProgramSpec) (*Program, error) {
return NewProgramWithOptions(spec, ProgramOptions{})
}
@@ -179,24 +178,20 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
//
// Returns an error wrapping VerifierError if the program or its BTF is rejected
// by the kernel.
// Returns a [VerifierError] if the program is rejected by the kernel.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
if spec == nil {
return nil, errors.New("can't load a program from a nil spec")
}
handles := newHandleCache()
defer handles.close()
prog, err := newProgramWithOptions(spec, opts, handles)
prog, err := newProgramWithOptions(spec, opts)
if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
}
return prog, err
}
func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) {
func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
if len(spec.Instructions) == 0 {
return nil, errors.New("instructions cannot be empty")
}
@@ -241,30 +236,22 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
insns := make(asm.Instructions, len(spec.Instructions))
copy(insns, spec.Instructions)
var btfDisabled bool
if spec.BTF != nil {
handle, err := handles.btfHandle(spec.BTF)
btfDisabled = errors.Is(err, btf.ErrNotSupported)
if err != nil && !btfDisabled {
return nil, fmt.Errorf("load BTF: %w", err)
}
handle, fib, lib, err := btf.MarshalExtInfos(insns)
if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("load ext_infos: %w", err)
}
if handle != nil {
defer handle.Close()
if handle != nil {
attr.ProgBtfFd = uint32(handle.FD())
attr.ProgBtfFd = uint32(handle.FD())
fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID)
if err != nil {
return nil, err
}
attr.FuncInfoRecSize = btf.FuncInfoSize
attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
attr.FuncInfo = sys.NewSlicePointer(fib)
attr.FuncInfoRecSize = btf.FuncInfoSize
attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
attr.FuncInfo = sys.NewSlicePointer(fib)
attr.LineInfoRecSize = btf.LineInfoSize
attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
attr.LineInfo = sys.NewSlicePointer(lib)
}
attr.LineInfoRecSize = btf.LineInfoSize
attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
attr.LineInfo = sys.NewSlicePointer(lib)
}
if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder); err != nil {
@@ -276,7 +263,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
}
buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
err := insns.Marshal(buf, internal.NativeEndian)
err = insns.Marshal(buf, internal.NativeEndian)
if err != nil {
return nil, err
}
@@ -313,9 +300,7 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
opts.LogSize = DefaultVerifierLogSize
}
// The caller provided a specific verifier log level. Immediately load
// the program with the given log level and buffer size, and skip retrying
// with a different level / size later.
// The caller requested a specific verifier log level. Set up the log buffer.
var logBuf []byte
if !opts.LogDisabled && opts.LogLevel != 0 {
logBuf = make([]byte, opts.LogSize)
@@ -329,17 +314,19 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil
}
// A verifier error occurred, but the caller did not specify a log level.
// Re-run with branch-level verifier logs enabled to obtain more info.
var truncated bool
// An error occurred loading the program, but the caller did not explicitly
// enable the verifier log. Re-run with branch-level verifier logs enabled to
// obtain more info. Preserve the original error to return it to the caller.
// An undersized log buffer will result in ENOSPC regardless of the underlying
// cause.
var err2 error
if !opts.LogDisabled && opts.LogLevel == 0 {
logBuf = make([]byte, opts.LogSize)
attr.LogLevel = LogLevelBranch
attr.LogSize = uint32(len(logBuf))
attr.LogBuf = sys.NewSlicePointer(logBuf)
_, ve := sys.ProgLoad(attr)
truncated = errors.Is(ve, unix.ENOSPC)
_, err2 = sys.ProgLoad(attr)
}
switch {
@@ -364,11 +351,8 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand
}
}
err = internal.ErrorWithLog(err, logBuf, truncated)
if btfDisabled {
return nil, fmt.Errorf("load program: %w (kernel without BTF support)", err)
}
return nil, fmt.Errorf("load program: %w", err)
truncated := errors.Is(err, unix.ENOSPC) || errors.Is(err2, unix.ENOSPC)
return nil, internal.ErrorWithLog("load program", err, logBuf, truncated)
}
// NewProgramFromFD creates a program from a raw fd.
@@ -406,7 +390,7 @@ func newProgramFromFD(fd *sys.FD) (*Program, error) {
return nil, fmt.Errorf("discover program type: %w", err)
}
return &Program{"", fd, "", "", info.Type}, nil
return &Program{"", fd, info.Name, "", info.Type}, nil
}
func (p *Program) String() string {
@@ -477,7 +461,8 @@ func (p *Program) Clone() (*Program, error) {
// Calling Pin on a previously pinned program will overwrite the path, except when
// the new path already exists. Re-pinning across filesystems is not supported.
//
// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
// This requires bpffs to be mounted above fileName.
// See https://docs.cilium.io/en/stable/concepts/kubernetes/configuration/#mounting-bpffs-with-systemd
func (p *Program) Pin(fileName string) error {
if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
return err
@@ -518,6 +503,9 @@ func (p *Program) Close() error {
// Various options for Run'ing a Program
type RunOptions struct {
// Program's data input. Required field.
//
// The kernel expects at least 14 bytes of input for an Ethernet header for
// XDP and SKB programs.
Data []byte
// Program's data after Program has run. Caller must allocate. Optional field.
DataOut []byte
@@ -525,7 +513,10 @@ type RunOptions struct {
Context interface{}
// Program's context after Program has run. Must be a pointer or slice. Optional field.
ContextOut interface{}
// Number of times to run Program. Optional field. Defaults to 1.
// Minimum number of times to run Program. Optional field. Defaults to 1.
//
// The program may be executed more often than this due to interruptions, e.g.
// when runtime.AllThreadsSyscall is invoked.
Repeat uint32
// Optional flags.
Flags uint32
@@ -534,6 +525,8 @@ type RunOptions struct {
CPU uint32
// Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer
// or similar. Typically used during benchmarking. Optional field.
//
// Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead.
Reset func()
}
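// Editor's sketch (not part of the upstream diff): the 14-byte minimum
// corresponds to an empty Ethernet frame.
//
//	ret, err := prog.Run(&ebpf.RunOptions{
//		Data: make([]byte, 14),
//	})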
@@ -561,9 +554,9 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) {
Repeat: 1,
}
ret, _, err := p.testRun(&opts)
ret, _, err := p.run(&opts)
if err != nil {
return ret, nil, fmt.Errorf("can't test program: %w", err)
return ret, nil, fmt.Errorf("test program: %w", err)
}
return ret, opts.DataOut, nil
}
@@ -572,9 +565,9 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) {
//
// Note: the same restrictions from Test apply.
func (p *Program) Run(opts *RunOptions) (uint32, error) {
ret, _, err := p.testRun(opts)
ret, _, err := p.run(opts)
if err != nil {
return ret, fmt.Errorf("can't test program: %w", err)
return ret, fmt.Errorf("run program: %w", err)
}
return ret, nil
}
@@ -601,14 +594,14 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D
Reset: reset,
}
ret, total, err := p.testRun(&opts)
ret, total, err := p.run(&opts)
if err != nil {
return ret, total, fmt.Errorf("can't benchmark program: %w", err)
return ret, total, fmt.Errorf("benchmark program: %w", err)
}
return ret, total, nil
}
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error {
prog, err := NewProgram(&ProgramSpec{
// SocketFilter does not require privileges on newer kernels.
Type: SocketFilter,
@@ -652,12 +645,12 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e
return err
})
func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) {
func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) {
if uint(len(opts.Data)) > math.MaxUint32 {
return 0, 0, fmt.Errorf("input is too long")
}
if err := haveProgTestRun(); err != nil {
if err := haveProgRun(); err != nil {
return 0, 0, err
}
@@ -690,24 +683,45 @@ func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) {
Cpu: opts.CPU,
}
if attr.Repeat == 0 {
attr.Repeat = 1
}
retry:
for {
err := sys.ProgRun(&attr)
if err == nil {
break
break retry
}
if errors.Is(err, unix.EINTR) {
if attr.Repeat == 1 {
// Older kernels check whether enough repetitions have been
// executed only after checking for pending signals.
//
// run signal? done? run ...
//
// As a result we can get EINTR for repeat==1 even though
// the program was run exactly once. Treat this as a
// successful run instead.
//
// Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code")
// the conditions are reversed:
// run done? signal? ...
break retry
}
if opts.Reset != nil {
opts.Reset()
}
continue
continue retry
}
if errors.Is(err, sys.ENOTSUPP) {
return 0, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported)
return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported)
}
return 0, 0, fmt.Errorf("can't run test: %w", err)
return 0, 0, err
}
if opts.DataOut != nil {
@@ -861,17 +875,14 @@ func findTargetInKernel(name string, progType ProgramType, attachType AttachType
return nil, 0, errUnrecognizedAttachType
}
// maybeLoadKernelBTF may return external BTF if /sys/... is not available.
// Ideally we shouldn't use external BTF here, since we might try to use
// it for parsing kmod split BTF later on. That seems unlikely to work.
spec, err := maybeLoadKernelBTF(nil)
spec, err := btf.LoadKernelSpec()
if err != nil {
return nil, 0, fmt.Errorf("load kernel spec: %w", err)
}
err = spec.TypeByName(typeName, &target)
if errors.Is(err, btf.ErrNotFound) {
module, id, err := findTargetInModule(spec, typeName, target)
module, id, err := findTargetInModule(typeName, target)
if errors.Is(err, btf.ErrNotFound) {
return nil, 0, &internal.UnsupportedFeatureError{Name: featureName}
}
@@ -880,6 +891,12 @@ func findTargetInKernel(name string, progType ProgramType, attachType AttachType
}
return module, id, nil
}
// See cilium/ebpf#894. Until we can disambiguate between equally-named kernel
// symbols, we should explicitly refuse program loads. They will not reliably
// do what the caller intended.
if errors.Is(err, btf.ErrMultipleMatches) {
return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err)
}
if err != nil {
return nil, 0, fmt.Errorf("find target for %s in vmlinux: %w", featureName, err)
}
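On the caller side the new guard should be detectable with errors.Is, since the diff wraps it with %w; a sketch assuming the error propagates out of program creation and that spec targets a kernel symbol by name:

    package sketch

    import (
        "errors"

        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/btf"
    )

    func loadOrExplain(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
        prog, err := ebpf.NewProgram(spec)
        if errors.Is(err, btf.ErrMultipleMatches) {
            // The attach target matches several kernel symbols;
            // v0.10.0 refuses the load rather than guessing.
            return nil, err
        }
        return prog, err
    }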
@@ -893,7 +910,7 @@ func findTargetInKernel(name string, progType ProgramType, attachType AttachType
// vmlinux must contain the kernel's types and is used to parse kmod BTF.
//
// Returns btf.ErrNotFound if the target can't be found in any module.
func findTargetInModule(vmlinux *btf.Spec, typeName string, target btf.Type) (*btf.Handle, btf.TypeID, error) {
func findTargetInModule(typeName string, target btf.Type) (*btf.Handle, btf.TypeID, error) {
it := new(btf.HandleIterator)
defer it.Handle.Close()
@@ -907,7 +924,7 @@ func findTargetInModule(vmlinux *btf.Spec, typeName string, target btf.Type) (*b
continue
}
spec, err := it.Handle.Spec(vmlinux)
spec, err := it.Handle.Spec()
if err != nil {
return nil, 0, fmt.Errorf("parse types for module %s: %w", info.Name, err)
}
@@ -957,7 +974,7 @@ func findTargetInProgram(prog *Program, name string, progType ProgramType, attac
}
defer btfHandle.Close()
spec, err := btfHandle.Spec(nil)
spec, err := btfHandle.Spec()
if err != nil {
return 0, err
}
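For reference, a sketch of the iterator shape used above with the zero-argument Handle.Spec of v0.10.0; vmlinux types are resolved internally now, so callers no longer thread a *btf.Spec through:

    package sketch

    import (
        "fmt"

        "github.com/cilium/ebpf/btf"
    )

    // forEachModuleSpec calls fn with the parsed BTF of every loaded kernel module.
    func forEachModuleSpec(fn func(name string, spec *btf.Spec) error) error {
        it := new(btf.HandleIterator)
        defer it.Handle.Close()

        for it.Next() {
            info, err := it.Handle.Info()
            if err != nil {
                return err
            }
            if !info.IsModule() {
                continue
            }
            spec, err := it.Handle.Spec()
            if err != nil {
                return fmt.Errorf("parse types for module %s: %w", info.Name, err)
            }
            if err := fn(info.Name, spec); err != nil {
                return err
            }
        }
        return it.Err()
    }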

@@ -47,7 +47,7 @@ func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD,
})
}
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error {
_, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(ArrayOfMaps),
KeySize: 4,
@@ -65,7 +65,7 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
return err
})
var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error {
// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
// BPF_MAP_FREEZE appeared in 5.2 as well, we don't do a separate check.
m, err := sys.MapCreate(&sys.MapCreateAttr{
@@ -82,7 +82,7 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps
return nil
})
var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error {
var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error {
// This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
m, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(Array),
@@ -98,7 +98,7 @@ var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error
return nil
})
var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error {
// This checks BPF_F_INNER_MAP, which appeared in 5.10.
m, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(Array),
@@ -114,7 +114,7 @@ var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
return nil
})
var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error {
var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error {
// This checks BPF_F_NO_PREALLOC, which appeared in 4.6.
m, err := sys.MapCreate(&sys.MapCreateAttr{
MapType: sys.MapType(Hash),
@@ -154,7 +154,7 @@ func wrapMapError(err error) error {
return err
}
var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error {
attr := sys.MapCreateAttr{
MapType: sys.MapType(Array),
KeySize: 4,
@@ -172,7 +172,7 @@ var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
return nil
})
var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error {
var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error {
if err := haveObjName(); err != nil {
return err
}
@@ -194,7 +194,7 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
return nil
})
var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error {
var maxEntries uint32 = 2
attr := sys.MapCreateAttr{
MapType: sys.MapType(Hash),
@@ -226,7 +226,7 @@ var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
return nil
})
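These probes are internal; applications see their result as ebpf.ErrNotSupported from the higher-level calls. A sketch of the usual fallback when the batch API is missing (pre-5.6 kernels):

    package sketch

    import (
        "errors"

        "github.com/cilium/ebpf"
    )

    // fillMap prefers BPF_MAP_*_BATCH and falls back to per-element updates.
    func fillMap(m *ebpf.Map, keys, values []uint32) error {
        if _, err := m.BatchUpdate(keys, values, nil); !errors.Is(err, ebpf.ErrNotSupported) {
            return err // nil on success, or a real batch failure
        }
        // Pre-5.6 kernel: one update syscall per element.
        for i := range keys {
            if err := m.Put(keys[i], values[i]); err != nil {
                return err
            }
        }
        return nil
    }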
var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error {
var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error {
insns := asm.Instructions{
asm.Mov.Reg(asm.R1, asm.R10),
asm.Add.Imm(asm.R1, -8),
@@ -244,7 +244,7 @@ var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", f
return nil
})
var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error {
var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error {
insns := asm.Instructions{
asm.Call.Label("prog2").WithSymbol("prog1"),
asm.Return(),

@@ -11,11 +11,6 @@ import (
// that will be initialized in the kernel.
type MapType uint32
// Max returns the latest supported MapType.
func (MapType) Max() MapType {
return maxMapType - 1
}
// All the various map types that can be created
const (
UnspecifiedMap MapType = iota
@@ -100,8 +95,6 @@ const (
InodeStorage
// TaskStorage - Specialized local storage map for task_struct.
TaskStorage
// maxMapType - Bound enum of MapTypes, has to be last in enum.
maxMapType
)
// hasPerCPUValue returns true if the Map stores a value per CPU.
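With Max and the maxMapType sentinel gone, there is no longer a bound to enumerate map types against; everyday use of the constants is unchanged. A minimal sketch:

    package sketch

    import "github.com/cilium/ebpf"

    // newSmallHash creates a tiny hash map keyed and valued by uint32.
    func newSmallHash() (*ebpf.Map, error) {
        return ebpf.NewMap(&ebpf.MapSpec{
            Type:       ebpf.Hash,
            KeySize:    4,
            ValueSize:  4,
            MaxEntries: 8,
        })
    }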
@@ -121,25 +114,9 @@ func (mt MapType) canStoreProgram() bool {
return mt == ProgramArray
}
// hasBTF returns true if the map type supports BTF key/value metadata.
func (mt MapType) hasBTF() bool {
switch mt {
case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap,
DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf:
return false
default:
return true
}
}
// ProgramType of the eBPF program
type ProgramType uint32
// Max returns the latest supported ProgramType.
func (ProgramType) Max() ProgramType {
return maxProgramType - 1
}
// eBPF program types
const (
UnspecifiedProgram ProgramType = iota
@@ -174,7 +151,6 @@ const (
LSM
SkLookup
Syscall
maxProgramType
)
// AttachType of the eBPF program, needed to differentiate allowed context accesses in

@@ -38,12 +38,11 @@ func _() {
_ = x[RingBuf-27]
_ = x[InodeStorage-28]
_ = x[TaskStorage-29]
_ = x[maxMapType-30]
}
const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType"
const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage"
var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300}
var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290}
func (i MapType) String() string {
if i >= MapType(len(_MapType_index)-1) {
@@ -87,12 +86,11 @@ func _() {
_ = x[LSM-29]
_ = x[SkLookup-30]
_ = x[Syscall-31]
_ = x[maxProgramType-32]
}
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType"
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscall"
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315}
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301}
func (i ProgramType) String() string {
if i >= ProgramType(len(_ProgramType_index)-1) {
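The generated String methods index into the tables above; values beyond the table, including the removed sentinels, fall back to the numeric form produced by the stringer template. A sketch of the observable behaviour:

    package sketch

    import (
        "fmt"

        "github.com/cilium/ebpf"
    )

    func printTypes() {
        fmt.Println(ebpf.TaskStorage) // "TaskStorage", the last named MapType
        fmt.Println(ebpf.MapType(30)) // "MapType(30)" now that maxMapType is gone
    }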

2
vendor/modules.txt vendored

@@ -2,7 +2,7 @@
## explicit; go 1.16
github.com/checkpoint-restore/go-criu/v6
github.com/checkpoint-restore/go-criu/v6/rpc
# github.com/cilium/ebpf v0.9.3
# github.com/cilium/ebpf v0.10.0
## explicit; go 1.18
github.com/cilium/ebpf
github.com/cilium/ebpf/asm