mirror of https://github.com/opencontainers/runc.git
Upgrade Cilium's eBPF library version to 0.16
Signed-off-by: Rafael Roquetto <rafael.roquetto@grafana.com>
committed by Rafael Roquetto
parent 7c2e69f1c4
commit 216175a9ca

go.mod: 2 changes
@@ -4,7 +4,7 @@ go 1.22
 
 require (
 	github.com/checkpoint-restore/go-criu/v6 v6.3.0
-	github.com/cilium/ebpf v0.12.3
+	github.com/cilium/ebpf v0.16.0
 	github.com/containerd/console v1.0.4
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/cyphar/filepath-securejoin v0.3.1
go.sum: 26 changes
@@ -1,8 +1,8 @@
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA=
 github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI=
-github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4=
-github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM=
+github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
+github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
 github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
 github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -16,20 +16,28 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
-github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
+github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
 github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
 github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
 github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
@@ -42,8 +50,8 @@ github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaL
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
@@ -73,6 +81,8 @@ golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP
 golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
 golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -176,21 +176,18 @@ func loadAttachCgroupDeviceFilter(insts asm.Instructions, license string, dirFd
 	}
 
 	// If there is only one old program, we can just replace it directly.
-	var (
-		replaceProg *ebpf.Program
-		attachFlags uint32 = unix.BPF_F_ALLOW_MULTI
-	)
-	if useReplaceProg {
-		replaceProg = oldProgs[0]
-		attachFlags |= unix.BPF_F_REPLACE
-	}
-	err = link.RawAttachProgram(link.RawAttachProgramOptions{
+	attachProgramOptions := link.RawAttachProgramOptions{
 		Target:  dirFd,
 		Program: prog,
-		Replace: replaceProg,
 		Attach:  ebpf.AttachCGroupDevice,
-		Flags:   attachFlags,
-	})
+		Flags:   unix.BPF_F_ALLOW_MULTI,
+	}
+
+	if useReplaceProg {
+		attachProgramOptions.Anchor = link.ReplaceProgram(oldProgs[0])
+	}
+	err = link.RawAttachProgram(attachProgramOptions)
 	if err != nil {
 		return nilCloser, fmt.Errorf("failed to call BPF_PROG_ATTACH (BPF_CGROUP_DEVICE, BPF_F_ALLOW_MULTI): %w", err)
 	}
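This hunk is the only change outside vendor/, apparently in runc's cgroup device-filter code (the file header was lost in this mirror; the hunk context names `loadAttachCgroupDeviceFilter`). It tracks an API break in ebpf-go 0.16: `RawAttachProgramOptions` lost its `Replace` field and explicit `BPF_F_REPLACE` handling, and atomic replacement is now expressed through the `Anchor` field. A minimal sketch of the new-style call; the `cgroupFd`, `prog`, and `old` names are hypothetical stand-ins for runc's actual state:

```go
package devicefilter

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
	"golang.org/x/sys/unix"
)

// attach attaches prog to the cgroup v2 directory referred to by cgroupFd,
// atomically replacing old when it is non-nil.
func attach(cgroupFd int, prog, old *ebpf.Program) error {
	opts := link.RawAttachProgramOptions{
		Target:  cgroupFd,                // fd of the cgroup v2 directory
		Program: prog,                    // device-filter program to attach
		Attach:  ebpf.AttachCGroupDevice, // the BPF_CGROUP_DEVICE hook
		Flags:   unix.BPF_F_ALLOW_MULTI,  // co-exist with other attached programs
	}
	if old != nil {
		// ReplaceProgram asks the kernel to swap out the old program
		// atomically; the library sets BPF_F_REPLACE on our behalf.
		opts.Anchor = link.ReplaceProgram(old)
	}
	if err := link.RawAttachProgram(opts); err != nil {
		return fmt.Errorf("BPF_PROG_ATTACH: %w", err)
	}
	return nil
}
```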
vendor/github.com/cilium/ebpf/.gitattributes: 1 change (generated, vendored, new file)
@@ -0,0 +1 @@
+internal/sys/types.go linguist-generated=false
vendor/github.com/cilium/ebpf/.vimto.toml: 12 changes (generated, vendored, new file)
@@ -0,0 +1,12 @@
+kernel="ghcr.io/cilium/ci-kernels:stable"
+smp="cpus=2"
+memory="1G"
+user="root"
+setup=[
+    "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup",
+    "/bin/sh -c 'modprobe bpf_testmod || true'",
+    "dmesg --clear",
+]
+teardown=[
+    "dmesg --read-clear",
+]
vendor/github.com/cilium/ebpf/ARCHITECTURE.md: 92 changes (generated, vendored, file deleted)
@@ -1,92 +0,0 @@
-Architecture of the library
-===
-
-```mermaid
-graph RL
-    Program --> ProgramSpec --> ELF
-    btf.Spec --> ELF
-    Map --> MapSpec --> ELF
-    Links --> Map & Program
-    ProgramSpec -.-> btf.Spec
-    MapSpec -.-> btf.Spec
-    subgraph Collection
-        Program & Map
-    end
-    subgraph CollectionSpec
-        ProgramSpec & MapSpec & btf.Spec
-    end
-```
-
-ELF
----
-
-BPF is usually produced by using Clang to compile a subset of C. Clang outputs
-an ELF file which contains program byte code (aka BPF), but also metadata for
-maps used by the program. The metadata follows the conventions set by libbpf
-shipped with the kernel. Certain ELF sections have special meaning
-and contain structures defined by libbpf. Newer versions of clang emit
-additional metadata in [BPF Type Format](#BTF).
-
-The library aims to be compatible with libbpf so that moving from a C toolchain
-to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
-is tested against the Linux selftests and avoids introducing custom behaviour
-if possible.
-
-The output of the ELF reader is a `CollectionSpec` which encodes
-all of the information contained in the ELF in a form that is easy to work with
-in Go. The returned `CollectionSpec` should be deterministic: reading the same ELF
-file on different systems must produce the same output.
-As a corollary, any changes that depend on the runtime environment like the
-current kernel version must happen when creating [Objects](#Objects).
-
-Specifications
----
-
-`CollectionSpec` is a very simple container for `ProgramSpec`, `MapSpec` and
-`btf.Spec`. Avoid adding functionality to it if possible.
-
-`ProgramSpec` and `MapSpec` are blueprints for in-kernel
-objects and contain everything necessary to execute the relevant `bpf(2)`
-syscalls. They refer to `btf.Spec` for type information such as `Map` key and
-value types.
-
-The [asm](asm/) package provides an assembler that can be used to generate
-`ProgramSpec` on the fly.
-
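An aside on that last removed paragraph: assembling a `ProgramSpec` without any ELF takes very little code. A hedged sketch, not part of this diff, of a do-nothing socket filter built with the `asm` package:

```go
package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	// Build a trivial "return 0" program entirely in Go.
	spec := &ebpf.ProgramSpec{
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0), // r0 = 0: drop every packet
			asm.Return(),
		},
		License: "MIT",
	}

	// Loading into the kernel requires root or CAP_BPF.
	prog, err := ebpf.NewProgram(spec)
	if err != nil {
		panic(err)
	}
	defer prog.Close()
}
```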
-Objects
----
-
-`Program` and `Map` are the result of loading specifications into the kernel.
-Features that depend on knowledge of the current system (e.g kernel version)
-are implemented at this point.
-
-Sometimes loading a spec will fail because the kernel is too old, or a feature is not
-enabled. There are multiple ways the library deals with that:
-
-* Fallback: older kernels don't allow naming programs and maps. The library
-  automatically detects support for names, and omits them during load if
-  necessary. This works since name is primarily a debug aid.
-
-* Sentinel error: sometimes it's possible to detect that a feature isn't available.
-  In that case the library will return an error wrapping `ErrNotSupported`.
-  This is also useful to skip tests that can't run on the current kernel.
-
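The sentinel-error convention described in the removed text above survives the deletion of this file and is what callers, runc included, can rely on when probing kernel features. A minimal sketch of the pattern:

```go
package ebpfutil

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
)

// loadOrSkip returns (nil, nil) when the running kernel lacks support,
// so callers can fall back or skip a test instead of failing hard.
func loadOrSkip(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	prog, err := ebpf.NewProgram(spec)
	if errors.Is(err, ebpf.ErrNotSupported) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("load program: %w", err)
	}
	return prog, nil
}
```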
-Once program and map objects are loaded they expose the kernel's low-level API,
-e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
-wrappers on top of the low-level API, like `MapIterator`. The low-level API is
-useful when our higher-level API doesn't support a particular use case.
-
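For reference, the `MapIterator` wrapper mentioned above looks like this in use; a sketch assuming a map with uint32 keys and uint64 values:

```go
package ebpfutil

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// dump prints every entry of m. Iterate hides the kernel's raw
// NextKey/Lookup loop behind a safe cursor.
func dump(m *ebpf.Map) error {
	var (
		key   uint32
		value uint64
	)
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		fmt.Printf("key=%d value=%d\n", key, value)
	}
	return iter.Err() // non-nil if iteration aborted early
}
```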
-Links
----
-
-Programs can be attached to many different points in the kernel and newer BPF hooks
-tend to use bpf_link to do so. Older hooks unfortunately use a combination of
-syscalls, netlink messages, etc. Adding support for a new link type should not
-pull in large dependencies like netlink, so XDP programs or tracepoints are
-out of scope.
-
-Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds
-to BPF_LINK_TRACING. In general, these types should be unexported as long as they
-don't export methods outside of the Link interface. Each Go type may have multiple
-exported constructors. For example `AttachTracing` and `AttachLSM` create a
-tracing link, but are distinct functions since they may require different arguments.
vendor/github.com/cilium/ebpf/CODEOWNERS: 11 changes (generated, vendored, new file)
@@ -0,0 +1,11 @@
+* @cilium/ebpf-lib-maintainers
+
+features/ @rgo3
+link/ @mmat11
+
+perf/ @florianl
+ringbuf/ @florianl
+
+btf/ @dylandreimerink
+
+cmd/bpf2go/ @mejedi
vendor/github.com/cilium/ebpf/CONTRIBUTING.md: 49 changes (generated, vendored)
@@ -1,48 +1,5 @@
-# How to contribute
+# Contributing to ebpf-go
 
-Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
-the form of pull requests and issues reporting bugs or suggesting new features
-are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
-a better understanding for the high-level goals.
-
-## Adding a new feature
-
-1. [Join](https://ebpf.io/slack) the
-[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel to discuss your requirements and how the feature can be implemented. The most important part is figuring out how much new exported API is necessary. **The less new API is required the easier it will be to land the feature.**
-2. (*optional*) Create a draft PR if you want to discuss the implementation or have hit a problem. It's fine if this doesn't compile or contains debug statements.
-3. Create a PR that is ready to merge. This must pass CI and have tests.
-
-### API stability
-
-The library doesn't guarantee the stability of its API at the moment.
-
-1. If possible avoid breakage by introducing new API and deprecating the old one
-   at the same time. If an API was deprecated in v0.x it can be removed in v0.x+1.
-2. Breaking API in a way that causes compilation failures is acceptable but must
-   have good reasons.
-3. Changing the semantics of the API without causing compilation failures is
-   heavily discouraged.
-
-## Running the tests
-
-Many of the tests require privileges to set resource limits and load eBPF code.
-The easiest way to obtain these is to run the tests with `sudo`.
-
-To test the current package with your local kernel you can simply run:
-```
-go test -exec sudo ./...
-```
-
-To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
-It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
-
-Examples:
-
-```bash
-# Run all tests on a 5.4 kernel
-./run-tests.sh 5.4
-
-# Run a subset of tests:
-./run-tests.sh 5.4 ./link
-```
+Want to contribute to ebpf-go? There are a few things you need to know.
 
+We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started.
vendor/github.com/cilium/ebpf/Makefile: 20 changes (generated, vendored)
@@ -44,9 +44,11 @@ TARGETS := \
 	testdata/invalid-kfunc \
 	testdata/kfunc-kmod \
+	testdata/constants \
+	testdata/errors \
 	btf/testdata/relocs \
 	btf/testdata/relocs_read \
 	btf/testdata/relocs_read_tgt \
 	btf/testdata/relocs_enum \
 	cmd/bpf2go/testdata/minimal
 
 .PHONY: all clean container-all container-shell generate
@@ -84,7 +86,8 @@ all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) gene
 	ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
 
 generate:
-	go generate ./...
+	go generate -run "internal/cmd/gentypes" ./...
+	go generate -skip "internal/cmd/gentypes" ./...
 
 testdata/loader-%-el.elf: testdata/loader.c
 	$* $(CFLAGS) -target bpfel -c $< -o $@
@@ -102,13 +105,8 @@ testdata/loader-%-eb.elf: testdata/loader.c
 	$(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
 	$(STRIP) -g $@
 
-.PHONY: generate-btf
-generate-btf: KERNEL_VERSION?=6.1.29
-generate-btf:
-	$(eval TMP := $(shell mktemp -d))
-	curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-amd64.tgz" -o "$(TMP)/linux.tgz"
-	tar xvf "$(TMP)/linux.tgz" -C "$(TMP)" --strip-components=2 ./boot/vmlinuz ./lib/modules
-	/lib/modules/$(shell uname -r)/build/scripts/extract-vmlinux "$(TMP)/vmlinuz" > "$(TMP)/vmlinux"
-	$(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
-	find "$(TMP)/modules" -type f -name bpf_testmod.ko -exec $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" {} /dev/null \;
-	$(RM) -r "$(TMP)"
+.PHONY: update-kernel-deps
+update-kernel-deps: export KERNEL_VERSION?=6.8
+update-kernel-deps:
+	./testdata/sh/update-kernel-deps.sh
+	$(MAKE) container-all
vendor/github.com/cilium/ebpf/README.md: 22 changes (generated, vendored)
@@ -13,10 +13,9 @@ ecosystem.
 
 ## Getting Started
 
-A small collection of Go and eBPF programs that serve as examples for building
-your own tools can be found under [examples/](examples/).
+Please take a look at our [Getting Started] guide.
 
-[Contributions](CONTRIBUTING.md) are highly encouraged, as they highlight certain use cases of
+[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of
 eBPF and the library, and help shape the future of the project.
 
 ## Getting Help
@@ -59,19 +58,8 @@ This library includes the following packages:
 
 * A version of Go that is [supported by
   upstream](https://golang.org/doc/devel/release.html#policy)
-* Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is
-  not tested against.
-
-## Regenerating Testdata
-
-Run `make` in the root of this repository to rebuild testdata in all
-subpackages. This requires Docker, as it relies on a standardized build
-environment to keep the build output stable.
-
-It is possible to regenerate data using Podman by overriding the `CONTAINER_*`
-variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`.
-
-The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
+* CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed versions
+  are not supported.
 
 ## License
@@ -80,3 +68,5 @@ MIT
 
 ### eBPF Gopher
 
 The eBPF honeygopher is based on the Go gopher designed by Renee French.
+
+[Getting Started]: https://ebpf-go.dev/guides/getting-started/
vendor/github.com/cilium/ebpf/attachtype_string.go: 17 changes (generated, vendored)
@@ -52,11 +52,24 @@ func _() {
 	_ = x[AttachSkReuseportSelectOrMigrate-40]
 	_ = x[AttachPerfEvent-41]
 	_ = x[AttachTraceKprobeMulti-42]
+	_ = x[AttachLSMCgroup-43]
+	_ = x[AttachStructOps-44]
+	_ = x[AttachNetfilter-45]
+	_ = x[AttachTCXIngress-46]
+	_ = x[AttachTCXEgress-47]
+	_ = x[AttachTraceUprobeMulti-48]
+	_ = x[AttachCgroupUnixConnect-49]
+	_ = x[AttachCgroupUnixSendmsg-50]
+	_ = x[AttachCgroupUnixRecvmsg-51]
+	_ = x[AttachCgroupUnixGetpeername-52]
+	_ = x[AttachCgroupUnixGetsockname-53]
+	_ = x[AttachNetkitPrimary-54]
+	_ = x[AttachNetkitPeer-55]
 }
 
-const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMulti"
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeer"
 
-var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626}
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804}
 
 func (i AttachType) String() string {
 	if i >= AttachType(len(_AttachType_index)-1) {
vendor/github.com/cilium/ebpf/btf/btf.go: 351 changes (generated, vendored)
@@ -29,9 +29,8 @@ var (
 // ID represents the unique ID of a BTF object.
 type ID = sys.BTFID
 
-// Spec allows querying a set of Types and loading the set into the
-// kernel.
-type Spec struct {
+// immutableTypes is a set of types which musn't be changed.
+type immutableTypes struct {
 	// All types contained by the spec, not including types from the base in
 	// case the spec was parsed from split BTF.
 	types []Type
@@ -44,13 +43,140 @@ type Spec struct {
 
 	// Types indexed by essential name.
 	// Includes all struct flavors and types with the same name.
-	namedTypes map[essentialName][]Type
+	namedTypes map[essentialName][]TypeID
+
+	// Byte order of the types. This affects things like struct member order
+	// when using bitfields.
+	byteOrder binary.ByteOrder
+}
+
+func (s *immutableTypes) typeByID(id TypeID) (Type, bool) {
+	if id < s.firstTypeID {
+		return nil, false
+	}
+
+	index := int(id - s.firstTypeID)
+	if index >= len(s.types) {
+		return nil, false
+	}
+
+	return s.types[index], true
+}
+
+// mutableTypes is a set of types which may be changed.
+type mutableTypes struct {
+	imm           immutableTypes
+	mu            sync.RWMutex    // protects copies below
+	copies        map[Type]Type   // map[orig]copy
+	copiedTypeIDs map[Type]TypeID // map[copy]origID
+}
+
+// add a type to the set of mutable types.
+//
+// Copies type and all of its children once. Repeated calls with the same type
+// do not copy again.
+func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type {
+	mt.mu.RLock()
+	cpy, ok := mt.copies[typ]
+	mt.mu.RUnlock()
+
+	if ok {
+		// Fast path: the type has been copied before.
+		return cpy
+	}
+
+	// modifyGraphPreorder copies the type graph node by node, so we can't drop
+	// the lock in between.
+	mt.mu.Lock()
+	defer mt.mu.Unlock()
+
+	return copyType(typ, typeIDs, mt.copies, mt.copiedTypeIDs)
+}
+
+// copy a set of mutable types.
+func (mt *mutableTypes) copy() *mutableTypes {
+	if mt == nil {
+		return nil
+	}
+
+	mtCopy := &mutableTypes{
+		mt.imm,
+		sync.RWMutex{},
+		make(map[Type]Type, len(mt.copies)),
+		make(map[Type]TypeID, len(mt.copiedTypeIDs)),
+	}
+
+	// Prevent concurrent modification of mt.copiedTypeIDs.
+	mt.mu.RLock()
+	defer mt.mu.RUnlock()
+
+	copiesOfCopies := make(map[Type]Type, len(mt.copies))
+	for orig, copy := range mt.copies {
+		// NB: We make a copy of copy, not orig, so that changes to mutable types
+		// are preserved.
+		copyOfCopy := copyType(copy, mt.copiedTypeIDs, copiesOfCopies, mtCopy.copiedTypeIDs)
+		mtCopy.copies[orig] = copyOfCopy
+	}
+
+	return mtCopy
+}
+
+func (mt *mutableTypes) typeID(typ Type) (TypeID, error) {
+	if _, ok := typ.(*Void); ok {
+		// Equality is weird for void, since it is a zero sized type.
+		return 0, nil
+	}
+
+	mt.mu.RLock()
+	defer mt.mu.RUnlock()
+
+	id, ok := mt.copiedTypeIDs[typ]
+	if !ok {
+		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+	}
+
+	return id, nil
+}
+
+func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) {
+	immT, ok := mt.imm.typeByID(id)
+	if !ok {
+		return nil, false
+	}
+
+	return mt.add(immT, mt.imm.typeIDs), true
+}
+
+func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) {
+	immTypes := mt.imm.namedTypes[newEssentialName(name)]
+	if len(immTypes) == 0 {
+		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+	}
+
+	// Return a copy to prevent changes to namedTypes.
+	result := make([]Type, 0, len(immTypes))
+	for _, id := range immTypes {
+		immT, ok := mt.imm.typeByID(id)
+		if !ok {
+			return nil, fmt.Errorf("no type with ID %d", id)
+		}
+
+		// Match against the full name, not just the essential one
+		// in case the type being looked up is a struct flavor.
+		if immT.TypeName() == name {
+			result = append(result, mt.add(immT, mt.imm.typeIDs))
+		}
+	}
+	return result, nil
+}
+
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
+type Spec struct {
+	*mutableTypes
 
 	// String table from ELF.
 	strings *stringTable
-
-	// Byte order of the ELF we decoded the spec from, may be nil.
-	byteOrder binary.ByteOrder
 }
 
 // LoadSpec opens file and calls LoadSpecFromReader on it.
@@ -181,7 +307,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
 		return nil, err
 	}
 
-	err = fixupDatasec(spec.types, sectionSizes, offsets)
+	err = fixupDatasec(spec.imm.types, sectionSizes, offsets)
 	if err != nil {
 		return nil, err
 	}
@@ -197,7 +323,7 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error
 	)
 
 	if base != nil {
-		if base.firstTypeID != 0 {
+		if base.imm.firstTypeID != 0 {
 			return nil, fmt.Errorf("can't use split BTF as base")
 		}
 
@@ -217,16 +343,23 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error
 	typeIDs, typesByName := indexTypes(types, firstTypeID)
 
 	return &Spec{
-		namedTypes:  typesByName,
-		typeIDs:     typeIDs,
-		types:       types,
-		firstTypeID: firstTypeID,
-		strings:     rawStrings,
-		byteOrder:   bo,
+		&mutableTypes{
+			immutableTypes{
+				types,
+				typeIDs,
+				firstTypeID,
+				typesByName,
+				bo,
+			},
+			sync.RWMutex{},
+			make(map[Type]Type),
+			make(map[Type]TypeID),
+		},
+		rawStrings,
 	}, nil
 }
 
-func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) {
 	namedTypes := 0
 	for _, typ := range types {
 		if typ.TypeName() != "" {
@@ -238,119 +371,20 @@ func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) {
 	}
 
 	typeIDs := make(map[Type]TypeID, len(types))
-	typesByName := make(map[essentialName][]Type, namedTypes)
+	typesByName := make(map[essentialName][]TypeID, namedTypes)
 
 	for i, typ := range types {
+		id := firstTypeID + TypeID(i)
+		typeIDs[typ] = id
+
 		if name := newEssentialName(typ.TypeName()); name != "" {
-			typesByName[name] = append(typesByName[name], typ)
+			typesByName[name] = append(typesByName[name], id)
 		}
-		typeIDs[typ] = firstTypeID + TypeID(i)
 	}
 
 	return typeIDs, typesByName
 }
-
-// LoadKernelSpec returns the current kernel's BTF information.
-//
-// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
-// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
-func LoadKernelSpec() (*Spec, error) {
-	spec, _, err := kernelSpec()
-	if err != nil {
-		return nil, err
-	}
-	return spec.Copy(), nil
-}
-
-var kernelBTF struct {
-	sync.RWMutex
-	spec *Spec
-	// True if the spec was read from an ELF instead of raw BTF in /sys.
-	fallback bool
-}
-
-// FlushKernelSpec removes any cached kernel type information.
-func FlushKernelSpec() {
-	kernelBTF.Lock()
-	defer kernelBTF.Unlock()
-
-	kernelBTF.spec, kernelBTF.fallback = nil, false
-}
-
-func kernelSpec() (*Spec, bool, error) {
-	kernelBTF.RLock()
-	spec, fallback := kernelBTF.spec, kernelBTF.fallback
-	kernelBTF.RUnlock()
-
-	if spec == nil {
-		kernelBTF.Lock()
-		defer kernelBTF.Unlock()
-
-		spec, fallback = kernelBTF.spec, kernelBTF.fallback
-	}
-
-	if spec != nil {
-		return spec, fallback, nil
-	}
-
-	spec, fallback, err := loadKernelSpec()
-	if err != nil {
-		return nil, false, err
-	}
-
-	kernelBTF.spec, kernelBTF.fallback = spec, fallback
-	return spec, fallback, nil
-}
-
-func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
-	fh, err := os.Open("/sys/kernel/btf/vmlinux")
-	if err == nil {
-		defer fh.Close()
-
-		spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
-		return spec, false, err
-	}
-
-	file, err := findVMLinux()
-	if err != nil {
-		return nil, false, err
-	}
-	defer file.Close()
-
-	spec, err := LoadSpecFromReader(file)
-	return spec, true, err
-}
-
-// findVMLinux scans multiple well-known paths for vmlinux kernel images.
-func findVMLinux() (*os.File, error) {
-	release, err := internal.KernelRelease()
-	if err != nil {
-		return nil, err
-	}
-
-	// use same list of locations as libbpf
-	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
-	locations := []string{
-		"/boot/vmlinux-%s",
-		"/lib/modules/%s/vmlinux-%[1]s",
-		"/lib/modules/%s/build/vmlinux",
-		"/usr/lib/modules/%s/kernel/vmlinux",
-		"/usr/lib/debug/boot/vmlinux-%s",
-		"/usr/lib/debug/boot/vmlinux-%s.debug",
-		"/usr/lib/debug/lib/modules/%s/vmlinux",
-	}
-
-	for _, loc := range locations {
-		file, err := os.Open(fmt.Sprintf(loc, release))
-		if errors.Is(err, os.ErrNotExist) {
-			continue
-		}
-		return file, err
-	}
-
-	return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
-}
 
 func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
 	buf := new(bufio.Reader)
 	for _, bo := range []binary.ByteOrder{
@@ -492,17 +526,13 @@ func fixupDatasecLayout(ds *Datasec) error {
 
 // Copy creates a copy of Spec.
 func (s *Spec) Copy() *Spec {
-	types := copyTypes(s.types, nil)
-	typeIDs, typesByName := indexTypes(types, s.firstTypeID)
+	if s == nil {
+		return nil
+	}
 
 	// NB: Other parts of spec are not copied since they are immutable.
 	return &Spec{
-		types,
-		typeIDs,
-		s.firstTypeID,
-		typesByName,
+		s.mutableTypes.copy(),
 		s.strings,
-		s.byteOrder,
 	}
 }
@@ -519,8 +549,8 @@ func (sw sliceWriter) Write(p []byte) (int, error) {
 
 // nextTypeID returns the next unallocated type ID or an error if there are no
 // more type IDs.
 func (s *Spec) nextTypeID() (TypeID, error) {
-	id := s.firstTypeID + TypeID(len(s.types))
-	if id < s.firstTypeID {
+	id := s.imm.firstTypeID + TypeID(len(s.imm.types))
+	if id < s.imm.firstTypeID {
 		return 0, fmt.Errorf("no more type IDs")
 	}
 	return id, nil
@@ -531,33 +561,19 @@ func (s *Spec) nextTypeID() (TypeID, error) {
 // Returns an error wrapping ErrNotFound if a Type with the given ID
 // does not exist in the Spec.
 func (s *Spec) TypeByID(id TypeID) (Type, error) {
-	if id < s.firstTypeID {
-		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
-	}
-
-	index := int(id - s.firstTypeID)
-	if index >= len(s.types) {
-		return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
-	}
-
-	return s.types[index], nil
+	typ, ok := s.typeByID(id)
+	if !ok {
+		return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound)
+	}
+
+	return typ, nil
 }
 
 // TypeID returns the ID for a given Type.
 //
-// Returns an error wrapping ErrNoFound if the type isn't part of the Spec.
+// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec.
 func (s *Spec) TypeID(typ Type) (TypeID, error) {
-	if _, ok := typ.(*Void); ok {
-		// Equality is weird for void, since it is a zero sized type.
-		return 0, nil
-	}
-
-	id, ok := s.typeIDs[typ]
-	if !ok {
-		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
-	}
-
-	return id, nil
+	return s.mutableTypes.typeID(typ)
 }
 
 // AnyTypesByName returns a list of BTF Types with the given name.
@@ -568,21 +584,7 @@ func (s *Spec) TypeID(typ Type) (TypeID, error) {
 //
 // Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
 func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
-	types := s.namedTypes[newEssentialName(name)]
-	if len(types) == 0 {
-		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
-	}
-
-	// Return a copy to prevent changes to namedTypes.
-	result := make([]Type, 0, len(types))
-	for _, t := range types {
-		// Match against the full name, not just the essential one
-		// in case the type being looked up is a struct flavor.
-		if t.TypeName() == name {
-			result = append(result, t)
-		}
-	}
-	return result, nil
+	return s.mutableTypes.anyTypesByName(name)
 }
 
 // AnyTypeByName returns a Type with the given name.
@@ -671,26 +673,27 @@ func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
 
 // TypesIterator iterates over types of a given spec.
 type TypesIterator struct {
-	types []Type
-	index int
+	spec *Spec
+	id   TypeID
+	done bool
 	// The last visited type in the spec.
 	Type Type
 }
 
 // Iterate returns the types iterator.
 func (s *Spec) Iterate() *TypesIterator {
-	// We share the backing array of types with the Spec. This is safe since
-	// we don't allow deletion or shuffling of types.
-	return &TypesIterator{types: s.types, index: 0}
+	return &TypesIterator{spec: s, id: s.imm.firstTypeID}
 }
 
 // Next returns true as long as there are any remaining types.
 func (iter *TypesIterator) Next() bool {
-	if len(iter.types) <= iter.index {
+	if iter.done {
 		return false
 	}
 
-	iter.Type = iter.types[iter.index]
-	iter.index++
-	return true
+	var ok bool
+	iter.Type, ok = iter.spec.typeByID(iter.id)
+	iter.id++
+	iter.done = !ok
+	return !iter.done
 }
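The iterator rewrite above changes the mechanics (each type is now fetched and lazily copied per visited ID instead of sharing the Spec's backing slice, so mutations don't leak between Spec copies), but the public usage is unchanged. A short sketch:

```go
package ebpfutil

import "github.com/cilium/ebpf/btf"

// countKernelTypes walks every type in the kernel's BTF.
func countKernelTypes() (int, error) {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		return 0, err
	}

	n := 0
	iter := spec.Iterate()
	for iter.Next() {
		_ = iter.Type // the most recently visited type
		n++
	}
	return n, nil
}
```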
vendor/github.com/cilium/ebpf/btf/core.go: 436 changes (generated, vendored)
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"math"
 	"reflect"
+	"slices"
 	"strconv"
 	"strings"
 
@@ -15,6 +16,11 @@
 // Code in this file is derived from libbpf, which is available under a BSD
 // 2-Clause license.
 
+// A constant used when CO-RE relocation has to remove instructions.
+//
+// Taken from libbpf.
+const COREBadRelocationSentinel = 0xbad2310
+
 // COREFixup is the result of computing a CO-RE relocation for a target.
 type COREFixup struct {
 	kind   coreKind
@@ -41,9 +47,22 @@ func (f *COREFixup) String() string {
 
 func (f *COREFixup) Apply(ins *asm.Instruction) error {
 	if f.poison {
-		const badRelo = 0xbad2310
-
-		*ins = asm.BuiltinFunc(badRelo).Call()
+		// Relocation is poisoned, replace the instruction with an invalid one.
+		if ins.OpCode.IsDWordLoad() {
+			// Replace a dword load with a invalid dword load to preserve instruction size.
+			*ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord)
+		} else {
+			// Replace all single size instruction with a invalid call instruction.
+			*ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call()
+		}
+
+		// Add context to the kernel verifier output.
+		if source := ins.Source(); source != nil {
+			*ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source)))
+		} else {
+			*ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE"))
+		}
+
 		return nil
 	}
@@ -119,10 +138,11 @@ const (
 	reloTypeSize      /* type size in bytes */
 	reloEnumvalExists /* enum value existence in target kernel */
 	reloEnumvalValue  /* enum value integer value */
+	reloTypeMatches   /* type matches kernel type */
 )
 
 func (k coreKind) checksForExistence() bool {
-	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches
 }
 
 func (k coreKind) String() string {
@@ -151,30 +171,43 @@ func (k coreKind) String() string {
 		return "enumval_exists"
 	case reloEnumvalValue:
 		return "enumval_value"
+	case reloTypeMatches:
+		return "type_matches"
 	default:
-		return "unknown"
+		return fmt.Sprintf("unknown (%d)", k)
 	}
 }
 
 // CORERelocate calculates changes needed to adjust eBPF instructions for differences
 // in types.
 //
+// targets forms the set of types to relocate against. The first element has to be
+// BTF for vmlinux, the following must be types for kernel modules.
+//
+// resolveLocalTypeID is called for each local type which requires a stable TypeID.
+// Calling the function with the same type multiple times must produce the same
+// result. It is the callers responsibility to ensure that the relocated instructions
+// are loaded with matching BTF.
+//
 // Returns a list of fixups which can be applied to instructions to make them
 // match the target type(s).
 //
 // Fixups are returned in the order of relos, e.g. fixup[i] is the solution
 // for relos[i].
-func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
-	if target == nil {
-		var err error
-		target, _, err = kernelSpec()
-		if err != nil {
-			return nil, fmt.Errorf("load kernel spec: %w", err)
-		}
+func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
+	if len(targets) == 0 {
+		// Explicitly check for nil here since the argument used to be optional.
+		return nil, fmt.Errorf("targets must be provided")
 	}
 
-	if bo != target.byteOrder {
-		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
+	// We can't encode type IDs that aren't for vmlinux into instructions at the
+	// moment.
+	resolveTargetTypeID := targets[0].TypeID
+
+	for _, target := range targets {
+		if bo != target.imm.byteOrder {
+			return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
+		}
 	}
 
 	type reloGroup struct {
@@ -194,14 +227,15 @@ func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder,
 				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
 			}
 
+			id, err := resolveLocalTypeID(relo.typ)
+			if err != nil {
+				return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err)
+			}
+
 			result[i] = COREFixup{
 				kind:  relo.kind,
 				local: uint64(relo.id),
-				// NB: Using relo.id as the target here is incorrect, since
-				// it doesn't match the BTF we generate on the fly. This isn't
-				// too bad for now since there are no uses of the local type ID
-				// in the kernel, yet.
-				target: uint64(relo.id),
+				target: uint64(id),
 			}
 			continue
 		}
@@ -221,8 +255,23 @@
 			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
 		}
 
-		targets := target.namedTypes[newEssentialName(localTypeName)]
-		fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
+		essentialName := newEssentialName(localTypeName)
+
+		var targetTypes []Type
+		for _, target := range targets {
+			namedTypeIDs := target.imm.namedTypes[essentialName]
+			targetTypes = slices.Grow(targetTypes, len(namedTypeIDs))
+			for _, id := range namedTypeIDs {
+				typ, err := target.TypeByID(id)
+				if err != nil {
+					return nil, err
+				}
+
+				targetTypes = append(targetTypes, typ)
+			}
+		}
+
+		fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID)
 		if err != nil {
 			return nil, fmt.Errorf("relocate %s: %w", localType, err)
 		}
@@ -245,19 +294,14 @@
 //
 // The best target is determined by scoring: the less poisoning we have to do
 // the better the target is.
-func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
+func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
 	bestScore := len(relos)
 	var bestFixups []COREFixup
 	for _, target := range targets {
-		targetID, err := targetSpec.TypeID(target)
-		if err != nil {
-			return nil, fmt.Errorf("target type ID: %w", err)
-		}
-
 		score := 0 // lower is better
 		fixups := make([]COREFixup, 0, len(relos))
 		for _, relo := range relos {
-			fixup, err := coreCalculateFixup(relo, target, targetID, bo)
+			fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID)
 			if err != nil {
 				return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
 			}
@@ -308,9 +352,8 @@
 
 var errNoSignedness = errors.New("no signedness")
 
-// coreCalculateFixup calculates the fixup for a single local type, target type
-// and relocation.
-func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) {
+// coreCalculateFixup calculates the fixup given a relocation and a target type.
+func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) {
 	fixup := func(local, target uint64) (COREFixup, error) {
 		return COREFixup{kind: relo.kind, local: local, target: target}, nil
 	}
@@ -328,12 +371,27 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 	local := relo.typ
 
 	switch relo.kind {
+	case reloTypeMatches:
+		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+			return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
+		}
+
+		err := coreTypesMatch(local, target, nil)
+		if errors.Is(err, errIncompatibleTypes) {
+			return poison()
+		}
+		if err != nil {
+			return zero, err
+		}
+
+		return fixup(1, 1)
+
 	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
 		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
 			return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
 		}
 
-		err := coreAreTypesCompatible(local, target)
+		err := CheckTypeCompatibility(local, target)
 		if errors.Is(err, errIncompatibleTypes) {
 			return poison()
 		}
@@ -346,6 +404,15 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 		return fixup(1, 1)
 
+	case reloTypeIDTarget:
+		targetID, err := resolveTargetTypeID(target)
+		if errors.Is(err, ErrNotFound) {
+			// Probably a relocation trying to get the ID
+			// of a type from a kmod.
+			return poison()
+		}
+		if err != nil {
+			return zero, err
+		}
+		return fixup(uint64(relo.id), uint64(targetID))
 
 	case reloTypeSize:
@@ -380,7 +447,7 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 		}
 
 	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
-		if _, ok := as[*Fwd](target); ok {
+		if _, ok := As[*Fwd](target); ok {
 			// We can't relocate fields using a forward declaration, so
 			// skip it. If a non-forward declaration is present in the BTF
 			// we'll find it in one of the other iterations.
@@ -449,14 +516,14 @@ func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo b
 		case reloFieldSigned:
 			switch local := UnderlyingType(localField.Type).(type) {
 			case *Enum:
-				target, ok := as[*Enum](targetField.Type)
+				target, ok := As[*Enum](targetField.Type)
 				if !ok {
 					return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
 				}
 
 				return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed))
 			case *Int:
-				target, ok := as[*Int](targetField.Type)
+				target, ok := As[*Int](targetField.Type)
 				if !ok {
 					return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
 				}
@@ -540,7 +607,7 @@
 }
 
 func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
-	e, ok := as[*Enum](t)
+	e, ok := As[*Enum](t)
 	if !ok {
 		return nil, fmt.Errorf("not an enum: %s", t)
 	}
@@ -666,7 +733,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
 
 		localMember := localMembers[acc]
 		if localMember.Name == "" {
-			localMemberType, ok := as[composite](localMember.Type)
+			localMemberType, ok := As[composite](localMember.Type)
 			if !ok {
 				return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
 			}
@@ -680,7 +747,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
 			continue
 		}
 
-		targetType, ok := as[composite](target.Type)
+		targetType, ok := As[composite](target.Type)
 		if !ok {
 			return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
 		}
@@ -726,7 +793,7 @@ func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField,
 
 		case *Array:
 			// For arrays, acc is the index in the target.
-			targetType, ok := as[*Array](target.Type)
+			targetType, ok := As[*Array](target.Type)
 			if !ok {
 				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
 			}
@@ -820,7 +887,7 @@ func coreFindMember(typ composite, name string) (Member, bool, error) {
 			continue
 		}
 
-		comp, ok := as[composite](member.Type)
+		comp, ok := As[composite](member.Type)
 		if !ok {
 			return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
 		}
@@ -839,7 +906,7 @@ func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localVal
 		return nil, nil, err
 	}
 
-	targetEnum, ok := as[*Enum](target)
+	targetEnum, ok := As[*Enum](target)
 	if !ok {
 		return nil, nil, errImpossibleRelocation
 	}
@@ -860,7 +927,11 @@
 //
 // Only layout compatibility is checked, ignoring names of the root type.
 func CheckTypeCompatibility(localType Type, targetType Type) error {
-	return coreAreTypesCompatible(localType, targetType)
+	return coreAreTypesCompatible(localType, targetType, nil)
+}
+
+type pair struct {
+	A, B Type
 }
 
 /* The comment below is from bpf_core_types_are_compat in libbpf.c:
@@ -886,61 +957,62 @@
 *
 * Returns errIncompatibleTypes if types are not compatible.
 */
-func coreAreTypesCompatible(localType Type, targetType Type) error {
-
-	var (
-		localTs, targetTs typeDeque
-		l, t              = &localType, &targetType
-		depth             = 0
-	)
-
-	for ; l != nil && t != nil; l, t = localTs.Shift(), targetTs.Shift() {
-		if depth >= maxResolveDepth {
-			return errors.New("types are nested too deep")
-		}
-
-		localType = UnderlyingType(*l)
-		targetType = UnderlyingType(*t)
-
-		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
-			return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
-		}
-
-		switch lv := (localType).(type) {
-		case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
-			// Nothing to do here
-
-		case *Pointer, *Array:
-			depth++
-			walkType(localType, localTs.Push)
-			walkType(targetType, targetTs.Push)
-
-		case *FuncProto:
-			tv := targetType.(*FuncProto)
-			if len(lv.Params) != len(tv.Params) {
-				return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
-			}
-
-			depth++
-			walkType(localType, localTs.Push)
-			walkType(targetType, targetTs.Push)
-
-		default:
-			return fmt.Errorf("unsupported type %T", localType)
-		}
-	}
-
-	if l != nil {
-		return fmt.Errorf("dangling local type %T", *l)
-	}
-
-	if t != nil {
-		return fmt.Errorf("dangling target type %T", *t)
-	}
-
-	return nil
-}
+func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error {
+	localType = UnderlyingType(localType)
+	targetType = UnderlyingType(targetType)
+
+	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+		return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+	}
+
+	if _, ok := visited[pair{localType, targetType}]; ok {
+		return nil
+	}
+	if visited == nil {
+		visited = make(map[pair]struct{})
+	}
+	visited[pair{localType, targetType}] = struct{}{}
+
+	switch lv := localType.(type) {
+	case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
+		return nil
+
+	case *Pointer:
+		tv := targetType.(*Pointer)
+		return coreAreTypesCompatible(lv.Target, tv.Target, visited)
+
+	case *Array:
+		tv := targetType.(*Array)
+		if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil {
+			return err
+		}
+
+		return coreAreTypesCompatible(lv.Type, tv.Type, visited)
+
+	case *FuncProto:
+		tv := targetType.(*FuncProto)
+		if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil {
+			return err
+		}
+
+		if len(lv.Params) != len(tv.Params) {
+			return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
+		}
+
+		for i, localParam := range lv.Params {
+			targetParam := tv.Params[i]
+			if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil {
+				return err
+			}
+		}
+
+		return nil
+
+	default:
+		return fmt.Errorf("unsupported type %T", localType)
+	}
+}
 
 /* coreAreMembersCompatible checks two types for field-based relocation compatibility.
 *
 * The comment below is from bpf_core_fields_are_compat in libbpf.c:
@@ -970,19 +1042,6 @@ func coreAreMembersCompatible(localType Type, targetType Type) error {
 	localType = UnderlyingType(localType)
 	targetType = UnderlyingType(targetType)
 
-	doNamesMatch := func(a, b string) error {
-		if a == "" || b == "" {
-			// allow anonymous and named type to match
-			return nil
-		}
-
-		if newEssentialName(a) == newEssentialName(b) {
-			return nil
-		}
-
-		return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
-	}
-
 	_, lok := localType.(composite)
 	_, tok := targetType.(composite)
 	if lok && tok {
@@ -999,13 +1058,204 @@ func coreAreMembersCompatible(localType Type, targetType Type) error {
|
||||
|
||||
case *Enum:
|
||||
tv := targetType.(*Enum)
|
||||
return doNamesMatch(lv.Name, tv.Name)
|
||||
if !coreEssentialNamesMatch(lv.Name, tv.Name) {
|
||||
return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
case *Fwd:
|
||||
tv := targetType.(*Fwd)
|
||||
return doNamesMatch(lv.Name, tv.Name)
|
||||
if !coreEssentialNamesMatch(lv.Name, tv.Name) {
|
||||
return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
default:
|
||||
return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
|
||||
}
|
||||
}
|
||||
|
||||
// coreEssentialNamesMatch compares two names while ignoring their flavour suffix.
|
||||
//
|
||||
// This should only be used on names which are in the global scope, like struct
|
||||
// names, typedefs or enum values.
|
||||
func coreEssentialNamesMatch(a, b string) bool {
|
||||
if a == "" || b == "" {
|
||||
// allow anonymous and named type to match
|
||||
return true
|
||||
}
|
||||
|
||||
return newEssentialName(a) == newEssentialName(b)
|
||||
}

/* The comment below is from __bpf_core_types_match in relo_core.c:
*
* Check that two types "match". This function assumes that root types were
* already checked for name match.
*
* The matching relation is defined as follows:
* - modifiers and typedefs are stripped (and, hence, effectively ignored)
* - generally speaking types need to be of same kind (struct vs. struct, union
* vs. union, etc.)
* - exceptions are struct/union behind a pointer which could also match a
* forward declaration of a struct or union, respectively, and enum vs.
* enum64 (see below)
* Then, depending on type:
* - integers:
* - match if size and signedness match
* - arrays & pointers:
* - target types are recursively matched
* - structs & unions:
* - local members need to exist in target with the same name
* - for each member we recursively check match unless it is already behind a
* pointer, in which case we only check matching names and compatible kind
* - enums:
* - local variants have to have a match in target by symbolic name (but not
* numeric value)
* - size has to match (but enum may match enum64 and vice versa)
* - function pointers:
* - number and position of arguments in local type has to match target
* - for each argument and the return value we recursively check match
*/
func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error {
localType = UnderlyingType(localType)
targetType = UnderlyingType(targetType)

if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) {
return fmt.Errorf("type name %q don't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes)
}

if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
}

if _, ok := visited[pair{localType, targetType}]; ok {
return nil
}
if visited == nil {
visited = make(map[pair]struct{})
}
visited[pair{localType, targetType}] = struct{}{}

switch lv := (localType).(type) {
case *Void:

case *Fwd:
if targetType.(*Fwd).Kind != lv.Kind {
return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
}

case *Enum:
return coreEnumsMatch(lv, targetType.(*Enum))

case composite:
tv := targetType.(composite)

if len(lv.members()) > len(tv.members()) {
return errIncompatibleTypes
}

localMembers := lv.members()
targetMembers := map[string]Member{}
for _, member := range tv.members() {
targetMembers[member.Name] = member
}

for _, localMember := range localMembers {
targetMember, found := targetMembers[localMember.Name]
if !found {
return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes)
}

err := coreTypesMatch(localMember.Type, targetMember.Type, visited)
if err != nil {
return err
}
}

case *Int:
if !coreEncodingMatches(lv, targetType.(*Int)) {
return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
}

case *Pointer:
tv := targetType.(*Pointer)

// Allow a pointer to a forward declaration to match a struct
// or union.
if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) {
return nil
}

if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) {
return nil
}

return coreTypesMatch(lv.Target, tv.Target, visited)

case *Array:
tv := targetType.(*Array)

if lv.Nelems != tv.Nelems {
return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
}

return coreTypesMatch(lv.Type, tv.Type, visited)

case *FuncProto:
tv := targetType.(*FuncProto)

if len(lv.Params) != len(tv.Params) {
return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
}

for i, lparam := range lv.Params {
if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil {
return err
}
}

return coreTypesMatch(lv.Return, tv.Return, visited)

default:
return fmt.Errorf("unsupported type %T", localType)
}

return nil
}

// coreEncodingMatches returns true if both ints have the same size and signedness.
// All encodings other than `Signed` are considered unsigned.
func coreEncodingMatches(local, target *Int) bool {
return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed)
}

// coreEnumsMatch checks two enums match, which is considered to be the case if the following is true:
// - size has to match (but enum may match enum64 and vice versa)
// - local variants have to have a match in target by symbolic name (but not numeric value)
func coreEnumsMatch(local *Enum, target *Enum) error {
if local.Size != target.Size {
return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes)
}

// If there are more values in the local than the target, there must be at least one value in the local
// that isn't in the target, and therefore the types are incompatible.
if len(local.Values) > len(target.Values) {
return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes)
}

outer:
for _, lv := range local.Values {
for _, rv := range target.Values {
if coreEssentialNamesMatch(lv.Name, rv.Name) {
continue outer
}
}

return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes)
}

return nil
}
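
A note on the flavour rule used above: newEssentialName (not shown in this hunk) strips everything from the last "___", so a local flavour such as TCP_ESTABLISHED___v2 matches a target TCP_ESTABLISHED. A minimal, self-contained sketch of that comparison, assuming only that "___" is the suffix marker:

package main

import (
	"fmt"
	"strings"
)

// essentialName approximates newEssentialName: the flavour suffix
// starting at the last "___" is ignored for names in the global scope.
func essentialName(name string) string {
	if i := strings.LastIndex(name, "___"); i > 0 {
		return name[:i]
	}
	return name
}

func main() {
	// Enum values match by symbolic (essential) name, not numeric value.
	fmt.Println(essentialName("TCP_ESTABLISHED___v2") == essentialName("TCP_ESTABLISHED")) // true
}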

40
vendor/github.com/cilium/ebpf/btf/ext_info.go
generated
vendored
@@ -143,27 +143,19 @@ func AssignMetadataToInstructions(
// MarshalExtInfos encodes function and line info embedded in insns into kernel
// wire format.
//
// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
// Bail out early if the kernel doesn't support Func(Proto). If this is the
// case, func_info will also be unsupported.
if err := haveProgBTF(); err != nil {
return nil, nil, nil, err
}

// If an instruction has an [asm.Comment], it will be synthesized into a mostly
// empty line info.
func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) {
iter := insns.Iterate()
for iter.Next() {
_, ok := iter.Ins.Source().(*Line)
fn := FuncMetadata(iter.Ins)
if ok || fn != nil {
if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil {
goto marshal
}
}

return nil, nil, nil, nil
return nil, nil, nil

marshal:
var b Builder
var fiBuf, liBuf bytes.Buffer
for {
if fn := FuncMetadata(iter.Ins); fn != nil {
@@ -171,18 +163,27 @@ marshal:
fn: fn,
offset: iter.Offset,
}
if err := fi.marshal(&fiBuf, &b); err != nil {
return nil, nil, nil, fmt.Errorf("write func info: %w", err)
if err := fi.marshal(&fiBuf, b); err != nil {
return nil, nil, fmt.Errorf("write func info: %w", err)
}
}

if source := iter.Ins.Source(); source != nil {
var line *Line
if l, ok := source.(*Line); ok {
line = l
} else {
line = &Line{
line: source.String(),
}
}

if line, ok := iter.Ins.Source().(*Line); ok {
li := &lineInfo{
line: line,
offset: iter.Offset,
}
if err := li.marshal(&liBuf, &b); err != nil {
return nil, nil, nil, fmt.Errorf("write line info: %w", err)
if err := li.marshal(&liBuf, b); err != nil {
return nil, nil, fmt.Errorf("write line info: %w", err)
}
}

@@ -191,8 +192,7 @@ marshal:
}
}

handle, err := NewHandle(&b)
return handle, fiBuf.Bytes(), liBuf.Bytes(), err
return fiBuf.Bytes(), liBuf.Bytes(), nil
}
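
The hunk above changes the public shape of MarshalExtInfos: it no longer creates a Handle internally, the caller supplies a *Builder and builds the Handle afterwards. A hedged sketch of the new call shape (the wrapper function and its error handling are illustrative, not from this diff):

package main

import (
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/btf"
)

// marshalWithHandle shows the v0.16 calling convention: MarshalExtInfos
// fills a caller-owned Builder, and the Handle is created from it afterwards.
// In v0.12 the equivalent was: handle, fi, li, err := btf.MarshalExtInfos(insns).
func marshalWithHandle(insns asm.Instructions) (*btf.Handle, []byte, []byte, error) {
	var b btf.Builder
	funcInfos, lineInfos, err := btf.MarshalExtInfos(insns, &b)
	if err != nil {
		return nil, nil, nil, err
	}
	handle, err := btf.NewHandle(&b)
	return handle, funcInfos, lineInfos, err
}

func main() {
	// Loading into the kernel requires BTF support; this is only a sketch.
	_, _, _, _ = marshalWithHandle(asm.Instructions{})
}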

// btfExtHeader is found at the start of the .BTF.ext section.

54
vendor/github.com/cilium/ebpf/btf/handle.go
generated
vendored
@@ -41,6 +41,8 @@ func NewHandle(b *Builder) (*Handle, error) {
//
// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
const minLogSize = 64 * 1024

if uint64(len(btf)) > math.MaxUint32 {
return nil, errors.New("BTF exceeds the maximum size")
}
@@ -50,26 +52,54 @@ func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
BtfSize: uint32(len(btf)),
}

fd, err := sys.BtfLoad(attr)
var (
logBuf []byte
err error
)
for {
var fd *sys.FD
fd, err = sys.BtfLoad(attr)
if err == nil {
return &Handle{fd, attr.BtfSize, false}, nil
}

if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize {
// The log buffer already has the correct size.
break
}

if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) {
// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
// if there are other verification errors. ENOSPC is only returned when
// the BTF blob is correct, a log was requested, and the provided buffer
// is too small. We're therefore not sure whether we got the full
// log or not.
break
}

// Make an educated guess how large the buffer should be. Start
// at a reasonable minimum and then double the size.
logSize := uint32(max(len(logBuf)*2, minLogSize))
if int(logSize) < len(logBuf) {
return nil, errors.New("overflow while probing log buffer size")
}

if attr.BtfLogTrueSize != 0 {
// The kernel has given us a hint how large the log buffer has to be.
logSize = attr.BtfLogTrueSize
}

logBuf = make([]byte, logSize)
attr.BtfLogSize = logSize
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogLevel = 1
}

if err := haveBTF(); err != nil {
return nil, err
}

logBuf := make([]byte, 64*1024)
attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
attr.BtfLogSize = uint32(len(logBuf))
attr.BtfLogLevel = 1

// Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
// if there are other verification errors. ENOSPC is only returned when
// the BTF blob is correct, a log was requested, and the provided buffer
// is too small.
_, ve := sys.BtfLoad(attr)
return nil, internal.ErrorWithLog("load btf", err, logBuf, errors.Is(ve, unix.ENOSPC))
return nil, internal.ErrorWithLog("load btf", err, logBuf)
}
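
The new loop above probes the verifier log size instead of retrying once with a fixed 64 KiB buffer: grow exponentially from a minimum, prefer the kernel's BtfLogTrueSize hint when it is available, and stop on errors other than ENOSPC. A self-contained sketch of the same pattern, with load standing in for sys.BtfLoad and errNoSpace for unix.ENOSPC:

package main

import "errors"

var errNoSpace = errors.New("log buffer too small") // stands in for unix.ENOSPC

// probeLog retries load with a growing buffer until the log fits or the
// error stops being "buffer too small". trueSize plays the role of
// attr.BtfLogTrueSize: a kernel hint, 0 when unknown.
func probeLog(load func(buf []byte) (trueSize int, err error)) ([]byte, error) {
	const minLogSize = 64 * 1024
	var buf []byte
	for {
		trueSize, err := load(buf)
		if err == nil {
			return nil, nil // loaded successfully, no log needed
		}
		if trueSize != 0 && len(buf) >= trueSize {
			return buf, err // buffer already has the size the kernel asked for
		}
		if len(buf) != 0 && !errors.Is(err, errNoSpace) {
			return buf, err // a different verifier error; log may be truncated
		}
		size := max(len(buf)*2, minLogSize) // educated guess: double each round
		if trueSize != 0 {
			size = trueSize // the kernel told us the exact size
		}
		buf = make([]byte, size)
	}
}

func main() {}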

// NewHandleFromID returns the BTF handle for a given id.

159
vendor/github.com/cilium/ebpf/btf/kernel.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
package btf

import (
"errors"
"fmt"
"os"
"path/filepath"
"sync"

"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/kallsyms"
)

var kernelBTF = struct {
sync.RWMutex
kernel *Spec
modules map[string]*Spec
}{
modules: make(map[string]*Spec),
}

// FlushKernelSpec removes any cached kernel type information.
func FlushKernelSpec() {
kallsyms.FlushKernelModuleCache()

kernelBTF.Lock()
defer kernelBTF.Unlock()

kernelBTF.kernel = nil
kernelBTF.modules = make(map[string]*Spec)
}

// LoadKernelSpec returns the current kernel's BTF information.
//
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
kernelBTF.RLock()
spec := kernelBTF.kernel
kernelBTF.RUnlock()

if spec == nil {
kernelBTF.Lock()
defer kernelBTF.Unlock()

spec = kernelBTF.kernel
}

if spec != nil {
return spec.Copy(), nil
}

spec, _, err := loadKernelSpec()
if err != nil {
return nil, err
}

kernelBTF.kernel = spec
return spec.Copy(), nil
}

// LoadKernelModuleSpec returns the BTF information for the named kernel module.
//
// Defaults to /sys/kernel/btf/<module>.
// Returns an error wrapping ErrNotSupported if BTF is not enabled.
// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist.
func LoadKernelModuleSpec(module string) (*Spec, error) {
kernelBTF.RLock()
spec := kernelBTF.modules[module]
kernelBTF.RUnlock()

if spec != nil {
return spec.Copy(), nil
}

base, err := LoadKernelSpec()
if err != nil {
return nil, fmt.Errorf("load kernel spec: %w", err)
}

kernelBTF.Lock()
defer kernelBTF.Unlock()

if spec = kernelBTF.modules[module]; spec != nil {
return spec.Copy(), nil
}

spec, err = loadKernelModuleSpec(module, base)
if err != nil {
return nil, err
}

kernelBTF.modules[module] = spec
return spec.Copy(), nil
}

func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
fh, err := os.Open("/sys/kernel/btf/vmlinux")
if err == nil {
defer fh.Close()

spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
return spec, false, err
}

file, err := findVMLinux()
if err != nil {
return nil, false, err
}
defer file.Close()

spec, err := LoadSpecFromReader(file)
return spec, true, err
}

func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) {
dir, file := filepath.Split(module)
if dir != "" || filepath.Ext(file) != "" {
return nil, fmt.Errorf("invalid module name %q", module)
}

fh, err := os.Open(filepath.Join("/sys/kernel/btf", module))
if err != nil {
return nil, err
}
defer fh.Close()

return loadRawSpec(fh, internal.NativeEndian, base)
}

// findVMLinux scans multiple well-known paths for vmlinux kernel images.
func findVMLinux() (*os.File, error) {
release, err := internal.KernelRelease()
if err != nil {
return nil, err
}

// use same list of locations as libbpf
// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
locations := []string{
"/boot/vmlinux-%s",
"/lib/modules/%s/vmlinux-%[1]s",
"/lib/modules/%s/build/vmlinux",
"/usr/lib/modules/%s/kernel/vmlinux",
"/usr/lib/debug/boot/vmlinux-%s",
"/usr/lib/debug/boot/vmlinux-%s.debug",
"/usr/lib/debug/lib/modules/%s/vmlinux",
}

for _, loc := range locations {
file, err := os.Open(fmt.Sprintf(loc, release))
if errors.Is(err, os.ErrNotExist) {
continue
}
return file, err
}

return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
}
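
The exported entry points in this new file are cheap to call repeatedly because of the cache; each call hands back a private copy of the spec. A hedged usage sketch ("nf_conntrack" is just an example module name):

package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// The first call parses /sys/kernel/btf/vmlinux (or a fallback vmlinux
	// image); later calls only pay for Spec.Copy.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		fmt.Println("kernel BTF unavailable:", err)
		return
	}
	_ = spec // look up kernel types here

	// Module BTF is split BTF layered on top of the vmlinux base spec.
	if mod, err := btf.LoadKernelModuleSpec("nf_conntrack"); err == nil {
		_ = mod
	}
}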
77
vendor/github.com/cilium/ebpf/btf/marshal.go
generated
vendored
@@ -5,12 +5,12 @@ import (
"encoding/binary"
"errors"
"fmt"
"maps"
"math"
"slices"
"sync"

"github.com/cilium/ebpf/internal"

"golang.org/x/exp/slices"
)

type MarshalOptions struct {
@@ -20,6 +20,8 @@ type MarshalOptions struct {
StripFuncLinkage bool
// Replace Enum64 with a placeholder for compatibility with <6.0 kernels.
ReplaceEnum64 bool
// Prevent the "No type found" error when loading BTF without any types.
PreventNoTypeFound bool
}

// KernelMarshalOptions will generate BTF suitable for the current kernel.
@@ -28,6 +30,7 @@ func KernelMarshalOptions() *MarshalOptions {
Order: internal.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
ReplaceEnum64: haveEnum64() != nil,
PreventNoTypeFound: true, // All current kernels require this.
}
}

@@ -39,6 +42,7 @@ type encoder struct {
buf *bytes.Buffer
strings *stringTableBuilder
ids map[Type]TypeID
visited map[Type]struct{}
lastID TypeID
}

@@ -93,6 +97,11 @@ func NewBuilder(types []Type) (*Builder, error) {
return b, nil
}

// Empty returns true if neither types nor strings have been added.
func (b *Builder) Empty() bool {
return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0)
}

// Add a Type and allocate a stable ID for it.
//
// Adding the identical Type multiple times is valid and will return the same ID.
@@ -159,15 +168,29 @@ func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
buf: w,
strings: stb,
lastID: TypeID(len(b.types)),
ids: make(map[Type]TypeID, len(b.types)),
visited: make(map[Type]struct{}, len(b.types)),
ids: maps.Clone(b.stableIDs),
}

if e.ids == nil {
e.ids = make(map[Type]TypeID)
}

types := b.types
if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound {
// We have strings that need to be written out,
// but no types (besides the implicit Void).
// Kernels as recent as v6.7 refuse to load such BTF
// with a "No type found" error in the log.
// Fix this by adding a dummy type.
types = []Type{&Int{Size: 0}}
}

// Ensure that types are marshaled in the exact order they were Add()ed.
// Otherwise the ID returned from Add() won't match.
e.pending.Grow(len(b.types))
for _, typ := range b.types {
e.pending.Grow(len(types))
for _, typ := range types {
e.pending.Push(typ)
e.ids[typ] = b.stableIDs[typ]
}

if err := e.deflatePending(); err != nil {
@@ -214,16 +237,28 @@ func (b *Builder) addString(str string) (uint32, error) {
return b.strings.Add(str)
}

func (e *encoder) allocateID(typ Type) error {
func (e *encoder) allocateIDs(root Type) (err error) {
visitInPostorder(root, e.visited, func(typ Type) bool {
if _, ok := typ.(*Void); ok {
return true
}

if _, ok := e.ids[typ]; ok {
return true
}

id := e.lastID + 1
if id < e.lastID {
return errors.New("type ID overflow")
err = errors.New("type ID overflow")
return false
}

e.pending.Push(typ)
e.ids[typ] = id
e.lastID = id
return nil
return true
})
return
}

// id returns the ID for the given type or panics with an error.
@@ -243,34 +278,14 @@ func (e *encoder) id(typ Type) TypeID {
func (e *encoder) deflatePending() error {
// Declare root outside of the loop to avoid repeated heap allocations.
var root Type
skip := func(t Type) (skip bool) {
if t == root {
// Force descending into the current root type even if it already
// has an ID. Otherwise we miss children of types that have their
// ID pre-allocated via Add.
return false
}

_, isVoid := t.(*Void)
_, alreadyEncoded := e.ids[t]
return isVoid || alreadyEncoded
}

for !e.pending.Empty() {
root = e.pending.Shift()

// Allocate IDs for all children of typ, including transitive dependencies.
iter := postorderTraversal(root, skip)
for iter.Next() {
if iter.Type == root {
// The iterator yields root at the end, do not allocate another ID.
break
}

if err := e.allocateID(iter.Type); err != nil {
if err := e.allocateIDs(root); err != nil {
return err
}
}

if err := e.deflateType(root); err != nil {
id := e.ids[root]
@@ -494,7 +509,7 @@ func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) {
if enum.Signed {
placeholder.Encoding = Signed
}
if err := e.allocateID(placeholder); err != nil {
if err := e.allocateIDs(placeholder); err != nil {
return fmt.Errorf("add enum64 placeholder: %w", err)
}

5
vendor/github.com/cilium/ebpf/btf/strings.go
generated
vendored
@@ -6,10 +6,9 @@ import (
"errors"
"fmt"
"io"
"maps"
"slices"
"strings"

"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)

type stringTable struct {
164
vendor/github.com/cilium/ebpf/btf/traversal.go
generated
vendored
@@ -2,93 +2,41 @@ package btf

import (
"fmt"

"github.com/cilium/ebpf/internal"
)

// Functions to traverse a cyclic graph of types. The below was very useful:
// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order

type postorderIterator struct {
// Iteration skips types for which this function returns true.
skip func(Type) bool
// The root type. May be nil if skip(root) is true.
root Type

// Contains types which need to be either walked or yielded.
types typeDeque
// Contains a boolean whether the type has been walked or not.
walked internal.Deque[bool]
// The set of types which has been pushed onto types.
pushed map[Type]struct{}

// The current type. Only valid after a call to Next().
Type Type
}

// postorderTraversal iterates all types reachable from root by visiting the
// leaves of the graph first.
// Visit all types reachable from root in postorder.
//
// Types for which skip returns true are ignored. skip may be nil.
func postorderTraversal(root Type, skip func(Type) (skip bool)) postorderIterator {
// Avoid allocations for the common case of a skipped root.
if skip != nil && skip(root) {
return postorderIterator{}
}

po := postorderIterator{root: root, skip: skip}
walkType(root, po.push)

return po
}

func (po *postorderIterator) push(t *Type) {
if _, ok := po.pushed[*t]; ok || *t == po.root {
return
}

if po.skip != nil && po.skip(*t) {
return
}

if po.pushed == nil {
// Lazily allocate pushed to avoid an allocation for Types without children.
po.pushed = make(map[Type]struct{})
}

po.pushed[*t] = struct{}{}
po.types.Push(t)
po.walked.Push(false)
}

// Next returns true if there is another Type to traverse.
func (po *postorderIterator) Next() bool {
for !po.types.Empty() {
t := po.types.Pop()

if !po.walked.Pop() {
// Push the type again, so that we re-evaluate it in done state
// after all children have been handled.
po.types.Push(t)
po.walked.Push(true)

// Add all direct children to todo.
walkType(*t, po.push)
} else {
// We've walked this type previously, so we now know that all
// children have been handled.
po.Type = *t
// Traversal stops if yield returns false.
//
// Returns false if traversal was aborted.
func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool {
if _, ok := visited[root]; ok {
return true
}
if visited == nil {
visited = make(map[Type]struct{})
}
visited[root] = struct{}{}

cont := children(root, func(child *Type) bool {
return visitInPostorder(*child, visited, yield)
})
if !cont {
return false
}

// Only return root once.
po.Type, po.root = po.root, nil
return po.Type != nil
return yield(root)
}
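
visitInPostorder replaces the explicit iterator with a recursive depth-first walk: children are yielded before their parent, and the visited set both breaks cycles and lets the encoder share state across roots. A self-contained sketch of the same idea on a plain integer graph (not the btf package's types):

package main

import "fmt"

// visitPostorder yields nodes of a possibly-cyclic graph in postorder.
// It mirrors the shape of visitInPostorder: check the visited set first,
// recurse into children, then yield the root last.
func visitPostorder(root int, edges map[int][]int, visited map[int]bool, yield func(int) bool) bool {
	if visited[root] {
		return true
	}
	visited[root] = true
	for _, child := range edges[root] {
		if !visitPostorder(child, edges, visited, yield) {
			return false
		}
	}
	return yield(root)
}

func main() {
	edges := map[int][]int{1: {2, 3}, 2: {3}, 3: {1}} // contains a cycle
	visitPostorder(1, edges, map[int]bool{}, func(n int) bool {
		fmt.Println(n) // prints 3, 2, 1
		return true
	})
}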

// walkType calls fn on each child of typ.
func walkType(typ Type, fn func(*Type)) {
// children calls yield on each child of typ.
//
// Traversal stops if yield returns false.
//
// Returns false if traversal was aborted.
func children(typ Type, yield func(child *Type) bool) bool {
// Explicitly type switch on the most common types to allow the inliner to
// do its work. This avoids allocating intermediate slices from walk() on
// the heap.
@@ -96,46 +44,80 @@ func walkType(typ Type, fn func(*Type)) {
case *Void, *Int, *Enum, *Fwd, *Float:
// No children to traverse.
case *Pointer:
fn(&v.Target)
if !yield(&v.Target) {
return false
}
case *Array:
fn(&v.Index)
fn(&v.Type)
if !yield(&v.Index) {
return false
}
if !yield(&v.Type) {
return false
}
case *Struct:
for i := range v.Members {
fn(&v.Members[i].Type)
if !yield(&v.Members[i].Type) {
return false
}
}
case *Union:
for i := range v.Members {
fn(&v.Members[i].Type)
if !yield(&v.Members[i].Type) {
return false
}
}
case *Typedef:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *Volatile:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *Const:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *Restrict:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *Func:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *FuncProto:
fn(&v.Return)
if !yield(&v.Return) {
return false
}
for i := range v.Params {
fn(&v.Params[i].Type)
if !yield(&v.Params[i].Type) {
return false
}
}
case *Var:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *Datasec:
for i := range v.Vars {
fn(&v.Vars[i].Type)
if !yield(&v.Vars[i].Type) {
return false
}
}
case *declTag:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *typeTag:
fn(&v.Type)
if !yield(&v.Type) {
return false
}
case *cycle:
// cycle has children, but we ignore them deliberately.
default:
panic(fmt.Sprintf("don't know how to walk Type %T", v))
}

return true
}

94
vendor/github.com/cilium/ebpf/btf/types.go
generated
vendored
@@ -6,12 +6,12 @@ import (
"fmt"
"io"
"math"
"slices"
"strings"

"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"golang.org/x/exp/slices"
)

// Mirrors MAX_RESOLVE_DEPTH in libbpf.
@@ -318,6 +318,18 @@ func (f *Fwd) copy() Type {
return &cpy
}

func (f *Fwd) matches(typ Type) bool {
if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct {
return true
}

if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion {
return true
}

return false
}

// Typedef is an alias of a Type.
type Typedef struct {
Name string
@@ -655,75 +667,44 @@ func alignof(typ Type) (int, error) {
return 0, fmt.Errorf("can't calculate alignment of %T", t)
}

if !pow(n) {
if !internal.IsPow(n) {
return 0, fmt.Errorf("alignment value %d is not a power of two", n)
}

return n, nil
}

// pow returns true if n is a power of two.
func pow(n int) bool {
return n != 0 && (n&(n-1)) == 0
}

// Transformer modifies a given Type and returns the result.
//
// For example, UnderlyingType removes any qualifiers or typedefs from a type.
// See the example on Copy for how to use a transform.
type Transformer func(Type) Type

// Copy a Type recursively.
//
// typ may form a cycle. If transform is not nil, it is called with the
// to be copied type, and the returned value is copied instead.
func Copy(typ Type, transform Transformer) Type {
copies := copier{copies: make(map[Type]Type)}
copies.copy(&typ, transform)
return typ
// typ may form a cycle.
func Copy(typ Type) Type {
return copyType(typ, nil, make(map[Type]Type), nil)
}

// copy a slice of Types recursively.
//
// See Copy for the semantics.
func copyTypes(types []Type, transform Transformer) []Type {
result := make([]Type, len(types))
copy(result, types)

copies := copier{copies: make(map[Type]Type, len(types))}
for i := range result {
copies.copy(&result[i], transform)
func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type {
if typ == nil {
return nil
}

return result
cpy, ok := copies[typ]
if ok {
// This has been copied previously, no need to continue.
return cpy
}

type copier struct {
copies map[Type]Type
work typeDeque
cpy = typ.copy()
copies[typ] = cpy

if id, ok := ids[typ]; ok {
copiedIDs[cpy] = id
}

func (c *copier) copy(typ *Type, transform Transformer) {
for t := typ; t != nil; t = c.work.Pop() {
// *t is the identity of the type.
if cpy := c.copies[*t]; cpy != nil {
*t = cpy
continue
}
children(cpy, func(child *Type) bool {
*child = copyType(*child, ids, copies, copiedIDs)
return true
})

var cpy Type
if transform != nil {
cpy = transform(*t).copy()
} else {
cpy = (*t).copy()
}

c.copies[*t] = cpy
*t = cpy

// Mark any nested types for copying.
walkType(cpy, c.work.Push)
}
return cpy
}
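
Note the API change in this hunk: Copy lost its Transformer parameter, and the recursion in copyType now uses children to rewrite each child in place while the copies map memoizes already-copied nodes, which is what keeps cyclic type graphs from recursing forever. A hedged call-site sketch of the migration:

package main

import "github.com/cilium/ebpf/btf"

// cloneType shows the new Copy shape: the Transformer parameter is gone.
// v0.12 equivalent: cpy := btf.Copy(typ, nil)
func cloneType(typ btf.Type) btf.Type {
	return btf.Copy(typ) // v0.16: cycle-safe deep copy, no transform hook
}

func main() {
	var i btf.Int
	_ = cloneType(&i)
}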

type typeDeque = internal.Deque[*Type]
@@ -1226,12 +1207,15 @@ func UnderlyingType(typ Type) Type {
return &cycle{typ}
}

// as returns typ if is of type T. Otherwise it peels qualifiers and Typedefs
// As returns typ if is of type T. Otherwise it peels qualifiers and Typedefs
// until it finds a T.
//
// Returns the zero value and false if there is no T or if the type is nested
// too deeply.
func as[T Type](typ Type) (T, bool) {
func As[T Type](typ Type) (T, bool) {
// NB: We can't make this function return (*T) since then
// we can't assert that a type matches an interface which
// embeds Type: as[composite](T).
for depth := 0; depth <= maxResolveDepth; depth++ {
switch v := (typ).(type) {
case T:
2
vendor/github.com/cilium/ebpf/collection.go
generated
vendored
@@ -57,7 +57,7 @@ func (cs *CollectionSpec) Copy() *CollectionSpec {
Maps: make(map[string]*MapSpec, len(cs.Maps)),
Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
ByteOrder: cs.ByteOrder,
Types: cs.Types,
Types: cs.Types.Copy(),
}

for name, spec := range cs.Maps {
23
vendor/github.com/cilium/ebpf/internal/cpu.go → vendor/github.com/cilium/ebpf/cpu.go
generated
vendored
@@ -1,17 +1,32 @@
package internal
package ebpf

import (
"fmt"
"os"
"strings"
"sync"
)

// PossibleCPUs returns the max number of CPUs a system may possibly have
// Logical CPU numbers must be of the form 0-n
var PossibleCPUs = Memoize(func() (int, error) {
var possibleCPU = sync.OnceValues(func() (int, error) {
return parseCPUsFromFile("/sys/devices/system/cpu/possible")
})

// PossibleCPU returns the max number of CPUs a system may possibly have
// Logical CPU numbers must be of the form 0-n
func PossibleCPU() (int, error) {
return possibleCPU()
}

// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if
// the error is non-nil.
func MustPossibleCPU() int {
cpus, err := PossibleCPU()
if err != nil {
panic(err)
}
return cpus
}

func parseCPUsFromFile(path string) (int, error) {
spec, err := os.ReadFile(path)
if err != nil {
234
vendor/github.com/cilium/ebpf/elf_reader.go
generated
vendored
@@ -15,6 +15,7 @@ import (
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)

@@ -25,7 +26,12 @@ type kconfigMeta struct {
Offset uint32
}

type kfuncMeta struct{}
type kfuncMetaKey struct{}

type kfuncMeta struct {
Binding elf.SymBind
Func *btf.Func
}

// elfCode is a convenience to reduce the amount of arguments that have to
// be passed around explicitly. You should treat its contents as immutable.
@@ -456,6 +462,8 @@ func jumpTarget(offset uint64, ins asm.Instruction) uint64 {
return uint64(dest)
}

var errUnsupportedBinding = errors.New("unsupported binding")

func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
var (
typ = elf.ST_TYPE(rel.Info)
@@ -472,7 +480,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
}

if bind != elf.STB_GLOBAL {
return fmt.Errorf("map %q: unsupported relocation %s", name, bind)
return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind)
}

if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
@@ -488,7 +496,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
switch typ {
case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind)
return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
}

// This is really a reference to a static symbol, which clang doesn't
@@ -499,7 +507,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
case elf.STT_OBJECT:
// LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind)
return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
}

offset = uint32(rel.Value)
@@ -507,7 +515,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
case elf.STT_NOTYPE:
// LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
if bind != elf.STB_LOCAL {
return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind)
return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind)
}

offset = uint32(rel.Value)
@@ -535,12 +543,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
switch typ {
case elf.STT_NOTYPE, elf.STT_FUNC:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
}

case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind)
}

// The function we want to call is in the indicated section,
@@ -563,12 +571,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
switch typ {
case elf.STT_FUNC:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
}

case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
}

// ins.Constant already contains the offset in bytes from the
@@ -597,8 +605,8 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
// function declarations, as well as extern kfunc declarations using __ksym
// and extern kconfig variables declared using __kconfig.
case undefSection:
if bind != elf.STB_GLOBAL {
return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK {
return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
}

if typ != elf.STT_NOTYPE {
@@ -607,13 +615,25 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err

kf := ec.kfuncs[name]
switch {
// If a Call instruction is found and the datasec has a btf.Func with a Name
// that matches the symbol name we mark the instruction as a call to a kfunc.
// If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name
// that matches the symbol name we mark the instruction as referencing a kfunc.
case kf != nil && ins.OpCode.JumpOp() == asm.Call:
ins.Metadata.Set(kfuncMeta{}, kf)
ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{
Func: kf,
Binding: bind,
})

ins.Src = asm.PseudoKfuncCall
ins.Constant = -1

case kf != nil && ins.OpCode.IsDWordLoad():
ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{
Func: kf,
Binding: bind,
})

ins.Constant = 0

// If no kconfig map is found, this must be a symbol reference from inline
// asm (see testdata/loader.c:asm_relocation()) or a call to a forward
// function declaration (see testdata/fwd_decl.c). Don't interfere, These
@@ -623,6 +643,10 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
// require it to contain the symbol to disambiguate between inline asm
// relos and kconfigs.
case ec.kconfig != nil && ins.OpCode.IsDWordLoad():
if bind != elf.STB_GLOBAL {
return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
}

for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars {
if vsi.Type.(*btf.Var).Name != rel.Name {
continue
@@ -948,6 +972,9 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b
return nil, fmt.Errorf("resolving values contents: %w", err)
}

case "map_extra":
return nil, fmt.Errorf("BTF map definition: field %s: %w", member.Name, ErrNotSupported)

default:
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
}
@@ -1181,109 +1208,106 @@ func (ec *elfCode) loadKsymsSection() error {
return nil
}

func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
types := []struct {
prefix string
progType ProgramType
attachType AttachType
progFlags uint32
}{
// Please update the types from libbpf.c and follow the order of it.
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
{"socket", SocketFilter, AttachNone, 0},
{"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0},
{"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0},
{"kprobe/", Kprobe, AttachNone, 0},
{"uprobe/", Kprobe, AttachNone, 0},
{"kretprobe/", Kprobe, AttachNone, 0},
{"uretprobe/", Kprobe, AttachNone, 0},
{"tc", SchedCLS, AttachNone, 0},
{"classifier", SchedCLS, AttachNone, 0},
{"action", SchedACT, AttachNone, 0},
{"tracepoint/", TracePoint, AttachNone, 0},
{"tp/", TracePoint, AttachNone, 0},
{"raw_tracepoint/", RawTracepoint, AttachNone, 0},
{"raw_tp/", RawTracepoint, AttachNone, 0},
{"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0},
{"raw_tp.w/", RawTracepointWritable, AttachNone, 0},
{"tp_btf/", Tracing, AttachTraceRawTp, 0},
{"fentry/", Tracing, AttachTraceFEntry, 0},
{"fmod_ret/", Tracing, AttachModifyReturn, 0},
{"fexit/", Tracing, AttachTraceFExit, 0},
{"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
{"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
{"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},
{"freplace/", Extension, AttachNone, 0},
{"lsm/", LSM, AttachLSMMac, 0},
{"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
{"iter/", Tracing, AttachTraceIter, 0},
{"iter.s/", Tracing, AttachTraceIter, unix.BPF_F_SLEEPABLE},
{"syscall", Syscall, AttachNone, 0},
{"xdp.frags_devmap/", XDP, AttachXDPDevMap, unix.BPF_F_XDP_HAS_FRAGS},
{"xdp_devmap/", XDP, AttachXDPDevMap, 0},
{"xdp.frags_cpumap/", XDP, AttachXDPCPUMap, unix.BPF_F_XDP_HAS_FRAGS},
{"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
{"xdp.frags", XDP, AttachNone, unix.BPF_F_XDP_HAS_FRAGS},
{"xdp", XDP, AttachNone, 0},
{"perf_event", PerfEvent, AttachNone, 0},
{"lwt_in", LWTIn, AttachNone, 0},
{"lwt_out", LWTOut, AttachNone, 0},
{"lwt_xmit", LWTXmit, AttachNone, 0},
{"lwt_seg6local", LWTSeg6Local, AttachNone, 0},
{"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0},
{"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0},
{"cgroup/skb", CGroupSKB, AttachNone, 0},
{"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0},
{"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0},
{"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0},
{"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0},
{"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0},
{"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0},
{"sockops", SockOps, AttachCGroupSockOps, 0},
{"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0},
{"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0},
{"sk_skb", SkSKB, AttachNone, 0},
{"sk_msg", SkMsg, AttachSkMsgVerdict, 0},
{"lirc_mode2", LircMode2, AttachLircMode2, 0},
{"flow_dissector", FlowDissector, AttachFlowDissector, 0},
{"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0},
{"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0},
{"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0},
{"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0},
{"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0},
{"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0},
{"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0},
{"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0},
{"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0},
{"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0},
{"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0},
{"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0},
{"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0},
{"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0},
{"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0},
{"struct_ops+", StructOps, AttachNone, 0},
{"sk_lookup/", SkLookup, AttachSkLookup, 0},
{"seccomp", SocketFilter, AttachNone, 0},
{"kprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
{"kretprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
// Document all prefixes in docs/ebpf/concepts/elf-sections.md.
type libbpfElfSectionDef struct {
pattern string
programType sys.ProgType
attachType sys.AttachType
flags libbpfElfSectionFlag
}

for _, t := range types {
if !strings.HasPrefix(sectionName, t.prefix) {
type libbpfElfSectionFlag uint32

// The values correspond to enum sec_def_flags in libbpf.
const (
_SEC_NONE libbpfElfSectionFlag = 0

_SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1)
_SEC_ATTACHABLE
_SEC_ATTACH_BTF
_SEC_SLEEPABLE
_SEC_XDP_FRAGS
_SEC_USDT

// Ignore any present extra in order to preserve backwards compatibility
// with earlier versions of the library.
ignoreExtra

_SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT
)

func init() {
// Compatibility with older versions of the library.
// We prepend libbpf definitions since they contain a prefix match
// for "xdp".
elfSectionDefs = append([]libbpfElfSectionDef{
{"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra},
{"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
{"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0},
{"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
{"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0},
// This has been in the library since the beginning of time. Not sure
// where it came from.
{"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
}, elfSectionDefs...)
}

func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
// Skip optional program marking for now.
sectionName = strings.TrimPrefix(sectionName, "?")

for _, t := range elfSectionDefs {
extra, ok := matchSectionName(sectionName, t.pattern)
if !ok {
continue
}

if !strings.HasSuffix(t.prefix, "/") {
return t.progType, t.attachType, t.progFlags, ""
programType := ProgramType(t.programType)
attachType := AttachType(t.attachType)

var flags uint32
if t.flags&_SEC_SLEEPABLE > 0 {
flags |= unix.BPF_F_SLEEPABLE
}
if t.flags&_SEC_XDP_FRAGS > 0 {
flags |= unix.BPF_F_XDP_HAS_FRAGS
}
if t.flags&_SEC_EXP_ATTACH_OPT > 0 {
if programType == XDP {
// The library doesn't yet have code to fallback to not specifying
// attach type. Only do this for XDP since we've enforced correct
// attach type for all other program types.
attachType = AttachNone
}
}
if t.flags&ignoreExtra > 0 {
extra = ""
}

return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):]
return programType, attachType, flags, extra
}

return UnspecifiedProgram, AttachNone, 0, ""
}

// matchSectionName checks a section name against a pattern.
//
// Its behaviour mirrors that of libbpf's sec_def_matches.
func matchSectionName(sectionName, pattern string) (extra string, found bool) {
have, extra, found := strings.Cut(sectionName, "/")
want := strings.TrimRight(pattern, "+/")

if strings.HasSuffix(pattern, "/") {
// Section name must have a slash and extra may be empty.
return extra, have == want && found
} else if strings.HasSuffix(pattern, "+") {
// Section name may have a slash and extra may be empty.
return extra, have == want
}

// Section name must have a prefix. extra is ignored.
return "", strings.HasPrefix(sectionName, pattern)
}
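
The three pattern forms deserve an example: a trailing "/" requires a slash (with possibly empty extra), a trailing "+" makes the slash optional, and anything else is a plain prefix match. An in-package illustration (necessarily in-package, since matchSectionName is unexported; expected results follow from the code above):

package ebpf

import "fmt"

// exampleMatchSectionName is a hedged sketch, not part of the library.
func exampleMatchSectionName() {
	fmt.Println(matchSectionName("kprobe/sys_clone", "kprobe+")) // "sys_clone" true
	fmt.Println(matchSectionName("kprobe", "kprobe+"))           // "" true (slash optional)
	fmt.Println(matchSectionName("tc/ingress", "tc/ingress"))    // "" true (prefix match)
	fmt.Println(matchSectionName("xdp.frags", "xdp"))            // "" true (prefix match)
}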

func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
rels := make(map[uint64]elf.Symbol)

109
vendor/github.com/cilium/ebpf/elf_sections.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
// Code generated by internal/cmd/gensections.awk; DO NOT EDIT.

package ebpf

// Code in this file is derived from libbpf, available under BSD-2-Clause.

import "github.com/cilium/ebpf/internal/sys"

var elfSectionDefs = []libbpfElfSectionDef{
{"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
{"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE},
{"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE},
{"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
{"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
{"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
{"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
{"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
{"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE},
{"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
{"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE},
{"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
{"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE},
{"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
{"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE},
{"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
{"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE},
{"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT},
{"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE},
{"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
{"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
{"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE},
{"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE},
{"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
{"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE},
{"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE},
{"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE},
{"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE},
{"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
{"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE},
{"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
{"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE},
{"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
{"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE},
{"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF},
{"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF},
{"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF},
{"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF},
{"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
{"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
{"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
{"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF},
{"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF},
{"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
{"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF},
{"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF},
{"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE},
{"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE},
{"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
{"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE},
{"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
{"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE},
{"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS},
{"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT},
{"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE},
{"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE},
{"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE},
{"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE},
{"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE},
{"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT},
{"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT},
{"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT},
{"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE},
{"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT},
{"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT},
{"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT},
{"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT},
{"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT},
{"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE},
{"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE},
{"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE},
{"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT},
{"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE},
{"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE},
{"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE},
{"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE},
{"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE},
{"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE},
{"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE},
{"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE},
{"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE},
{"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE},
{"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE},
{"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE},
{"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE},
{"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE},
{"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE},
{"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE},
{"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE},
{"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE},
|
||||
{"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE},
|
||||
{"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE},
|
||||
{"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE},
|
||||
{"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE},
|
||||
{"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT},
|
||||
{"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE},
|
||||
{"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE},
|
||||
{"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE},
|
||||
{"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE},
|
||||
}
|
||||
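Note (editorial sketch, not part of this commit): each row maps an ELF section name to a program type, attach type and flags. Assuming the usual libbpf convention that a trailing "+" allows an arbitrary suffix (so "fentry+" matches "fentry/tcp_connect") while patterns without it must match exactly, the matching rule could look like the following; matchSection is a hypothetical helper, not the library's API, and it needs the "strings" package:

func matchSection(pattern, section string) bool {
	// "kprobe+" style patterns match any section with that prefix.
	if prefix, ok := strings.CutSuffix(pattern, "+"); ok {
		return strings.HasPrefix(section, prefix)
	}
	// Patterns without "+" must match the section name exactly.
	return pattern == section
}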
46
vendor/github.com/cilium/ebpf/info.go
generated
vendored
@@ -20,6 +20,23 @@ import (
"github.com/cilium/ebpf/internal/unix"
)

// The *Info structs expose metadata about a program or map. Most
// fields are exposed via a getter:
//
// func (*MapInfo) ID() (MapID, bool)
//
// This is because the metadata available changes based on kernel version.
// The second boolean return value indicates whether a particular field is
// available on the current kernel.
//
// Always add new metadata as such a getter, unless you can somehow get the
// value of the field on all supported kernels. Also document which version
// a particular field first appeared in.
//
// Some metadata is a buffer which needs additional parsing. In this case,
// store the undecoded data in the Info struct and provide a getter which
// decodes it when necessary. See ProgramInfo.Instructions for an example.

// MapInfo describes a map.
type MapInfo struct {
Type MapType
@@ -30,6 +47,8 @@ type MapInfo struct {
Flags uint32
// Name as supplied by user space at load time. Available from 4.15.
Name string

btf btf.ID
}

func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
@@ -50,6 +69,7 @@ func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
info.MaxEntries,
uint32(info.MapFlags),
unix.ByteSliceToString(info.Name[:]),
btf.ID(info.BtfId),
}, nil
}

@@ -77,12 +97,27 @@ func (mi *MapInfo) ID() (MapID, bool) {
return mi.id, mi.id > 0
}

// BTFID returns the BTF ID associated with the Map.
//
// The ID is only valid as long as the associated Map is kept alive.
// Available from 4.18.
//
// The bool return value indicates whether this optional field is available and
// populated. (The field may be available but not populated if the kernel
// supports the field but the Map was loaded without BTF information.)
func (mi *MapInfo) BTFID() (btf.ID, bool) {
return mi.btf, mi.btf > 0
}

// programStats holds statistics of a program.
type programStats struct {
// Total accumulated runtime of the program in ns.
runtime time.Duration
// Total number of times the program was called.
runCount uint64
// Total number of times the program was NOT called.
// Added in commit 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented").
recursionMisses uint64
}

// ProgramInfo describes a program.
@@ -127,6 +162,7 @@ func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
stats: &programStats{
runtime: time.Duration(info.RunTimeNs),
runCount: info.RunCnt,
recursionMisses: info.RecursionMisses,
},
}

@@ -259,6 +295,16 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
return time.Duration(0), false
}

// RecursionMisses returns the total number of times the program was NOT called.
// This can happen when another bpf program is already running on the cpu, which
// is likely to happen for example when you interrupt bpf program execution.
func (pi *ProgramInfo) RecursionMisses() (uint64, bool) {
if pi.stats != nil {
return pi.stats.recursionMisses, true
}
return 0, false
}

// Instructions returns the 'xlated' instruction stream of the program
// after it has been verified and rewritten by the kernel. These instructions
// cannot be loaded back into the kernel as-is, this is mainly used for
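Note (editorial sketch, not part of this commit): the ok-bool getter pattern described above is consumed like this from user code; creating a map requires CAP_BPF or root.

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 1,
	})
	if err != nil {
		panic(err)
	}
	defer m.Close()

	info, err := m.Info()
	if err != nil {
		panic(err)
	}
	// Each getter reports whether the running kernel exposes the field.
	if id, ok := info.ID(); ok {
		fmt.Println("map ID:", id)
	}
	if btfID, ok := info.BTFID(); ok {
		fmt.Println("BTF ID:", btfID)
	}
}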
60
vendor/github.com/cilium/ebpf/internal/auxv.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
package internal

import (
"errors"
"io"
_ "unsafe"
)

type auxvPairReader interface {
Close() error
ReadAuxvPair() (uint64, uint64, error)
}

// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h
const (
_AT_NULL = 0 // End of vector
_AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image
)

//go:linkname runtime_getAuxv runtime.getAuxv
func runtime_getAuxv() []uintptr

type auxvRuntimeReader struct {
data []uintptr
index int
}

func (r *auxvRuntimeReader) Close() error {
return nil
}

func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) {
if r.index >= len(r.data)+2 {
return 0, 0, io.EOF
}

// we manually add the (_AT_NULL, _AT_NULL) pair at the end
// that is not provided by the go runtime
var tag, value uintptr
if r.index+1 < len(r.data) {
tag, value = r.data[r.index], r.data[r.index+1]
} else {
tag, value = _AT_NULL, _AT_NULL
}
r.index += 2
return uint64(tag), uint64(value), nil
}

func newAuxvRuntimeReader() (auxvPairReader, error) {
data := runtime_getAuxv()

if len(data)%2 != 0 {
return nil, errors.New("malformed auxv passed from runtime")
}

return &auxvRuntimeReader{
data: data,
index: 0,
}, nil
}
3
vendor/github.com/cilium/ebpf/internal/endian_be.go
generated
vendored
@@ -7,6 +7,3 @@ import "encoding/binary"
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian = binary.BigEndian

// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
const ClangEndian = "eb"

3
vendor/github.com/cilium/ebpf/internal/endian_le.go
generated
vendored
@@ -7,6 +7,3 @@ import "encoding/binary"
// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
// depending on the host's endianness.
var NativeEndian = binary.LittleEndian

// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
const ClangEndian = "el"
29
vendor/github.com/cilium/ebpf/internal/errors.go
generated
vendored
@@ -12,7 +12,7 @@ import (
//
// The default error output is a summary of the full log. The latter can be
// accessed via VerifierError.Log or by formatting the error, see Format.
func ErrorWithLog(source string, err error, log []byte, truncated bool) *VerifierError {
func ErrorWithLog(source string, err error, log []byte) *VerifierError {
const whitespace = "\t\r\v\n "

// Convert verifier log C string by truncating it on the first 0 byte
@@ -23,7 +23,7 @@ func ErrorWithLog(source string, err error, log []byte, truncated bool) *Verifie

log = bytes.Trim(log, whitespace)
if len(log) == 0 {
return &VerifierError{source, err, nil, truncated}
return &VerifierError{source, err, nil, false}
}

logLines := bytes.Split(log, []byte{'\n'})
@@ -34,7 +34,7 @@ func ErrorWithLog(source string, err error, log []byte, truncated bool) *Verifie
lines = append(lines, string(bytes.TrimRight(line, whitespace)))
}

return &VerifierError{source, err, lines, truncated}
return &VerifierError{source, err, lines, false}
}

// VerifierError includes information from the eBPF verifier.
@@ -46,7 +46,7 @@ type VerifierError struct {
Cause error
// The verifier output split into lines.
Log []string
// Whether the log output is truncated, based on several heuristics.
// Deprecated: the log is never truncated anymore.
Truncated bool
}

@@ -70,7 +70,7 @@ func (le *VerifierError) Error() string {
}

lines := log[n-1:]
if n >= 2 && (includePreviousLine(log[n-1]) || le.Truncated) {
if n >= 2 && includePreviousLine(log[n-1]) {
// Add one more line of context if it aids understanding the error.
lines = log[n-2:]
}
@@ -81,22 +81,9 @@ func (le *VerifierError) Error() string {
}

omitted := len(le.Log) - len(lines)
if omitted == 0 && !le.Truncated {
return b.String()
}

b.WriteString(" (")
if le.Truncated {
b.WriteString("truncated")
}

if omitted > 0 {
if le.Truncated {
b.WriteString(", ")
fmt.Fprintf(&b, " (%d line(s) omitted)", omitted)
}
fmt.Fprintf(&b, "%d line(s) omitted", omitted)
}
b.WriteString(")")

return b.String()
}
@@ -188,10 +175,6 @@ func (le *VerifierError) Format(f fmt.State, verb rune) {
}
}

if le.Truncated {
fmt.Fprintf(f, "\n\t(truncated)")
}

default:
fmt.Fprintf(f, "%%!%c(BADVERB)", verb)
}
2
vendor/github.com/cilium/ebpf/internal/feature.go
generated
vendored
@@ -37,7 +37,7 @@ func (ufe *UnsupportedFeatureError) Is(target error) bool {
type FeatureTest struct {
// The name of the feature being detected.
Name string
// Version in in the form Major.Minor[.Patch].
// Version in the form Major.Minor[.Patch].
Version string
// The feature test itself.
Fn FeatureTestFn
74
vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
package kallsyms

import (
"bufio"
"bytes"
"io"
"os"
"sync"
)

var kernelModules struct {
sync.RWMutex
// function to kernel module mapping
kmods map[string]string
}

// KernelModule returns the kernel module, if any, a probe-able function is contained in.
func KernelModule(fn string) (string, error) {
kernelModules.RLock()
kmods := kernelModules.kmods
kernelModules.RUnlock()

if kmods == nil {
kernelModules.Lock()
defer kernelModules.Unlock()
kmods = kernelModules.kmods
}

if kmods != nil {
return kmods[fn], nil
}

f, err := os.Open("/proc/kallsyms")
if err != nil {
return "", err
}
defer f.Close()
kmods, err = loadKernelModuleMapping(f)
if err != nil {
return "", err
}

kernelModules.kmods = kmods
return kmods[fn], nil
}

// FlushKernelModuleCache removes any cached information about function to kernel module mapping.
func FlushKernelModuleCache() {
kernelModules.Lock()
defer kernelModules.Unlock()

kernelModules.kmods = nil
}

func loadKernelModuleMapping(f io.Reader) (map[string]string, error) {
mods := make(map[string]string)
scanner := bufio.NewScanner(f)
for scanner.Scan() {
fields := bytes.Fields(scanner.Bytes())
if len(fields) < 4 {
continue
}
switch string(fields[1]) {
case "t", "T":
mods[string(fields[2])] = string(bytes.Trim(fields[3], "[]"))
default:
continue
}
}
if scanner.Err() != nil {
return nil, scanner.Err()
}
return mods, nil
}
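Note (editorial sketch, not part of this commit): /proc/kallsyms lines look like "ffffffffc0a81040 t nf_ct_frag6_expire [nf_defrag_ipv6]"; only text symbols ("t"/"T") that carry a bracketed module name end up in the map. Inside the package the parser could be exercised like this (the symbol and module names are illustrative):

func ExampleLoadKernelModuleMapping() {
	r := strings.NewReader(
		"ffffffffc0a81040 t nf_ct_frag6_expire [nf_defrag_ipv6]\n" +
			"ffffffff81000000 T _text\n") // only 3 fields, skipped
	mods, err := loadKernelModuleMapping(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(mods["nf_ct_frag6_expire"])
	// Output: nf_defrag_ipv6
}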
13
vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
generated
vendored
@@ -263,12 +263,25 @@ func PutInteger(data []byte, integer *btf.Int, n uint64) error {
return fmt.Errorf("invalid boolean value: %d", n)
}

if len(data) < int(integer.Size) {
return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data))
}

switch integer.Size {
case 1:
if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) {
return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
}
data[0] = byte(n)
case 2:
if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) {
return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
}
internal.NativeEndian.PutUint16(data, uint16(n))
case 4:
if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) {
return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size)
}
internal.NativeEndian.PutUint32(data, uint32(n))
case 8:
internal.NativeEndian.PutUint64(data, uint64(n))

@@ -6,3 +6,8 @@ import "golang.org/x/exp/constraints"
func Align[I constraints.Integer](n, alignment I) I {
return (n + alignment - 1) / alignment * alignment
}

// IsPow returns true if n is a power of two.
func IsPow[I constraints.Integer](n I) bool {
return n != 0 && (n&(n-1)) == 0
}
26
vendor/github.com/cilium/ebpf/internal/memoize.go
generated
vendored
@@ -1,26 +0,0 @@
package internal

import (
"sync"
)

type memoizedFunc[T any] struct {
once sync.Once
fn func() (T, error)
result T
err error
}

func (mf *memoizedFunc[T]) do() (T, error) {
mf.once.Do(func() {
mf.result, mf.err = mf.fn()
})
return mf.result, mf.err
}

// Memoize the result of a function call.
//
// fn is only ever called once, even if it returns an error.
func Memoize[T any](fn func() (T, error)) func() (T, error) {
return (&memoizedFunc[T]{fn: fn}).do
}
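Note (editorial, not part of this commit): the deleted Memoize helper is superseded by sync.OnceValues from the Go 1.21 standard library, which the tracefs and version changes further down switch to. A minimal sketch of the equivalence, assuming a hypothetical fn with the same signature:

import "sync"

func memoized[T any](fn func() (T, error)) func() (T, error) {
	// sync.OnceValues caches both return values of the first call,
	// including a non-nil error, matching the removed Memoize helper.
	return sync.OnceValues(fn)
}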
6
vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
generated
vendored
@@ -21,9 +21,11 @@ func _() {
_ = x[BPF_F_MMAPABLE-1024]
_ = x[BPF_F_PRESERVE_ELEMS-2048]
_ = x[BPF_F_INNER_MAP-4096]
_ = x[BPF_F_LINK-8192]
_ = x[BPF_F_PATH_FD-16384]
}

const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAP"
const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD"

var _MapFlags_map = map[MapFlags]string{
1: _MapFlags_name[0:17],
@@ -39,6 +41,8 @@ var _MapFlags_map = map[MapFlags]string{
1024: _MapFlags_name[155:169],
2048: _MapFlags_name[169:189],
4096: _MapFlags_name[189:204],
8192: _MapFlags_name[204:214],
16384: _MapFlags_name[214:227],
}

func (i MapFlags) String() string {
2
vendor/github.com/cilium/ebpf/internal/sys/signals.go
generated
vendored
@@ -63,7 +63,7 @@ func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error {
// For amd64, runtime.sigaddset() performs the following operation:
// set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31)
//
// This trick depends on sigset being two u32's, causing a signal in the the
// This trick depends on sigset being two u32's, causing a signal in the
// bottom 31 bits to be written to the low word if bit 32 is low, or the high
// word if bit 32 is high.
51
vendor/github.com/cilium/ebpf/internal/sys/syscall.go
generated
vendored
@@ -71,12 +71,52 @@ func (i *LinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

var _ Info = (*BtfInfo)(nil)

func (i *BtfInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) {
return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
}

// ObjInfo retrieves information about a BPF Fd.
//
// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo.
@@ -139,6 +179,17 @@ const (
BPF_F_MMAPABLE
BPF_F_PRESERVE_ELEMS
BPF_F_INNER_MAP
BPF_F_LINK
BPF_F_PATH_FD
)

// Flags used by bpf_mprog.
const (
BPF_F_REPLACE = 1 << (iota + 2)
BPF_F_BEFORE
BPF_F_AFTER
BPF_F_ID
BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK
)

// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
251
vendor/github.com/cilium/ebpf/internal/sys/types.go
vendored
@@ -65,7 +65,14 @@ const (
BPF_TCX_INGRESS AttachType = 46
BPF_TCX_EGRESS AttachType = 47
BPF_TRACE_UPROBE_MULTI AttachType = 48
__MAX_BPF_ATTACH_TYPE AttachType = 49
BPF_CGROUP_UNIX_CONNECT AttachType = 49
BPF_CGROUP_UNIX_SENDMSG AttachType = 50
BPF_CGROUP_UNIX_RECVMSG AttachType = 51
BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52
BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53
BPF_NETKIT_PRIMARY AttachType = 54
BPF_NETKIT_PEER AttachType = 55
__MAX_BPF_ATTACH_TYPE AttachType = 56
)

type Cmd uint32
@@ -351,7 +358,8 @@ const (
BPF_LINK_TYPE_NETFILTER LinkType = 10
BPF_LINK_TYPE_TCX LinkType = 11
BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12
MAX_BPF_LINK_TYPE LinkType = 13
BPF_LINK_TYPE_NETKIT LinkType = 13
__MAX_BPF_LINK_TYPE LinkType = 14
)

type MapType uint32
@@ -379,6 +387,7 @@ const (
BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19
BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21
BPF_MAP_TYPE_QUEUE MapType = 22
BPF_MAP_TYPE_STACK MapType = 23
@@ -393,6 +402,18 @@ const (
BPF_MAP_TYPE_CGRP_STORAGE MapType = 32
)

type PerfEventType uint32

const (
BPF_PERF_EVENT_UNSPEC PerfEventType = 0
BPF_PERF_EVENT_UPROBE PerfEventType = 1
BPF_PERF_EVENT_URETPROBE PerfEventType = 2
BPF_PERF_EVENT_KPROBE PerfEventType = 3
BPF_PERF_EVENT_KRETPROBE PerfEventType = 4
BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5
BPF_PERF_EVENT_EVENT PerfEventType = 6
)

type ProgType uint32

const (
@@ -462,6 +483,15 @@ const (
BPF_STATS_RUN_TIME StatsType = 0
)

type TcxActionBase int32

const (
TCX_NEXT TcxActionBase = -1
TCX_PASS TcxActionBase = 0
TCX_DROP TcxActionBase = 2
TCX_REDIRECT TcxActionBase = 7
)

type XdpAction uint32

const (
@@ -498,7 +528,7 @@ type LinkInfo struct {
Id LinkID
ProgId uint32
_ [4]byte
Extra [32]uint8
Extra [48]uint8
}

type MapInfo struct {
@@ -702,6 +732,45 @@ func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) {
return NewFD(int(fd))
}

type LinkCreateNetfilterAttr struct {
ProgFd uint32
TargetFd uint32
AttachType AttachType
Flags uint32
Pf uint32
Hooknum uint32
Priority int32
NetfilterFlags uint32
_ [32]byte
}

func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) {
fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(int(fd))
}

type LinkCreateNetkitAttr struct {
ProgFd uint32
TargetIfindex uint32
AttachType AttachType
Flags uint32
RelativeFdOrId uint32
_ [4]byte
ExpectedRevision uint64
_ [32]byte
}

func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) {
fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(int(fd))
}

type LinkCreatePerfEventAttr struct {
ProgFd uint32
TargetFd uint32
@@ -719,6 +788,25 @@ func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
return NewFD(int(fd))
}

type LinkCreateTcxAttr struct {
ProgFd uint32
TargetIfindex uint32
AttachType AttachType
Flags uint32
RelativeFdOrId uint32
_ [4]byte
ExpectedRevision uint64
_ [32]byte
}

func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) {
fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(int(fd))
}

type LinkCreateTracingAttr struct {
ProgFd uint32
TargetFd uint32
@@ -738,6 +826,49 @@ func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) {
return NewFD(int(fd))
}
type LinkCreateUprobeMultiAttr struct {
ProgFd uint32
TargetFd uint32
AttachType AttachType
Flags uint32
Path Pointer
Offsets Pointer
RefCtrOffsets Pointer
Cookies Pointer
Count uint32
UprobeMultiFlags uint32
Pid uint32
_ [4]byte
}

func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) {
fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(int(fd))
}

type LinkGetFdByIdAttr struct{ Id LinkID }

func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) {
fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
if err != nil {
return nil, err
}
return NewFD(int(fd))
}

type LinkGetNextIdAttr struct {
Id LinkID
NextId LinkID
}

func LinkGetNextId(attr *LinkGetNextIdAttr) error {
_, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
return err
}

type LinkUpdateAttr struct {
LinkFd uint32
NewProgFd uint32
@@ -971,12 +1102,12 @@ func ObjPin(attr *ObjPinAttr) error {
}

type ProgAttachAttr struct {
TargetFd uint32
TargetFdOrIfindex uint32
AttachBpfFd uint32
AttachType uint32
AttachFlags uint32
ReplaceBpfFd uint32
RelativeFd uint32
RelativeFdOrId uint32
ExpectedRevision uint64
}

@@ -997,9 +1128,13 @@ func ProgBindMap(attr *ProgBindMapAttr) error {
}

type ProgDetachAttr struct {
TargetFd uint32
TargetFdOrIfindex uint32
AttachBpfFd uint32
AttachType uint32
AttachFlags uint32
_ [4]byte
RelativeFdOrId uint32
ExpectedRevision uint64
}

func ProgDetach(attr *ProgDetachAttr) error {
@@ -1065,12 +1200,12 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
}

type ProgQueryAttr struct {
TargetFd uint32
TargetFdOrIfindex uint32
AttachType AttachType
QueryFlags uint32
AttachFlags uint32
ProgIds Pointer
ProgCount uint32
Count uint32
_ [4]byte
ProgAttachFlags Pointer
LinkIds Pointer
@@ -1122,31 +1257,127 @@ func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) {
}

type CgroupLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
CgroupId uint64
AttachType AttachType
_ [4]byte
_ [36]byte
}

type IterLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
TargetName Pointer
TargetNameLen uint32
}

type KprobeLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
PerfEventType PerfEventType
_ [4]byte
FuncName Pointer
NameLen uint32
Offset uint32
Addr uint64
Missed uint64
_ [8]byte
}

type KprobeMultiLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
Addrs Pointer
Count uint32
Flags uint32
Missed uint64
_ [24]byte
}

type NetNsLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
NetnsIno uint32
AttachType AttachType
_ [40]byte
}

type NetfilterLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
Pf uint32
Hooknum uint32
Priority int32
Flags uint32
_ [32]byte
}

type NetkitLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
Ifindex uint32
AttachType AttachType
_ [40]byte
}

type PerfEventLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
PerfEventType PerfEventType
}

type RawTracepointLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
TpName Pointer
TpNameLen uint32
_ [36]byte
}

type TcxLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
Ifindex uint32
AttachType AttachType
_ [40]byte
}

type TracingLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
AttachType AttachType
TargetObjId uint32
TargetBtfId TypeID
_ [36]byte
}

type XDPLinkInfo struct{ Ifindex uint32 }
type XDPLinkInfo struct {
Type LinkType
Id LinkID
ProgId uint32
_ [4]byte
Ifindex uint32
_ [44]byte
}
6
vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
generated
vendored
@@ -32,6 +32,7 @@ func UnsafeBuffer(ptr unsafe.Pointer) Buffer {

// SyscallOutput prepares a Buffer for a syscall to write into.
//
// size is the length of the desired buffer in bytes.
// The buffer may point at the underlying memory of dst, in which case [Unmarshal]
// becomes a no-op.
//
@@ -53,6 +54,11 @@ func (b Buffer) CopyTo(dst []byte) int {
return copy(dst, b.unsafeBytes())
}

// AppendTo appends the buffer onto dst.
func (b Buffer) AppendTo(dst []byte) []byte {
return append(dst, b.unsafeBytes()...)
}

// Pointer returns the location where a syscall should write.
func (b Buffer) Pointer() sys.Pointer {
// NB: This deliberately ignores b.length to support zero-copy
3
vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go
generated
vendored
@@ -7,12 +7,11 @@ import (
"errors"
"fmt"
"reflect"
"slices"
"sync"
"unsafe"

"github.com/cilium/ebpf/internal"

"golang.org/x/exp/slices"
)

// Marshal turns data into a byte slice using the system's native endianness.
3
vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
generated
vendored
@@ -8,6 +8,7 @@ import (
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"

"github.com/cilium/ebpf/internal"
@@ -110,7 +111,7 @@ func sanitizeTracefsPath(path ...string) (string, error) {
// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing,
// but may also be available at /sys/kernel/debug/tracing if debugfs is mounted.
// The available tracefs paths will depend on distribution choices.
var getTracefsPath = internal.Memoize(func() (string, error) {
var getTracefsPath = sync.OnceValues(func() (string, error) {
for _, p := range []struct {
path string
fsType int64
12
vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
generated
vendored
@@ -25,6 +25,7 @@ const (
EACCES = linux.EACCES
EILSEQ = linux.EILSEQ
EOPNOTSUPP = linux.EOPNOTSUPP
ESTALE = linux.ESTALE
)

const (
@@ -39,6 +40,8 @@ const (
BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN
BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN
BPF_F_LOCK = linux.BPF_F_LOCK
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
BPF_TAG_SIZE = linux.BPF_TAG_SIZE
BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
@@ -98,6 +101,7 @@ type PerfEventMmapPage = linux.PerfEventMmapPage
type EpollEvent = linux.EpollEvent
type PerfEventAttr = linux.PerfEventAttr
type Utsname = linux.Utsname
type CPUSet = linux.CPUSet

func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
return linux.Syscall(trap, a1, a2, a3)
@@ -202,3 +206,11 @@ func Fstat(fd int, stat *Stat_t) error {
func SetsockoptInt(fd, level, opt, value int) error {
return linux.SetsockoptInt(fd, level, opt, value)
}

func SchedSetaffinity(pid int, set *CPUSet) error {
return linux.SchedSetaffinity(pid, set)
}

func SchedGetaffinity(pid int, set *CPUSet) error {
return linux.SchedGetaffinity(pid, set)
}
15
vendor/github.com/cilium/ebpf/internal/unix/types_other.go
generated
vendored
@@ -27,6 +27,7 @@ const (
EACCES
EILSEQ
EOPNOTSUPP
ESTALE
)

// Constants are distinct to avoid breaking switch statements.
@@ -41,6 +42,7 @@ const (
BPF_F_MMAPABLE
BPF_F_INNER_MAP
BPF_F_KPROBE_MULTI_RETURN
BPF_F_UPROBE_MULTI_RETURN
BPF_F_XDP_HAS_FRAGS
BPF_OBJ_NAME_LEN
BPF_TAG_SIZE
@@ -91,6 +93,7 @@ const (
DEBUGFS_MAGIC
BPF_RB_NO_WAKEUP
BPF_RB_FORCE_WAKEUP
BPF_F_LOCK
)

type Statfs_t struct {
@@ -294,3 +297,15 @@ func Fstat(fd int, stat *Stat_t) error {
func SetsockoptInt(fd, level, opt, value int) error {
return errNonLinux
}

type CPUSet struct{}

func (*CPUSet) Set(int) {}

func SchedSetaffinity(pid int, set *CPUSet) error {
return errNonLinux
}

func SchedGetaffinity(pid int, set *CPUSet) error {
return errNonLinux
}
50
vendor/github.com/cilium/ebpf/internal/vdso.go
generated
vendored
@@ -8,7 +8,6 @@ import (
"io"
"math"
"os"
"unsafe"

"github.com/cilium/ebpf/internal/unix"
)
@@ -20,21 +19,14 @@ var (
// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library
// linked into the current process image.
func vdsoVersion() (uint32, error) {
const uintptrIs32bits = unsafe.Sizeof((uintptr)(0)) == 4

// Read data from the auxiliary vector, which is normally passed directly
// to the process. Go does not expose that data, so we must read it from procfs.
// https://man7.org/linux/man-pages/man3/getauxval.3.html
av, err := os.Open("/proc/self/auxv")
if errors.Is(err, unix.EACCES) {
return 0, fmt.Errorf("opening auxv: %w (process may not be dumpable due to file capabilities)", err)
}
av, err := newAuxvRuntimeReader()
if err != nil {
return 0, fmt.Errorf("opening auxv: %w", err)
return 0, err
}

defer av.Close()

vdsoAddr, err := vdsoMemoryAddress(av, NativeEndian, uintptrIs32bits)
vdsoAddr, err := vdsoMemoryAddress(av)
if err != nil {
return 0, fmt.Errorf("finding vDSO memory address: %w", err)
}
@@ -55,43 +47,13 @@ func vdsoVersion() (uint32, error) {
return c, nil
}

type auxvPair32 struct {
Tag, Value uint32
}

type auxvPair64 struct {
Tag, Value uint64
}

func readAuxvPair(r io.Reader, order binary.ByteOrder, uintptrIs32bits bool) (tag, value uint64, _ error) {
if uintptrIs32bits {
var aux auxvPair32
if err := binary.Read(r, order, &aux); err != nil {
return 0, 0, fmt.Errorf("reading auxv entry: %w", err)
}
return uint64(aux.Tag), uint64(aux.Value), nil
}

var aux auxvPair64
if err := binary.Read(r, order, &aux); err != nil {
return 0, 0, fmt.Errorf("reading auxv entry: %w", err)
}
return aux.Tag, aux.Value, nil
}

// vdsoMemoryAddress returns the memory address of the vDSO library
// linked into the current process image. r is an io.Reader into an auxv blob.
func vdsoMemoryAddress(r io.Reader, order binary.ByteOrder, uintptrIs32bits bool) (uintptr, error) {
// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h
const (
_AT_NULL = 0 // End of vector
_AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image
)

func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) {
// Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`,
// the address of a page containing the virtual Dynamic Shared Object (vDSO).
for {
tag, value, err := readAuxvPair(r, order, uintptrIs32bits)
tag, value, err := r.ReadAuxvPair()
if err != nil {
return 0, err
}
3
vendor/github.com/cilium/ebpf/internal/version.go
generated
vendored
@@ -2,6 +2,7 @@ package internal

import (
"fmt"
"sync"

"github.com/cilium/ebpf/internal/unix"
)
@@ -79,7 +80,7 @@ func (v Version) Kernel() uint32 {
}

// KernelVersion returns the version of the currently running kernel.
var KernelVersion = Memoize(func() (Version, error) {
var KernelVersion = sync.OnceValues(func() (Version, error) {
return detectKernelVersion()
})
137
vendor/github.com/cilium/ebpf/link/anchor.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
package link

import (
"fmt"

"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal/sys"
)

const anchorFlags = sys.BPF_F_REPLACE |
sys.BPF_F_BEFORE |
sys.BPF_F_AFTER |
sys.BPF_F_ID |
sys.BPF_F_LINK_MPROG

// Anchor is a reference to a link or program.
//
// It is used to describe where an attachment or detachment should take place
// for link types which support multiple attachment.
type Anchor interface {
// anchor returns an fd or ID and a set of flags.
//
// By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG
// changes this to refer to a link instead.
//
// BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program
// is attached. The default behaviour if none of these flags is specified
// matches BPF_F_AFTER.
anchor() (fdOrID, flags uint32, _ error)
}

type firstAnchor struct{}

func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) {
return 0, sys.BPF_F_BEFORE, nil
}

// Head is the position before all other programs or links.
func Head() Anchor {
return firstAnchor{}
}

type lastAnchor struct{}

func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) {
return 0, sys.BPF_F_AFTER, nil
}

// Tail is the position after all other programs or links.
func Tail() Anchor {
return lastAnchor{}
}

// Before is the position just in front of target.
func BeforeLink(target Link) Anchor {
return anchor{target, sys.BPF_F_BEFORE}
}

// After is the position just after target.
func AfterLink(target Link) Anchor {
return anchor{target, sys.BPF_F_AFTER}
}

// Before is the position just in front of target.
func BeforeLinkByID(target ID) Anchor {
return anchor{target, sys.BPF_F_BEFORE}
}

// After is the position just after target.
func AfterLinkByID(target ID) Anchor {
return anchor{target, sys.BPF_F_AFTER}
}

// Before is the position just in front of target.
func BeforeProgram(target *ebpf.Program) Anchor {
return anchor{target, sys.BPF_F_BEFORE}
}

// After is the position just after target.
func AfterProgram(target *ebpf.Program) Anchor {
return anchor{target, sys.BPF_F_AFTER}
}

// Replace the target itself.
func ReplaceProgram(target *ebpf.Program) Anchor {
return anchor{target, sys.BPF_F_REPLACE}
}

// Before is the position just in front of target.
func BeforeProgramByID(target ebpf.ProgramID) Anchor {
return anchor{target, sys.BPF_F_BEFORE}
}

// After is the position just after target.
func AfterProgramByID(target ebpf.ProgramID) Anchor {
return anchor{target, sys.BPF_F_AFTER}
}

// Replace the target itself.
func ReplaceProgramByID(target ebpf.ProgramID) Anchor {
return anchor{target, sys.BPF_F_REPLACE}
}

type anchor struct {
target any
position uint32
}

func (ap anchor) anchor() (fdOrID, flags uint32, _ error) {
var typeFlag uint32
switch target := ap.target.(type) {
case *ebpf.Program:
fd := target.FD()
if fd < 0 {
return 0, 0, sys.ErrClosedFd
}
fdOrID = uint32(fd)
typeFlag = 0
case ebpf.ProgramID:
fdOrID = uint32(target)
typeFlag = sys.BPF_F_ID
case interface{ FD() int }:
fd := target.FD()
if fd < 0 {
return 0, 0, sys.ErrClosedFd
}
fdOrID = uint32(fd)
typeFlag = sys.BPF_F_LINK_MPROG
case ID:
fdOrID = uint32(target)
typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID
default:
return 0, 0, fmt.Errorf("invalid target %T", ap.target)
}

return fdOrID, ap.position | typeFlag, nil
}
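Note (editorial sketch, not part of this commit): anchors are consumed by the multi-attach link types. Attaching a TCX program at the head of an interface's ingress chain might look like this, where ifindex and prog are hypothetical (an interface index and a loaded *ebpf.Program):

l, err := link.AttachTCX(link.TCXOptions{
	Interface: ifindex,               // target network interface
	Program:   prog,                  // program of type SchedCLS
	Attach:    ebpf.AttachTCXIngress, // tcx/ingress
	Anchor:    link.Head(),           // run before all other programs
})
if err != nil {
	// handle error
}
defer l.Close()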
22
vendor/github.com/cilium/ebpf/link/cgroup.go
generated
vendored
@@ -6,6 +6,7 @@ import (
"os"

"github.com/cilium/ebpf"
"github.com/cilium/ebpf/internal/sys"
)

type cgroupAttachFlags uint32
@@ -143,8 +144,7 @@ func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
// Atomically replacing multiple programs requires at least
// 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
// program in MULTI mode")
args.Flags |= uint32(flagReplace)
args.Replace = cg.current
args.Anchor = ReplaceProgram(cg.current)
}

if err := RawAttachProgram(args); err != nil {
@@ -188,3 +188,21 @@ func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program)

return &linkCgroup{*link}, err
}

func (cg *linkCgroup) Info() (*Info, error) {
var info sys.CgroupLinkInfo
if err := sys.ObjInfo(cg.fd, &info); err != nil {
return nil, fmt.Errorf("cgroup link info: %s", err)
}
extra := &CgroupInfo{
CgroupId: info.CgroupId,
AttachType: info.AttachType,
}

return &Info{
info.Type,
info.Id,
ebpf.ProgramID(info.ProgId),
extra,
}, nil
}
10
vendor/github.com/cilium/ebpf/link/kprobe.go
generated
vendored
@@ -59,6 +59,8 @@ func (ko *KprobeOptions) cookie() uint64 {
// If attaching to symbol fails, automatically retries with the running
// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
// in a portable fashion.
//
// The returned Link may implement [PerfEvent].
func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
k, err := kprobe(symbol, prog, opts, false)
if err != nil {
@@ -90,6 +92,8 @@ func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error
//
// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
// incorrectly returns unix.EINVAL instead of os.ErrNotExist.
//
// The returned Link may implement [PerfEvent].
func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
k, err := kprobe(symbol, prog, opts, true)
if err != nil {
@@ -274,7 +278,11 @@ func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
}
}

rawFd, err := unix.PerfEventOpen(&attr, args.Pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
cpu := 0
if args.Pid != perfAllThreads {
cpu = -1
}
rawFd, err := unix.PerfEventOpen(&attr, args.Pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC)

// On some old kernels, kprobe PMU doesn't allow `.` in symbol names and
// return -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
19
vendor/github.com/cilium/ebpf/link/kprobe_multi.go
generated
vendored
@@ -130,12 +130,23 @@ func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error {
return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported)
}

func (kml *kprobeMultiLink) Pin(string) error {
return fmt.Errorf("pin kprobe_multi: %w", ErrNotSupported)
func (kml *kprobeMultiLink) Info() (*Info, error) {
var info sys.KprobeMultiLinkInfo
if err := sys.ObjInfo(kml.fd, &info); err != nil {
return nil, fmt.Errorf("kprobe multi link info: %s", err)
}
extra := &KprobeMultiInfo{
count: info.Count,
flags: info.Flags,
missed: info.Missed,
}

func (kml *kprobeMultiLink) Unpin() error {
return fmt.Errorf("unpin kprobe_multi: %w", ErrNotSupported)
return &Info{
info.Type,
info.Id,
ebpf.ProgramID(info.ProgId),
extra,
}, nil
}

var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
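Note (editorial sketch, not part of this commit): with Info() now implemented for kprobe-multi links, missed-hit counters can be read back from user code; prog is a hypothetical loaded *ebpf.Program and the symbol names are illustrative.

l, err := link.KprobeMulti(prog, link.KprobeMultiOptions{
	Symbols: []string{"tcp_v4_connect", "tcp_v6_connect"},
})
if err != nil {
	// handle error
}
defer l.Close()

info, err := l.Info()
if err != nil {
	// handle error
}
if km := info.KprobeMulti(); km != nil {
	if missed, ok := km.Missed(); ok {
		fmt.Println("missed hits:", missed)
	}
}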
264
vendor/github.com/cilium/ebpf/link/link.go
generated
vendored
@@ -1,9 +1,9 @@
package link

import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"os"

"github.com/cilium/ebpf"
"github.com/cilium/ebpf/btf"
@@ -48,8 +48,15 @@ type Link interface {

// NewLinkFromFD creates a link from a raw fd.
//
// You should not use fd after calling this function.
// Deprecated: use [NewFromFD] instead.
func NewLinkFromFD(fd int) (Link, error) {
return NewFromFD(fd)
}

// NewFromFD creates a link from a raw fd.
//
// You should not use fd after calling this function.
func NewFromFD(fd int) (Link, error) {
sysFD, err := sys.NewFD(fd)
if err != nil {
return nil, err
@@ -58,6 +65,19 @@ func NewLinkFromFD(fd int) (Link, error) {
return wrapRawLink(&RawLink{fd: sysFD})
}

// NewFromID returns the link associated with the given id.
//
// Returns ErrNotExist if there is no link with the given id.
func NewFromID(id ID) (Link, error) {
getFdAttr := &sys.LinkGetFdByIdAttr{Id: id}
fd, err := sys.LinkGetFdById(getFdAttr)
if err != nil {
return nil, fmt.Errorf("get link fd from ID %d: %w", id, err)
}

return wrapRawLink(&RawLink{fd, ""})
}

// LoadPinnedLink loads a link that was persisted into a bpffs.
func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
raw, err := loadPinnedRawLink(fileName, opts)
@@ -96,8 +116,18 @@ func wrapRawLink(raw *RawLink) (_ Link, err error) {
return &NetNsLink{*raw}, nil
case KprobeMultiType:
return &kprobeMultiLink{*raw}, nil
case UprobeMultiType:
return &uprobeMultiLink{*raw}, nil
case PerfEventType:
return nil, fmt.Errorf("recovering perf event fd: %w", ErrNotSupported)
return &perfEventLink{*raw, nil}, nil
case TCXType:
return &tcxLink{*raw}, nil
case NetfilterType:
return &netfilterLink{*raw}, nil
case NetkitType:
return &netkitLink{*raw}, nil
case XDPType:
return &xdpLink{*raw}, nil
default:
return raw, nil
}
@@ -128,10 +158,85 @@ type Info struct {
|
||||
    extra interface{}
}

type TracingInfo sys.TracingLinkInfo
type CgroupInfo sys.CgroupLinkInfo
type NetNsInfo sys.NetNsLinkInfo
type XDPInfo sys.XDPLinkInfo
type TracingInfo struct {
    AttachType  sys.AttachType
    TargetObjId uint32
    TargetBtfId sys.TypeID
}

type CgroupInfo struct {
    CgroupId   uint64
    AttachType sys.AttachType
    _          [4]byte
}

type NetNsInfo struct {
    NetnsIno   uint32
    AttachType sys.AttachType
}

type TCXInfo struct {
    Ifindex    uint32
    AttachType sys.AttachType
}

type XDPInfo struct {
    Ifindex uint32
}

type NetfilterInfo struct {
    Pf       uint32
    Hooknum  uint32
    Priority int32
    Flags    uint32
}

type NetkitInfo struct {
    Ifindex    uint32
    AttachType sys.AttachType
}

type KprobeMultiInfo struct {
    count  uint32
    flags  uint32
    missed uint64
}

// AddressCount is the number of addresses hooked by the kprobe.
func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) {
    return kpm.count, kpm.count > 0
}

func (kpm *KprobeMultiInfo) Flags() (uint32, bool) {
    return kpm.flags, kpm.count > 0
}

func (kpm *KprobeMultiInfo) Missed() (uint64, bool) {
    return kpm.missed, kpm.count > 0
}

type PerfEventInfo struct {
    Type  sys.PerfEventType
    extra interface{}
}

func (r *PerfEventInfo) Kprobe() *KprobeInfo {
    e, _ := r.extra.(*KprobeInfo)
    return e
}

type KprobeInfo struct {
    address uint64
    missed  uint64
}

func (kp *KprobeInfo) Address() (uint64, bool) {
    return kp.address, kp.address > 0
}

func (kp *KprobeInfo) Missed() (uint64, bool) {
    return kp.missed, kp.address > 0
}

// Tracing returns tracing type-specific link info.
//
@@ -157,7 +262,7 @@ func (r Info) NetNs() *NetNsInfo {
    return e
}

// ExtraNetNs returns XDP type-specific link info.
// XDP returns XDP type-specific link info.
//
// Returns nil if the type-specific link info isn't available.
func (r Info) XDP() *XDPInfo {
@@ -165,6 +270,46 @@ func (r Info) XDP() *XDPInfo {
    return e
}

// TCX returns TCX type-specific link info.
//
// Returns nil if the type-specific link info isn't available.
func (r Info) TCX() *TCXInfo {
    e, _ := r.extra.(*TCXInfo)
    return e
}

// Netfilter returns netfilter type-specific link info.
//
// Returns nil if the type-specific link info isn't available.
func (r Info) Netfilter() *NetfilterInfo {
    e, _ := r.extra.(*NetfilterInfo)
    return e
}

// Netkit returns netkit type-specific link info.
//
// Returns nil if the type-specific link info isn't available.
func (r Info) Netkit() *NetkitInfo {
    e, _ := r.extra.(*NetkitInfo)
    return e
}

// KprobeMulti returns kprobe-multi type-specific link info.
//
// Returns nil if the type-specific link info isn't available.
func (r Info) KprobeMulti() *KprobeMultiInfo {
    e, _ := r.extra.(*KprobeMultiInfo)
    return e
}

// PerfEvent returns perf-event type-specific link info.
//
// Returns nil if the type-specific link info isn't available.
func (r Info) PerfEvent() *PerfEventInfo {
    e, _ := r.extra.(*PerfEventInfo)
    return e
}
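The accessors above expose the extra metadata without reaching into internal/sys. A minimal sketch of a caller branching on whichever type-specific info is present; the package and helper name are illustrative, and the link is assumed to come from one of this release's Attach* functions:

package example

import (
    "fmt"

    "github.com/cilium/ebpf/link"
)

// describe prints whatever type-specific metadata a link carries.
func describe(l link.Link) error {
    info, err := l.Info()
    if err != nil {
        return err
    }

    switch {
    case info.TCX() != nil:
        fmt.Printf("tcx link on ifindex %d\n", info.TCX().Ifindex)
    case info.Netfilter() != nil:
        nf := info.Netfilter()
        fmt.Printf("netfilter hook %d (pf %d, prio %d)\n", nf.Hooknum, nf.Pf, nf.Priority)
    case info.KprobeMulti() != nil:
        // AddressCount reports false when the kernel didn't fill the field in.
        if n, ok := info.KprobeMulti().AddressCount(); ok {
            fmt.Printf("kprobe-multi across %d addresses\n", n)
        }
    default:
        fmt.Println("no type-specific info available")
    }
    return nil
}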
// RawLink is the low-level API to bpf_link.
//
// You should consider using the higher level interfaces in this
@@ -295,6 +440,9 @@ func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
}

// Info returns metadata about the link.
//
// Linktype specific metadata is not included and can be retrieved
// via the linktype specific Info() method.
func (l *RawLink) Info() (*Info, error) {
    var info sys.LinkInfo

@@ -302,35 +450,81 @@ func (l *RawLink) Info() (*Info, error) {
        return nil, fmt.Errorf("link info: %s", err)
    }

    var extra interface{}
    switch info.Type {
    case CgroupType:
        extra = &CgroupInfo{}
    case NetNsType:
        extra = &NetNsInfo{}
    case TracingType:
        extra = &TracingInfo{}
    case XDPType:
        extra = &XDPInfo{}
    case RawTracepointType, IterType,
        PerfEventType, KprobeMultiType:
        // Extra metadata not supported.
    default:
        return nil, fmt.Errorf("unknown link info type: %d", info.Type)
    }

    if extra != nil {
        buf := bytes.NewReader(info.Extra[:])
        err := binary.Read(buf, internal.NativeEndian, extra)
        if err != nil {
            return nil, fmt.Errorf("cannot read extra link info: %w", err)
        }
    }

    return &Info{
        info.Type,
        info.Id,
        ebpf.ProgramID(info.ProgId),
        extra,
        nil,
    }, nil
}

// Iterator allows iterating over links attached into the kernel.
type Iterator struct {
    // The ID of the current link. Only valid after a call to Next
    ID ID
    // The current link. Only valid until a call to Next.
    // See Take if you want to retain the link.
    Link Link

    err error
}

// Next retrieves the next link.
//
// Returns true if another link was found. Call [Iterator.Err] after the function returns false.
func (it *Iterator) Next() bool {
    id := it.ID
    for {
        getIdAttr := &sys.LinkGetNextIdAttr{Id: id}
        err := sys.LinkGetNextId(getIdAttr)
        if errors.Is(err, os.ErrNotExist) {
            // There are no more links.
            break
        } else if err != nil {
            it.err = fmt.Errorf("get next link ID: %w", err)
            break
        }

        id = getIdAttr.NextId
        l, err := NewFromID(id)
        if errors.Is(err, os.ErrNotExist) {
            // Couldn't load the link fast enough. Try next ID.
            continue
        } else if err != nil {
            it.err = fmt.Errorf("get link for ID %d: %w", id, err)
            break
        }

        if it.Link != nil {
            it.Link.Close()
        }
        it.ID, it.Link = id, l
        return true
    }

    // No more links or we encountered an error.
    if it.Link != nil {
        it.Link.Close()
    }
    it.Link = nil
    return false
}

// Take the ownership of the current link.
//
// It's the caller's responsibility to close the link.
func (it *Iterator) Take() Link {
    l := it.Link
    it.Link = nil
    return l
}

// Err returns an error if iteration failed for some reason.
func (it *Iterator) Err() error {
    return it.err
}

func (it *Iterator) Close() {
    if it.Link != nil {
        it.Link.Close()
    }
}
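A minimal sketch of consuming the new Iterator, assuming the exported Type and Program fields on Info match the constructor above. Take would be used instead of reading it.Link if a link must outlive the loop:

package main

import (
    "fmt"

    "github.com/cilium/ebpf/link"
)

func main() {
    // The zero value starts iteration at link ID 0.
    var it link.Iterator
    defer it.Close()

    for it.Next() {
        info, err := it.Link.Info()
        if err != nil {
            fmt.Printf("link %d: info unavailable: %v\n", it.ID, err)
            continue
        }
        fmt.Printf("link %d: type %d, program %d\n", it.ID, info.Type, info.Program)
    }
    if err := it.Err(); err != nil {
        fmt.Println("iteration failed:", err)
    }
}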
90 vendor/github.com/cilium/ebpf/link/netfilter.go generated vendored Normal file
@@ -0,0 +1,90 @@
package link

import (
    "fmt"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/internal/sys"
)

const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmentation

type NetfilterAttachFlags uint32

type NetfilterOptions struct {
    // Program must be a netfilter BPF program.
    Program *ebpf.Program
    // The protocol family.
    ProtocolFamily uint32
    // The number of the hook you are interested in.
    HookNumber uint32
    // Priority within hook
    Priority int32
    // Extra link flags
    Flags uint32
    // Netfilter flags
    NetfilterFlags NetfilterAttachFlags
}

type netfilterLink struct {
    RawLink
}

// AttachNetfilter links a netfilter BPF program to a netfilter hook.
func AttachNetfilter(opts NetfilterOptions) (Link, error) {
    if opts.Program == nil {
        return nil, fmt.Errorf("netfilter program is nil")
    }

    if t := opts.Program.Type(); t != ebpf.Netfilter {
        return nil, fmt.Errorf("invalid program type %s, expected netfilter", t)
    }

    progFd := opts.Program.FD()
    if progFd < 0 {
        return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
    }

    attr := sys.LinkCreateNetfilterAttr{
        ProgFd:         uint32(opts.Program.FD()),
        AttachType:     sys.BPF_NETFILTER,
        Flags:          opts.Flags,
        Pf:             uint32(opts.ProtocolFamily),
        Hooknum:        uint32(opts.HookNumber),
        Priority:       opts.Priority,
        NetfilterFlags: uint32(opts.NetfilterFlags),
    }

    fd, err := sys.LinkCreateNetfilter(&attr)
    if err != nil {
        return nil, fmt.Errorf("attach netfilter link: %w", err)
    }

    return &netfilterLink{RawLink{fd, ""}}, nil
}

func (*netfilterLink) Update(new *ebpf.Program) error {
    return fmt.Errorf("netfilter update: %w", ErrNotSupported)
}

func (nf *netfilterLink) Info() (*Info, error) {
    var info sys.NetfilterLinkInfo
    if err := sys.ObjInfo(nf.fd, &info); err != nil {
        return nil, fmt.Errorf("netfilter link info: %s", err)
    }
    extra := &NetfilterInfo{
        Pf:       info.Pf,
        Hooknum:  info.Hooknum,
        Priority: info.Priority,
        Flags:    info.Flags,
    }

    return &Info{
        info.Type,
        info.Id,
        ebpf.ProgramID(info.ProgId),
        extra,
    }, nil
}

var _ Link = (*netfilterLink)(nil)
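AttachNetfilter takes raw pf/hook numbers rather than named constants. A hedged sketch attaching an already-loaded netfilter program to the IPv4 LOCAL_IN hook; the numeric values mirror the kernel's NFPROTO_IPV4 (2) and NF_INET_LOCAL_IN (1) and are assumptions of this sketch, not part of the diff:

package example

import (
    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachLocalIn assumes prog was loaded with type ebpf.Netfilter,
// e.g. from an ELF object via ebpf.NewCollection.
func attachLocalIn(prog *ebpf.Program) (link.Link, error) {
    return link.AttachNetfilter(link.NetfilterOptions{
        Program:        prog,
        ProtocolFamily: 2,    // NFPROTO_IPV4 (assumed kernel constant value)
        HookNumber:     1,    // NF_INET_LOCAL_IN (assumed kernel constant value)
        Priority:       -128, // run before most other hooks
    })
}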
89 vendor/github.com/cilium/ebpf/link/netkit.go generated vendored Normal file
@@ -0,0 +1,89 @@
package link

import (
    "fmt"
    "runtime"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/internal/sys"
)

type NetkitOptions struct {
    // Index of the interface to attach to.
    Interface int
    // Program to attach.
    Program *ebpf.Program
    // One of the AttachNetkit* constants.
    Attach ebpf.AttachType
    // Attach relative to an anchor. Optional.
    Anchor Anchor
    // Only attach if the expected revision matches.
    ExpectedRevision uint64
    // Flags control the attach behaviour. Specify an Anchor instead of
    // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
    Flags uint32
}

func AttachNetkit(opts NetkitOptions) (Link, error) {
    if opts.Interface < 0 {
        return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface)
    }

    if opts.Flags&anchorFlags != 0 {
        return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target")
    }

    attr := sys.LinkCreateNetkitAttr{
        ProgFd:           uint32(opts.Program.FD()),
        AttachType:       sys.AttachType(opts.Attach),
        TargetIfindex:    uint32(opts.Interface),
        ExpectedRevision: opts.ExpectedRevision,
        Flags:            opts.Flags,
    }

    if opts.Anchor != nil {
        fdOrID, flags, err := opts.Anchor.anchor()
        if err != nil {
            return nil, fmt.Errorf("attach netkit link: %w", err)
        }

        attr.RelativeFdOrId = fdOrID
        attr.Flags |= flags
    }

    fd, err := sys.LinkCreateNetkit(&attr)
    runtime.KeepAlive(opts.Program)
    runtime.KeepAlive(opts.Anchor)
    if err != nil {
        if haveFeatErr := haveNetkit(); haveFeatErr != nil {
            return nil, haveFeatErr
        }
        return nil, fmt.Errorf("attach netkit link: %w", err)
    }

    return &netkitLink{RawLink{fd, ""}}, nil
}

type netkitLink struct {
    RawLink
}

var _ Link = (*netkitLink)(nil)

func (netkit *netkitLink) Info() (*Info, error) {
    var info sys.NetkitLinkInfo
    if err := sys.ObjInfo(netkit.fd, &info); err != nil {
        return nil, fmt.Errorf("netkit link info: %s", err)
    }
    extra := &NetkitInfo{
        Ifindex:    info.Ifindex,
        AttachType: info.AttachType,
    }

    return &Info{
        info.Type,
        info.Id,
        ebpf.ProgramID(info.ProgId),
        extra,
    }, nil
}
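A sketch of the happy path for AttachNetkit; ebpf.AttachNetkitPeer is assumed to be one of the AttachNetkit* constants the options refer to (the feature probe below uses its sibling AttachNetkitPrimary):

package example

import (
    "net"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachPeer attaches prog to the peer side of an existing netkit device.
func attachPeer(ifname string, prog *ebpf.Program) (link.Link, error) {
    iface, err := net.InterfaceByName(ifname)
    if err != nil {
        return nil, err
    }

    return link.AttachNetkit(link.NetkitOptions{
        Interface: iface.Index,
        Program:   prog,
        Attach:    ebpf.AttachNetkitPeer, // assumed constant from this release
    })
}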
19 vendor/github.com/cilium/ebpf/link/netns.go generated vendored
@@ -4,6 +4,7 @@ import (
"fmt"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
"github.com/cilium/ebpf/internal/sys"
|
||||
)
|
||||
|
||||
// NetNsLink is a program attached to a network namespace.
|
||||
@@ -34,3 +35,21 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
|
||||
|
||||
return &NetNsLink{*link}, nil
|
||||
}
|
||||
|
||||
func (ns *NetNsLink) Info() (*Info, error) {
|
||||
var info sys.NetNsLinkInfo
|
||||
if err := sys.ObjInfo(ns.fd, &info); err != nil {
|
||||
return nil, fmt.Errorf("netns link info: %s", err)
|
||||
}
|
||||
extra := &NetNsInfo{
|
||||
NetnsIno: info.NetnsIno,
|
||||
AttachType: info.AttachType,
|
||||
}
|
||||
|
||||
return &Info{
|
||||
info.Type,
|
||||
info.Id,
|
||||
ebpf.ProgramID(info.ProgId),
|
||||
extra,
|
||||
}, nil
|
||||
}
|
||||
|
||||
102 vendor/github.com/cilium/ebpf/link/perf_event.go generated vendored
@@ -3,6 +3,7 @@ package link
import (
    "errors"
    "fmt"
    "os"
    "runtime"
    "unsafe"

@@ -78,6 +79,18 @@ func (pe *perfEvent) Close() error {
    return nil
}

// PerfEvent is implemented by some Link types which use a perf event under
// the hood.
type PerfEvent interface {
    // PerfEvent returns a file for the underlying perf event.
    //
    // It is the caller's responsibility to close the returned file.
    //
    // Making changes to the associated perf event leads to
    // undefined behaviour.
    PerfEvent() (*os.File, error)
}

// perfEventLink represents a bpf perf link.
type perfEventLink struct {
    RawLink
@@ -86,30 +99,16 @@ type perfEventLink struct {

func (pl *perfEventLink) isLink() {}

// Pinning requires the underlying perf event FD to stay open.
//
//    | PerfEvent FD | BpfLink FD | Works                |
//    |--------------|------------|----------------------|
//    | Open         | Open       | Yes                  |
//    | Closed       | Open       | No                   |
//    | Open         | Closed     | No (Pin() -> EINVAL) |
//    | Closed       | Closed     | No (Pin() -> EINVAL) |
//
// There is currently no pretty way to recover the perf event FD
// when loading a pinned link, so leave as not supported for now.
func (pl *perfEventLink) Pin(string) error {
    return fmt.Errorf("perf event link pin: %w", ErrNotSupported)
}

func (pl *perfEventLink) Unpin() error {
    return fmt.Errorf("perf event link unpin: %w", ErrNotSupported)
}

func (pl *perfEventLink) Close() error {
    if err := pl.fd.Close(); err != nil {
        return fmt.Errorf("perf link close: %w", err)
    }

    // when created from pinned link
    if pl.pe == nil {
        return nil
    }

    if err := pl.pe.Close(); err != nil {
        return fmt.Errorf("perf event close: %w", err)
    }
@@ -120,6 +119,54 @@ func (pl *perfEventLink) Update(prog *ebpf.Program) error {
    return fmt.Errorf("perf event link update: %w", ErrNotSupported)
}

var _ PerfEvent = (*perfEventLink)(nil)

func (pl *perfEventLink) PerfEvent() (*os.File, error) {
    // when created from pinned link
    if pl.pe == nil {
        return nil, ErrNotSupported
    }

    fd, err := pl.pe.fd.Dup()
    if err != nil {
        return nil, err
    }

    return fd.File("perf-event"), nil
}

func (pl *perfEventLink) Info() (*Info, error) {
    var info sys.PerfEventLinkInfo
    if err := sys.ObjInfo(pl.fd, &info); err != nil {
        return nil, fmt.Errorf("perf event link info: %s", err)
    }

    var extra2 interface{}
    switch info.PerfEventType {
    case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE:
        var kprobeInfo sys.KprobeLinkInfo
        if err := sys.ObjInfo(pl.fd, &kprobeInfo); err != nil {
            return nil, fmt.Errorf("kprobe link info: %s", err)
        }
        extra2 = &KprobeInfo{
            address: kprobeInfo.Addr,
            missed:  kprobeInfo.Missed,
        }
    }

    extra := &PerfEventInfo{
        Type:  info.PerfEventType,
        extra: extra2,
    }

    return &Info{
        info.Type,
        info.Id,
        ebpf.ProgramID(info.ProgId),
        extra,
    }, nil
}

// perfEventIoctl implements Link and handles the perf event lifecycle
// via ioctl().
type perfEventIoctl struct {
@@ -154,6 +201,17 @@ func (pi *perfEventIoctl) Info() (*Info, error) {
    return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported)
}

var _ PerfEvent = (*perfEventIoctl)(nil)

func (pi *perfEventIoctl) PerfEvent() (*os.File, error) {
    fd, err := pi.fd.Dup()
    if err != nil {
        return nil, err
    }

    return fd.File("perf-event"), nil
}

// attach the given eBPF prog to the perf event stored in pe.
// pe must contain a valid perf event fd.
// prog's type must match the program type stored in pe.
@@ -229,7 +287,11 @@ func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
        Wakeup: 1,
    }

    fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
    cpu := 0
    if pid != perfAllThreads {
        cpu = -1
    }
    fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC)
    if err != nil {
        return nil, fmt.Errorf("opening tracepoint perf event: %w", err)
    }
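The new PerfEvent interface lets callers reach the perf event behind a link, for example to inspect or tune it with ioctls. A minimal sketch:

package example

import (
    "fmt"
    "os"

    "github.com/cilium/ebpf/link"
)

// perfEventFile extracts the underlying perf event file from a link,
// if the link exposes one (e.g. kprobes and tracepoints).
func perfEventFile(l link.Link) (*os.File, error) {
    pe, ok := l.(link.PerfEvent)
    if !ok {
        return nil, fmt.Errorf("link does not wrap a perf event")
    }
    // The caller owns the returned duplicate and must close it.
    return pe.PerfEvent()
}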
66 vendor/github.com/cilium/ebpf/link/program.go generated vendored
@@ -2,22 +2,27 @@ package link

import (
    "fmt"
    "runtime"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/internal/sys"
)

type RawAttachProgramOptions struct {
    // File descriptor to attach to. This differs for each attach type.
    // Target to query. This is usually a file descriptor but may refer to
    // something else based on the attach type.
    Target int
    // Program to attach.
    Program *ebpf.Program
    // Program to replace (cgroups).
    Replace *ebpf.Program
    // Attach must match the attach type of Program (and Replace).
    // Attach must match the attach type of Program.
    Attach ebpf.AttachType
    // Flags control the attach behaviour. This differs for each attach type.
    // Attach relative to an anchor. Optional.
    Anchor Anchor
    // Flags control the attach behaviour. Specify an Anchor instead of
    // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
    Flags uint32
    // Only attach if the internal revision matches the given value.
    ExpectedRevision uint64
}

// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
@@ -25,45 +30,72 @@ type RawAttachProgramOptions struct {
// You should use one of the higher level abstractions available in this
// package if possible.
func RawAttachProgram(opts RawAttachProgramOptions) error {
    var replaceFd uint32
    if opts.Replace != nil {
        replaceFd = uint32(opts.Replace.FD())
    if opts.Flags&anchorFlags != 0 {
        return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
    }

    attr := sys.ProgAttachAttr{
        TargetFd:          uint32(opts.Target),
        TargetFdOrIfindex: uint32(opts.Target),
        AttachBpfFd:       uint32(opts.Program.FD()),
        ReplaceBpfFd:      replaceFd,
        AttachType:        uint32(opts.Attach),
        AttachFlags:       uint32(opts.Flags),
        ExpectedRevision:  opts.ExpectedRevision,
    }

    if opts.Anchor != nil {
        fdOrID, flags, err := opts.Anchor.anchor()
        if err != nil {
            return fmt.Errorf("attach program: %w", err)
        }

        if flags == sys.BPF_F_REPLACE {
            // Ensure that replacing a program works on old kernels.
            attr.ReplaceBpfFd = fdOrID
        } else {
            attr.RelativeFdOrId = fdOrID
            attr.AttachFlags |= flags
        }
    }

    if err := sys.ProgAttach(&attr); err != nil {
        if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
            return haveFeatErr
        }
        return fmt.Errorf("can't attach program: %w", err)
        return fmt.Errorf("attach program: %w", err)
    }
    runtime.KeepAlive(opts.Program)

    return nil
}

type RawDetachProgramOptions struct {
    Target  int
    Program *ebpf.Program
    Attach  ebpf.AttachType
}
type RawDetachProgramOptions RawAttachProgramOptions

// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
//
// You should use one of the higher level abstractions available in this
// package if possible.
func RawDetachProgram(opts RawDetachProgramOptions) error {
    if opts.Flags&anchorFlags != 0 {
        return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
    }

    attr := sys.ProgDetachAttr{
        TargetFd:          uint32(opts.Target),
        TargetFdOrIfindex: uint32(opts.Target),
        AttachBpfFd:       uint32(opts.Program.FD()),
        AttachType:        uint32(opts.Attach),
        ExpectedRevision:  opts.ExpectedRevision,
    }

    if opts.Anchor != nil {
        fdOrID, flags, err := opts.Anchor.anchor()
        if err != nil {
            return fmt.Errorf("detach program: %w", err)
        }

        attr.RelativeFdOrId = fdOrID
        attr.AttachFlags |= flags
    }

    if err := sys.ProgDetach(&attr); err != nil {
        if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
            return haveFeatErr
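With the Replace field gone, atomic replacement goes through an Anchor, which the code above maps back to BPF_F_REPLACE for old kernels. A sketch assuming a ReplaceProgram anchor constructor from the same release; the helper name is an assumption, only the anchor mechanism itself is shown in this diff:

package example

import (
    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// replaceCgroupProgram atomically swaps oldProg for newProg on a cgroup
// fd, mirroring what the removed Replace field used to do.
func replaceCgroupProgram(cgroupFd int, oldProg, newProg *ebpf.Program) error {
    return link.RawAttachProgram(link.RawAttachProgramOptions{
        Target:  cgroupFd,
        Program: newProg,
        Attach:  ebpf.AttachCGroupInetIngress,
        Anchor:  link.ReplaceProgram(oldProg), // assumed anchor helper
    })
}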
110 vendor/github.com/cilium/ebpf/link/query.go generated vendored
@@ -2,7 +2,6 @@ package link

import (
    "fmt"
    "os"
    "unsafe"

    "github.com/cilium/ebpf"
@@ -11,53 +10,102 @@ import (

// QueryOptions defines additional parameters when querying for programs.
type QueryOptions struct {
    // Path can be a path to a cgroup, netns or LIRC2 device
    Path string
    // Target to query. This is usually a file descriptor but may refer to
    // something else based on the attach type.
    Target int
    // Attach specifies the AttachType of the programs queried for
    Attach ebpf.AttachType
    // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
    QueryFlags uint32
}

// QueryPrograms retrieves ProgramIDs associated with the AttachType.
// QueryResult describes which programs and links are active.
type QueryResult struct {
    // List of attached programs.
    Programs []AttachedProgram

    // Incremented by one every time the set of attached programs changes.
    // May be zero if not supported by the [ebpf.AttachType].
    Revision uint64
}

// HaveLinkInfo returns true if the kernel supports querying link information
// for a particular [ebpf.AttachType].
func (qr *QueryResult) HaveLinkInfo() bool {
    return qr.Revision > 0
}

type AttachedProgram struct {
    ID     ebpf.ProgramID
    linkID ID
}

// LinkID returns the ID associated with the program.
//
// Returns (nil, nil) if there are no programs attached to the queried kernel
// resource. Calling QueryPrograms on a kernel missing PROG_QUERY will result in
// ErrNotSupported.
func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
    if haveProgQuery() != nil {
        return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
// Returns 0, false if the kernel doesn't support retrieving the ID or if the
// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you
// need to tell the two apart.
func (ap *AttachedProgram) LinkID() (ID, bool) {
    return ap.linkID, ap.linkID != 0
}

    f, err := os.Open(opts.Path)
    if err != nil {
        return nil, fmt.Errorf("can't open file: %s", err)
    }
    defer f.Close()

// QueryPrograms retrieves a list of programs for the given AttachType.
//
// Returns a slice of attached programs, which may be empty.
// revision counts how many times the set of attached programs has changed and
// may be zero if not supported by the [ebpf.AttachType].
// Returns ErrNotSupported on a kernel without BPF_PROG_QUERY
func QueryPrograms(opts QueryOptions) (*QueryResult, error) {
    // query the number of programs to allocate correct slice size
    attr := sys.ProgQueryAttr{
        TargetFd:          uint32(f.Fd()),
        TargetFdOrIfindex: uint32(opts.Target),
        AttachType:        sys.AttachType(opts.Attach),
        QueryFlags:        opts.QueryFlags,
    }
    err := sys.ProgQuery(&attr)
    if err != nil {
        if haveFeatErr := haveProgQuery(); haveFeatErr != nil {
            return nil, fmt.Errorf("query programs: %w", haveFeatErr)
        }
        return nil, fmt.Errorf("query programs: %w", err)
    }
    if attr.Count == 0 {
        return &QueryResult{Revision: attr.Revision}, nil
    }

    // The minimum bpf_mprog revision is 1, so we can use the field to detect
    // whether the attach type supports link ids.
    haveLinkIDs := attr.Revision != 0

    count := attr.Count
    progIds := make([]ebpf.ProgramID, count)
    attr = sys.ProgQueryAttr{
        TargetFdOrIfindex: uint32(opts.Target),
        AttachType:        sys.AttachType(opts.Attach),
        QueryFlags:        opts.QueryFlags,
        Count:             count,
        ProgIds:           sys.NewPointer(unsafe.Pointer(&progIds[0])),
    }

    var linkIds []ID
    if haveLinkIDs {
        linkIds = make([]ID, count)
        attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0]))
    }

    if err := sys.ProgQuery(&attr); err != nil {
        return nil, fmt.Errorf("can't query program count: %w", err)
        return nil, fmt.Errorf("query programs: %w", err)
    }

    // return nil if no progs are attached
    if attr.ProgCount == 0 {
        return nil, nil
    // NB: attr.Count might have changed between the two syscalls.
    var programs []AttachedProgram
    for i, id := range progIds[:attr.Count] {
        ap := AttachedProgram{ID: id}
        if haveLinkIDs {
            ap.linkID = linkIds[i]
        }
        programs = append(programs, ap)
    }

    // we have at least one prog, so we query again
    progIds := make([]ebpf.ProgramID, attr.ProgCount)
    attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0]))
    attr.ProgCount = uint32(len(progIds))
    if err := sys.ProgQuery(&attr); err != nil {
        return nil, fmt.Errorf("can't query program IDs: %w", err)
    }

    return progIds, nil

    return &QueryResult{programs, attr.Revision}, nil
}
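QueryPrograms now returns a QueryResult instead of a bare ID slice, and can report which link owns each program. A sketch listing a TCX ingress chain, using only the types shown above:

package example

import (
    "fmt"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// listTCXIngress prints the programs attached to an interface's TCX
// ingress hook, with link IDs where the kernel reports them.
func listTCXIngress(ifindex int) error {
    res, err := link.QueryPrograms(link.QueryOptions{
        Target: ifindex, // an ifindex, not an fd, for TCX
        Attach: ebpf.AttachTCXIngress,
    })
    if err != nil {
        return err
    }

    fmt.Printf("revision %d\n", res.Revision)
    for _, p := range res.Programs {
        if id, ok := p.LinkID(); ok {
            fmt.Printf("program %d via link %d\n", p.ID, id)
        } else {
            fmt.Printf("program %d (no link info)\n", p.ID)
        }
    }
    return nil
}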
78 vendor/github.com/cilium/ebpf/link/syscalls.go generated vendored
@@ -24,6 +24,10 @@ const (
    XDPType         = sys.BPF_LINK_TYPE_XDP
    PerfEventType   = sys.BPF_LINK_TYPE_PERF_EVENT
    KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI
    TCXType         = sys.BPF_LINK_TYPE_TCX
    UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI
    NetfilterType   = sys.BPF_LINK_TYPE_NETFILTER
    NetkitType      = sys.BPF_LINK_TYPE_NETKIT
)

var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
@@ -72,7 +76,7 @@ var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic repl
    // present.
    attr := sys.ProgAttachAttr{
        // We rely on this being checked after attachFlags.
        TargetFd:          ^uint32(0),
        TargetFdOrIfindex: ^uint32(0),
        AttachBpfFd:       uint32(prog.FD()),
        AttachType:        uint32(ebpf.AttachCGroupInetIngress),
        AttachFlags:       uint32(flagReplace),
@@ -110,7 +114,7 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err
        // We rely on this being checked during the syscall.
        // With an otherwise correct payload we expect EBADF here
        // as an indication that the feature is present.
        TargetFd:          ^uint32(0),
        TargetFdOrIfindex: ^uint32(0),
        AttachType:        sys.AttachType(ebpf.AttachCGroupInetIngress),
    }

@@ -124,3 +128,73 @@ var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() err
    }
    return errors.New("syscall succeeded unexpectedly")
})

var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error {
    prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
        Type:    ebpf.SchedCLS,
        License: "MIT",
        Instructions: asm.Instructions{
            asm.Mov.Imm(asm.R0, 0),
            asm.Return(),
        },
    })

    if err != nil {
        return internal.ErrNotSupported
    }

    defer prog.Close()
    attr := sys.LinkCreateTcxAttr{
        // We rely on this being checked during the syscall.
        // With an otherwise correct payload we expect ENODEV here
        // as an indication that the feature is present.
        TargetIfindex: ^uint32(0),
        ProgFd:        uint32(prog.FD()),
        AttachType:    sys.AttachType(ebpf.AttachTCXIngress),
    }

    _, err = sys.LinkCreateTcx(&attr)

    if errors.Is(err, unix.ENODEV) {
        return nil
    }
    if err != nil {
        return ErrNotSupported
    }
    return errors.New("syscall succeeded unexpectedly")
})

var haveNetkit = internal.NewFeatureTest("netkit", "6.7", func() error {
    prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
        Type:    ebpf.SchedCLS,
        License: "MIT",
        Instructions: asm.Instructions{
            asm.Mov.Imm(asm.R0, 0),
            asm.Return(),
        },
    })

    if err != nil {
        return internal.ErrNotSupported
    }

    defer prog.Close()
    attr := sys.LinkCreateNetkitAttr{
        // We rely on this being checked during the syscall.
        // With an otherwise correct payload we expect ENODEV here
        // as an indication that the feature is present.
        TargetIfindex: ^uint32(0),
        ProgFd:        uint32(prog.FD()),
        AttachType:    sys.AttachType(ebpf.AttachNetkitPrimary),
    }

    _, err = sys.LinkCreateNetkit(&attr)

    if errors.Is(err, unix.ENODEV) {
        return nil
    }
    if err != nil {
        return ErrNotSupported
    }
    return errors.New("syscall succeeded unexpectedly")
})
89 vendor/github.com/cilium/ebpf/link/tcx.go generated vendored Normal file
@@ -0,0 +1,89 @@
package link

import (
    "fmt"
    "runtime"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/internal/sys"
)

type TCXOptions struct {
    // Index of the interface to attach to.
    Interface int
    // Program to attach.
    Program *ebpf.Program
    // One of the AttachTCX* constants.
    Attach ebpf.AttachType
    // Attach relative to an anchor. Optional.
    Anchor Anchor
    // Only attach if the expected revision matches.
    ExpectedRevision uint64
    // Flags control the attach behaviour. Specify an Anchor instead of
    // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional.
    Flags uint32
}

func AttachTCX(opts TCXOptions) (Link, error) {
    if opts.Interface < 0 {
        return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface)
    }

    if opts.Flags&anchorFlags != 0 {
        return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target")
    }

    attr := sys.LinkCreateTcxAttr{
        ProgFd:           uint32(opts.Program.FD()),
        AttachType:       sys.AttachType(opts.Attach),
        TargetIfindex:    uint32(opts.Interface),
        ExpectedRevision: opts.ExpectedRevision,
        Flags:            opts.Flags,
    }

    if opts.Anchor != nil {
        fdOrID, flags, err := opts.Anchor.anchor()
        if err != nil {
            return nil, fmt.Errorf("attach tcx link: %w", err)
        }

        attr.RelativeFdOrId = fdOrID
        attr.Flags |= flags
    }

    fd, err := sys.LinkCreateTcx(&attr)
    runtime.KeepAlive(opts.Program)
    runtime.KeepAlive(opts.Anchor)
    if err != nil {
        if haveFeatErr := haveTCX(); haveFeatErr != nil {
            return nil, haveFeatErr
        }
        return nil, fmt.Errorf("attach tcx link: %w", err)
    }

    return &tcxLink{RawLink{fd, ""}}, nil
}

type tcxLink struct {
    RawLink
}

var _ Link = (*tcxLink)(nil)

func (tcx *tcxLink) Info() (*Info, error) {
    var info sys.TcxLinkInfo
    if err := sys.ObjInfo(tcx.fd, &info); err != nil {
        return nil, fmt.Errorf("tcx link info: %s", err)
    }
    extra := &TCXInfo{
        Ifindex:    info.Ifindex,
        AttachType: info.AttachType,
    }

    return &Info{
        info.Type,
        info.Id,
        ebpf.ProgramID(info.ProgId),
        extra,
    }, nil
}
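A sketch attaching at the head of an interface's TCX ingress chain; link.Head() is assumed to be one of the release's Anchor constructors:

package example

import (
    "net"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachIngressFirst places prog before all other TCX ingress programs
// on eth0. prog is assumed to be a SchedCLS program.
func attachIngressFirst(prog *ebpf.Program) (link.Link, error) {
    iface, err := net.InterfaceByName("eth0")
    if err != nil {
        return nil, err
    }

    return link.AttachTCX(link.TCXOptions{
        Interface: iface.Index,
        Program:   prog,
        Attach:    ebpf.AttachTCXIngress,
        Anchor:    link.Head(), // assumed anchor helper
    })
}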
2 vendor/github.com/cilium/ebpf/link/tracepoint.go generated vendored
@@ -30,6 +30,8 @@ type TracepointOptions struct {
//
// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
// only possible as of kernel 4.14 (commit cf5f5ce).
//
// The returned Link may implement [PerfEvent].
func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {
    if group == "" || name == "" {
        return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput)
19 vendor/github.com/cilium/ebpf/link/tracing.go generated vendored
@@ -18,6 +18,25 @@ func (f *tracing) Update(new *ebpf.Program) error {
return fmt.Errorf("tracing update: %w", ErrNotSupported)
|
||||
}
|
||||
|
||||
func (f *tracing) Info() (*Info, error) {
|
||||
var info sys.TracingLinkInfo
|
||||
if err := sys.ObjInfo(f.fd, &info); err != nil {
|
||||
return nil, fmt.Errorf("tracing link info: %s", err)
|
||||
}
|
||||
extra := &TracingInfo{
|
||||
TargetObjId: info.TargetObjId,
|
||||
TargetBtfId: info.TargetBtfId,
|
||||
AttachType: info.AttachType,
|
||||
}
|
||||
|
||||
return &Info{
|
||||
info.Type,
|
||||
info.Id,
|
||||
ebpf.ProgramID(info.ProgId),
|
||||
extra,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AttachFreplace attaches the given eBPF program to the function it replaces.
|
||||
//
|
||||
// The program and name can either be provided at link time, or can be provided
|
||||
|
||||
28 vendor/github.com/cilium/ebpf/link/uprobe.go generated vendored
@@ -36,10 +36,10 @@ var (
type Executable struct {
    // Path of the executable on the filesystem.
    path string
    // Parsed ELF and dynamic symbols' addresses.
    addresses map[string]uint64
    // Parsed ELF and dynamic symbols' cachedAddresses.
    cachedAddresses map[string]uint64
    // Keep track of symbol table lazy load.
    addressesOnce sync.Once
    cacheAddressesOnce sync.Once
}

// UprobeOptions defines additional parameters that will be used
@@ -109,7 +109,7 @@ func OpenExecutable(path string) (*Executable, error) {

    return &Executable{
        path: path,
        addresses: make(map[string]uint64),
        cachedAddresses: make(map[string]uint64),
    }, nil
}

@@ -153,7 +153,7 @@ func (ex *Executable) load(f *internal.SafeELFFile) error {
            }
        }

        ex.addresses[s.Name] = address
        ex.cachedAddresses[s.Name] = address
    }

    return nil
@@ -162,13 +162,13 @@ func (ex *Executable) load(f *internal.SafeELFFile) error {
// address calculates the address of a symbol in the executable.
//
// opts must not be nil.
func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) {
    if opts.Address > 0 {
        return opts.Address + opts.Offset, nil
func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) {
    if address > 0 {
        return address + offset, nil
    }

    var err error
    ex.addressesOnce.Do(func() {
    ex.cacheAddressesOnce.Do(func() {
        var f *internal.SafeELFFile
        f, err = internal.OpenSafeELFFile(ex.path)
        if err != nil {
@@ -183,7 +183,7 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error
        return 0, fmt.Errorf("lazy load symbols: %w", err)
    }

    address, ok := ex.addresses[symbol]
    address, ok := ex.cachedAddresses[symbol]
    if !ok {
        return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
    }
@@ -199,7 +199,7 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error
        "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported)
    }

    return address + opts.Offset, nil
    return address + offset, nil
}

// Uprobe attaches the given eBPF program to a perf event that fires when the
@@ -222,6 +222,8 @@ func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error
//
// Functions provided by shared libraries can currently not be traced and
// will result in an ErrNotSupported.
//
// The returned Link may implement [PerfEvent].
func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
    u, err := ex.uprobe(symbol, prog, opts, false)
    if err != nil {
@@ -256,6 +258,8 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
//
// Functions provided by shared libraries can currently not be traced and
// will result in an ErrNotSupported.
//
// The returned Link may implement [PerfEvent].
func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
    u, err := ex.uprobe(symbol, prog, opts, true)
    if err != nil {
@@ -284,7 +288,7 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti
        opts = &UprobeOptions{}
    }

    offset, err := ex.address(symbol, opts)
    offset, err := ex.address(symbol, opts.Address, opts.Offset)
    if err != nil {
        return nil, err
    }
216 vendor/github.com/cilium/ebpf/link/uprobe_multi.go generated vendored Normal file
@@ -0,0 +1,216 @@
package link

import (
    "errors"
    "fmt"
    "os"
    "unsafe"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/asm"
    "github.com/cilium/ebpf/internal"
    "github.com/cilium/ebpf/internal/sys"
    "github.com/cilium/ebpf/internal/unix"
)

// UprobeMultiOptions defines additional parameters that will be used
// when opening a UprobeMulti Link.
type UprobeMultiOptions struct {
    // Symbol addresses. If set, overrides the addresses eventually parsed from
    // the executable. Mutually exclusive with UprobeMulti's symbols argument.
    Addresses []uint64

    // Offsets into functions provided by UprobeMulti's symbols argument.
    // For example: to set uprobes to main+5 and _start+10, call UprobeMulti
    // with:
    //     symbols: "main", "_start"
    //     opt.Offsets: 5, 10
    Offsets []uint64

    // Optional list of associated ref counter offsets.
    RefCtrOffsets []uint64

    // Optional list of associated BPF cookies.
    Cookies []uint64

    // Only set the uprobe_multi link on the given process ID, zero PID means
    // system-wide.
    PID uint32
}

func (ex *Executable) UprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) {
    return ex.uprobeMulti(symbols, prog, opts, 0)
}

func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) {

    // The return probe is not limited for symbols entry, so there's no special
    // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets
    // and opts.Addresses arrays follow the same logic as for entry uprobes.
    return ex.uprobeMulti(symbols, prog, opts, unix.BPF_F_UPROBE_MULTI_RETURN)
}

func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) {
    if prog == nil {
        return nil, errors.New("cannot attach a nil program")
    }

    if opts == nil {
        opts = &UprobeMultiOptions{}
    }

    addresses, err := ex.addresses(symbols, opts.Addresses, opts.Offsets)
    if err != nil {
        return nil, err
    }

    addrs := len(addresses)
    cookies := len(opts.Cookies)
    refCtrOffsets := len(opts.RefCtrOffsets)

    if addrs == 0 {
        return nil, fmt.Errorf("Addresses are required: %w", errInvalidInput)
    }
    if refCtrOffsets > 0 && refCtrOffsets != addrs {
        return nil, fmt.Errorf("RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput)
    }
    if cookies > 0 && cookies != addrs {
        return nil, fmt.Errorf("Cookies must be exactly Addresses in length: %w", errInvalidInput)
    }

    attr := &sys.LinkCreateUprobeMultiAttr{
        Path:             sys.NewStringPointer(ex.path),
        ProgFd:           uint32(prog.FD()),
        AttachType:       sys.BPF_TRACE_UPROBE_MULTI,
        UprobeMultiFlags: flags,
        Count:            uint32(addrs),
        Offsets:          sys.NewPointer(unsafe.Pointer(&addresses[0])),
        Pid:              opts.PID,
    }

    if refCtrOffsets != 0 {
        attr.RefCtrOffsets = sys.NewPointer(unsafe.Pointer(&opts.RefCtrOffsets[0]))
    }
    if cookies != 0 {
        attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0]))
    }

    fd, err := sys.LinkCreateUprobeMulti(attr)
    if errors.Is(err, unix.ESRCH) {
        return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist)
    }
    if errors.Is(err, unix.EINVAL) {
        return nil, fmt.Errorf("%w (missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err)
    }

    if err != nil {
        if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil {
            return nil, haveFeatErr
        }
        return nil, err
    }

    return &uprobeMultiLink{RawLink{fd, ""}}, nil
}

func (ex *Executable) addresses(symbols []string, addresses, offsets []uint64) ([]uint64, error) {
    n := len(symbols)
    if n == 0 {
        n = len(addresses)
    }

    if n == 0 {
        return nil, fmt.Errorf("%w: neither symbols nor addresses given", errInvalidInput)
    }

    if symbols != nil && len(symbols) != n {
        return nil, fmt.Errorf("%w: have %d symbols but want %d", errInvalidInput, len(symbols), n)
    }

    if addresses != nil && len(addresses) != n {
        return nil, fmt.Errorf("%w: have %d addresses but want %d", errInvalidInput, len(addresses), n)
    }

    if offsets != nil && len(offsets) != n {
        return nil, fmt.Errorf("%w: have %d offsets but want %d", errInvalidInput, len(offsets), n)
    }

    results := make([]uint64, 0, n)
    for i := 0; i < n; i++ {
        var sym string
        if symbols != nil {
            sym = symbols[i]
        }

        var addr, off uint64
        if addresses != nil {
            addr = addresses[i]
        }

        if offsets != nil {
            off = offsets[i]
        }

        result, err := ex.address(sym, addr, off)
        if err != nil {
            return nil, err
        }

        results = append(results, result)
    }

    return results, nil
}

type uprobeMultiLink struct {
    RawLink
}

var _ Link = (*uprobeMultiLink)(nil)

func (kml *uprobeMultiLink) Update(prog *ebpf.Program) error {
    return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported)
}

var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6.6", func() error {
    prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
        Name: "probe_upm_link",
        Type: ebpf.Kprobe,
        Instructions: asm.Instructions{
            asm.Mov.Imm(asm.R0, 0),
            asm.Return(),
        },
        AttachType: ebpf.AttachTraceUprobeMulti,
        License:    "MIT",
    })
    if errors.Is(err, unix.E2BIG) {
        // Kernel doesn't support AttachType field.
        return internal.ErrNotSupported
    }
    if err != nil {
        return err
    }
    defer prog.Close()

    // We try to create uprobe multi link on '/' path which results in
    // error with -EBADF in case uprobe multi link is supported.
    fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{
        ProgFd:     uint32(prog.FD()),
        AttachType: sys.BPF_TRACE_UPROBE_MULTI,
        Path:       sys.NewStringPointer("/"),
        Offsets:    sys.NewPointer(unsafe.Pointer(&[]uint64{0})),
        Count:      1,
    })
    switch {
    case errors.Is(err, unix.EBADF):
        return nil
    case errors.Is(err, unix.EINVAL):
        return internal.ErrNotSupported
    case err != nil:
        return err
    }

    // should not happen
    fd.Close()
    return errors.New("successfully attached uprobe_multi to /, kernel bug?")
})
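One uprobe-multi link can cover many symbols with a single syscall, instead of one perf event per symbol. A sketch; the libc path and symbol names are illustrative only:

package example

import (
    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// traceMallocFree attaches a single uprobe-multi link over two libc
// symbols. prog must be a kprobe-type program loaded with attach type
// ebpf.AttachTraceUprobeMulti.
func traceMallocFree(prog *ebpf.Program) (link.Link, error) {
    ex, err := link.OpenExecutable("/usr/lib/x86_64-linux-gnu/libc.so.6")
    if err != nil {
        return nil, err
    }

    // nil options: no per-symbol offsets, cookies or PID filter.
    return ex.UprobeMulti([]string{"malloc", "free"}, prog, nil)
}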
28 vendor/github.com/cilium/ebpf/link/xdp.go generated vendored
@@ -4,6 +4,7 @@ import (
"fmt"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
"github.com/cilium/ebpf/internal/sys"
|
||||
)
|
||||
|
||||
// XDPAttachFlags represents how XDP program will be attached to interface.
|
||||
@@ -50,5 +51,30 @@ func AttachXDP(opts XDPOptions) (Link, error) {
|
||||
Flags: uint32(opts.Flags),
|
||||
})
|
||||
|
||||
return rawLink, err
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to attach link: %w", err)
|
||||
}
|
||||
|
||||
return &xdpLink{*rawLink}, nil
|
||||
}
|
||||
|
||||
type xdpLink struct {
|
||||
RawLink
|
||||
}
|
||||
|
||||
func (xdp *xdpLink) Info() (*Info, error) {
|
||||
var info sys.XDPLinkInfo
|
||||
if err := sys.ObjInfo(xdp.fd, &info); err != nil {
|
||||
return nil, fmt.Errorf("xdp link info: %s", err)
|
||||
}
|
||||
extra := &XDPInfo{
|
||||
Ifindex: info.Ifindex,
|
||||
}
|
||||
|
||||
return &Info{
|
||||
info.Type,
|
||||
info.Id,
|
||||
ebpf.ProgramID(info.ProgId),
|
||||
extra,
|
||||
}, nil
|
||||
}
|
||||
|
||||
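AttachXDP now returns a typed link whose Info() carries the interface index. A sketch; the XDPOptions field names beyond Flags are assumed from the same release:

package example

import (
    "fmt"
    "net"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// attachAndVerify attaches an XDP program and reads back the interface
// recorded in the link's metadata.
func attachAndVerify(prog *ebpf.Program, ifname string) (link.Link, error) {
    iface, err := net.InterfaceByName(ifname)
    if err != nil {
        return nil, err
    }

    l, err := link.AttachXDP(link.XDPOptions{
        Program:   prog,
        Interface: iface.Index,
    })
    if err != nil {
        return nil, err
    }

    if info, err := l.Info(); err == nil && info.XDP() != nil {
        fmt.Println("attached to ifindex", info.XDP().Ifindex)
    }
    return l, nil
}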
68 vendor/github.com/cilium/ebpf/linker.go generated vendored
@@ -1,13 +1,14 @@
package ebpf

import (
    "debug/elf"
    "encoding/binary"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "math"

    "golang.org/x/exp/slices"
    "slices"

    "github.com/cilium/ebpf/asm"
    "github.com/cilium/ebpf/btf"
@@ -120,7 +121,7 @@ func hasFunctionReferences(insns asm.Instructions) bool {
//
// Passing a nil target will relocate against the running kernel. insns are
// modified in place.
func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder) error {
func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName string, bo binary.ByteOrder, b *btf.Builder) error {
    var relos []*btf.CORERelocation
    var reloInsns []*asm.Instruction
    iter := insns.Iterate()
@@ -139,7 +140,26 @@ func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOr
        bo = internal.NativeEndian
    }

    fixups, err := btf.CORERelocate(relos, target, bo)
    if len(targets) == 0 {
        kernelTarget, err := btf.LoadKernelSpec()
        if err != nil {
            return fmt.Errorf("load kernel spec: %w", err)
        }
        targets = append(targets, kernelTarget)

        if kmodName != "" {
            kmodTarget, err := btf.LoadKernelModuleSpec(kmodName)
            // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES disabled.
            if err != nil && !errors.Is(err, fs.ErrNotExist) {
                return fmt.Errorf("load kernel module spec: %w", err)
            }
            if err == nil {
                targets = append(targets, kmodTarget)
            }
        }
    }

    fixups, err := btf.CORERelocate(relos, targets, bo, b.Add)
    if err != nil {
        return err
    }
@@ -244,6 +264,10 @@ func fixupAndValidate(insns asm.Instructions) error {
    return nil
}

// POISON_CALL_KFUNC_BASE in libbpf.
// https://github.com/libbpf/libbpf/blob/2778cbce609aa1e2747a69349f7f46a2f94f0522/src/libbpf.c#L5767
const kfuncCallPoisonBase = 2002000000

// fixupKfuncs loops over all instructions in search for kfunc calls.
// If at least one is found, the current kernel's BTF and module BTFs are searched to set Instruction.Constant
// and Instruction.Offset to the correct values.
@@ -257,7 +281,7 @@ func fixupKfuncs(insns asm.Instructions) (_ handles, err error) {
    iter := insns.Iterate()
    for iter.Next() {
        ins := iter.Ins
        if ins.IsKfuncCall() {
        if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil {
            goto fixups
        }
    }
@@ -277,7 +301,8 @@ fixups:
    for {
        ins := iter.Ins

        if !ins.IsKfuncCall() {
        metadata := ins.Metadata.Get(kfuncMetaKey{})
        if metadata == nil {
            if !iter.Next() {
                // break loop if this was the last instruction in the stream.
                break
@@ -286,15 +311,34 @@ fixups:
        }

        // check meta, if no meta return err
        kfm, _ := ins.Metadata.Get(kfuncMeta{}).(*btf.Func)
        kfm, _ := metadata.(*kfuncMeta)
        if kfm == nil {
            return nil, fmt.Errorf("kfunc call has no kfuncMeta")
            return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta")
        }

        target := btf.Type((*btf.Func)(nil))
        spec, module, err := findTargetInKernel(kernelSpec, kfm.Name, &target)
        spec, module, err := findTargetInKernel(kernelSpec, kfm.Func.Name, &target)
        if kfm.Binding == elf.STB_WEAK && errors.Is(err, btf.ErrNotFound) {
            if ins.IsKfuncCall() {
                // If the kfunc call is weak and not found, poison the call. Use a recognizable constant
                // to make it easier to debug. And set src to zero so the verifier doesn't complain
                // about the invalid imm/offset values before dead-code elimination.
                ins.Constant = kfuncCallPoisonBase
                ins.Src = 0
            } else if ins.OpCode.IsDWordLoad() {
                // If the kfunc DWordLoad is weak and not found, set its address to 0.
                ins.Constant = 0
                ins.Src = 0
            } else {
                return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata")
            }

            iter.Next()
            continue
        }
        // Error on non-weak kfunc not found.
        if errors.Is(err, btf.ErrNotFound) {
            return nil, fmt.Errorf("kfunc %q: %w", kfm.Name, ErrNotSupported)
            return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported)
        }
        if err != nil {
            return nil, err
@@ -305,8 +349,8 @@ fixups:
        return nil, err
    }

    if err := btf.CheckTypeCompatibility(kfm.Type, target.(*btf.Func).Type); err != nil {
        return nil, &incompatibleKfuncError{kfm.Name, err}
    if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil {
        return nil, &incompatibleKfuncError{kfm.Func.Name, err}
    }

    id, err := spec.TypeID(target)
398 vendor/github.com/cilium/ebpf/map.go generated vendored
@@ -9,7 +9,9 @@ import (
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
@@ -27,6 +29,10 @@ var (
|
||||
ErrIterationAborted = errors.New("iteration aborted")
|
||||
ErrMapIncompatible = errors.New("map spec is incompatible with existing map")
|
||||
errMapNoBTFValue = errors.New("map spec does not contain a BTF Value")
|
||||
|
||||
// pre-allocating these errors here since they may get called in hot code paths
|
||||
// and cause unnecessary memory allocations
|
||||
errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist)
|
||||
)
|
||||
|
||||
// MapOptions control loading a map into the kernel.
|
||||
@@ -95,11 +101,20 @@ func (ms *MapSpec) Copy() *MapSpec {
|
||||
}
|
||||
|
||||
cpy := *ms
|
||||
cpy.Contents = slices.Clone(cpy.Contents)
|
||||
cpy.Key = btf.Copy(cpy.Key)
|
||||
cpy.Value = btf.Copy(cpy.Value)
|
||||
|
||||
cpy.Contents = make([]MapKV, len(ms.Contents))
|
||||
copy(cpy.Contents, ms.Contents)
|
||||
|
||||
if cpy.InnerMap == ms {
|
||||
cpy.InnerMap = &cpy
|
||||
} else {
|
||||
cpy.InnerMap = ms.InnerMap.Copy()
|
||||
}
|
||||
|
||||
if cpy.Extra != nil {
|
||||
extra := *cpy.Extra
|
||||
cpy.Extra = &extra
|
||||
}
|
||||
|
||||
return &cpy
|
||||
}
|
||||
@@ -133,7 +148,7 @@ func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) {
|
||||
spec.KeySize = 4
|
||||
spec.ValueSize = 4
|
||||
|
||||
n, err := internal.PossibleCPUs()
|
||||
n, err := PossibleCPU()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fixup perf event array: %w", err)
|
||||
}
|
||||
@@ -489,8 +504,14 @@ func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) erro
|
||||
return fmt.Errorf("map create: %w", haveFeatErr)
|
||||
}
|
||||
}
|
||||
if attr.BtfFd == 0 {
|
||||
return fmt.Errorf("map create: %w (without BTF k/v)", err)
|
||||
// BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size.
|
||||
if errors.Is(err, unix.EINVAL) &&
|
||||
(attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) {
|
||||
pageSize := uint32(os.Getpagesize())
|
||||
maxEntries := attr.MaxEntries
|
||||
if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) {
|
||||
return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("map create: %w", err)
|
||||
@@ -515,7 +536,7 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries
|
||||
return m, nil
|
||||
}
|
||||
|
||||
possibleCPUs, err := internal.PossibleCPUs()
|
||||
possibleCPUs, err := PossibleCPU()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -561,11 +582,29 @@ func (m *Map) Info() (*MapInfo, error) {
|
||||
return newMapInfoFromFd(m.fd)
|
||||
}
|
||||
|
||||
// Handle returns a reference to the Map's type information in the kernel.
|
||||
//
|
||||
// Returns ErrNotSupported if the kernel has no BTF support, or if there is no
|
||||
// BTF associated with the Map.
|
||||
func (m *Map) Handle() (*btf.Handle, error) {
|
||||
info, err := m.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, ok := info.BTFID()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported)
|
||||
}
|
||||
|
||||
return btf.NewHandleFromID(id)
|
||||
}
|
||||
|
||||
// MapLookupFlags controls the behaviour of the map lookup calls.
|
||||
type MapLookupFlags uint64
|
||||
|
||||
// LookupLock look up the value of a spin-locked map.
|
||||
const LookupLock MapLookupFlags = 4
|
||||
const LookupLock MapLookupFlags = unix.BPF_F_LOCK
|
||||
|
||||
// Lookup retrieves a value from a Map.
|
||||
//
|
||||
@@ -642,11 +681,15 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error {
|
||||
slice, err := ensurePerCPUSlice(valueOut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valueBytes := make([]byte, m.fullValueSize)
|
||||
if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
|
||||
return err
|
||||
}
|
||||
return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
|
||||
return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes)
|
||||
}
|
||||
|
||||
func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
|
||||
@@ -663,17 +706,62 @@ func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags
	}

	if err = sys.MapLookupElem(&attr); err != nil {
		if errors.Is(err, unix.ENOENT) {
			return errMapLookupKeyNotExist
		}
		return fmt.Errorf("lookup: %w", wrapMapError(err))
	}
	return nil
}

func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error {
	slice, err := ensurePerCPUSlice(valueOut)
	if err != nil {
		return err
	}
	valueBytes := make([]byte, m.fullValueSize)
	if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
		return err
	}
	return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
	return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes)
}

// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary.
func ensurePerCPUSlice(sliceOrPtr any) (any, error) {
	sliceOrPtrType := reflect.TypeOf(sliceOrPtr)
	if sliceOrPtrType.Kind() == reflect.Slice {
		// The target is a slice, the caller is responsible for ensuring that
		// size is correct.
		return sliceOrPtr, nil
	}

	slicePtrType := sliceOrPtrType
	if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
		return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice")
	}

	possibleCPUs, err := PossibleCPU()
	if err != nil {
		return nil, err
	}

	sliceType := slicePtrType.Elem()
	slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)

	sliceElemType := sliceType.Elem()
	sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
	reflect.ValueOf(sliceOrPtr).Elem().Set(slice)
	if !sliceElemIsPointer {
		return slice.Interface(), nil
	}
	sliceElemType = sliceElemType.Elem()

	for i := 0; i < possibleCPUs; i++ {
		newElem := reflect.New(sliceElemType)
		slice.Index(i).Set(newElem)
	}

	return slice.Interface(), nil
}
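With ensurePerCPUSlice in place, a per-CPU lookup can now fill a pointer to a nil slice and the library sizes it to PossibleCPU() itself. A minimal sketch, assuming a per-CPU map `m` with uint32 keys and uint64 values (the helper name is illustrative):

package example

import (
	"log"

	"github.com/cilium/ebpf"
)

// readPerCPU returns one element per possible CPU, allocated by Lookup.
func readPerCPU(m *ebpf.Map, key uint32) []uint64 {
	var perCPU []uint64
	if err := m.Lookup(&key, &perCPU); err != nil {
		log.Fatal(err)
	}
	return perCPU
}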
func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error {
@@ -861,7 +949,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error {
	return nil
}

var mmapProtectedPage = internal.Memoize(func() ([]byte, error) {
var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) {
	return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED)
})
@@ -917,14 +1005,23 @@ func (m *Map) guessNonExistentKey() ([]byte, error) {
//
// "keysOut" and "valuesOut" must be of type slice, a pointer
// to a slice or buffer will not work.
// "prevKey" is the key to start the batch lookup from, it will
// *not* be included in the results. Use nil to start at the first key.
// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
// "cursor" to subsequent calls of this function to continue the batching
// operation in the case of chunking.
//
// Warning: This API is not very safe to use as the kernel implementation for
// batching relies on the user to be aware of subtle details with regard to
// different map type implementations.
//
// ErrKeyNotExist is returned when the batch lookup has reached
// the end of all possible results, even when partial results
// are returned. It should be used to evaluate when lookup is "done".
func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts)
	if err != nil {
		return n, fmt.Errorf("map batch lookup: %w", err)
	}
	return n, nil
}
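The cursor-based signature replaces the old prevKey/nextKeyOut pair, so callers drive the batch with an opaque MapBatchCursor and stop on ErrKeyNotExist. A minimal usage sketch (the chunk size of 128 is an arbitrary assumption; hash maps may return ENOSPC if it is smaller than a bucket):

package example

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
)

// drainMap walks an entire map with the v0.16 cursor-based batch API.
func drainMap(m *ebpf.Map) {
	var (
		cursor ebpf.MapBatchCursor
		keys   = make([]uint32, 128)
		vals   = make([]uint64, 128)
	)
	for {
		n, err := m.BatchLookup(&cursor, keys, vals, nil)
		for i := 0; i < n; i++ {
			// Process keys[i], vals[i]; partial results are valid.
		}
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			return // reached the end of the map
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}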
// BatchLookupAndDelete looks up many elements in a map at once,
@@ -932,47 +1029,121 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o
// It then deletes all those elements.
// "keysOut" and "valuesOut" must be of type slice, a pointer
// to a slice or buffer will not work.
// "prevKey" is the key to start the batch lookup from, it will
// *not* be included in the results. Use nil to start at the first key.
// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
// "cursor" to subsequent calls of this function to continue the batching
// operation in the case of chunking.
//
// Warning: This API is not very safe to use as the kernel implementation for
// batching relies on the user to be aware of subtle details with regard to
// different map type implementations.
//
// ErrKeyNotExist is returned when the batch lookup has reached
// the end of all possible results, even when partial results
// are returned. It should be used to evaluate when lookup is "done".
func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts)
	if err != nil {
		return n, fmt.Errorf("map batch lookup and delete: %w", err)
	}
	return n, nil
}

// MapBatchCursor represents a starting point for a batch operation.
type MapBatchCursor struct {
	m      *Map
	opaque []byte
}

func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	if m.typ.hasPerCPUValue() {
		return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts)
	}

	count, err := batchCount(keysOut, valuesOut)
	if err != nil {
		return 0, err
	}

	valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize))

	n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts)
	if errors.Is(err, unix.ENOSPC) {
		// Hash tables return ENOSPC when the size of the batch is smaller than
		// any bucket.
		return n, fmt.Errorf("%w (batch size too small?)", err)
	} else if err != nil {
		return n, err
	}

	err = valueBuf.Unmarshal(valuesOut)
	if err != nil {
		return 0, err
	}

	return n, nil
}

func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	count, err := sliceLen(keysOut)
	if err != nil {
		return 0, fmt.Errorf("keys: %w", err)
	}

	valueBuf := make([]byte, count*int(m.fullValueSize))
	valuePtr := sys.NewSlicePointer(valueBuf)

	n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts)
	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
		return 0, err
	}

	err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf)
	if err != nil {
		return 0, err
	}

	return n, sysErr
}

func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
	cursorLen := int(m.keySize)
	if cursorLen < 4 {
		// * generic_map_lookup_batch requires that batch_out is key_size bytes.
		//   This is used by array and LPM maps.
		//
		// * __htab_map_lookup_and_delete_batch requires u32. This is used by the
		//   various hash maps.
		//
		// Use a minimum of 4 bytes to avoid having to distinguish between the two.
		cursorLen = 4
	}

	inBatch := cursor.opaque
	if inBatch == nil {
		// This is the first lookup, allocate a buffer to hold the cursor.
		cursor.opaque = make([]byte, cursorLen)
		cursor.m = m
	} else if cursor.m != m {
		// Prevent reuse of a cursor across maps. First, it's unlikely to work.
		// Second, the maps may require different cursorLen and cursor.opaque
		// may therefore be too short. This could lead to the kernel clobbering
		// user space memory.
		return 0, errors.New("a cursor may not be reused across maps")
	}
func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
	if err := haveBatchAPI(); err != nil {
		return 0, err
	}
	if m.typ.hasPerCPUValue() {
		return 0, ErrNotSupported
	}
	keysValue := reflect.ValueOf(keysOut)
	if keysValue.Kind() != reflect.Slice {
		return 0, fmt.Errorf("keys must be a slice")
	}
	valuesValue := reflect.ValueOf(valuesOut)
	if valuesValue.Kind() != reflect.Slice {
		return 0, fmt.Errorf("valuesOut must be a slice")
	}
	count := keysValue.Len()
	if count != valuesValue.Len() {
		return 0, fmt.Errorf("keysOut and valuesOut must be the same length")
	}
	keyBuf := make([]byte, count*int(m.keySize))
	keyPtr := sys.NewSlicePointer(keyBuf)
	valueBuf := make([]byte, count*int(m.fullValueSize))
	valuePtr := sys.NewSlicePointer(valueBuf)
	nextBuf := makeMapSyscallOutput(nextKeyOut, int(m.keySize))

	keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize))

	attr := sys.MapLookupBatchAttr{
		MapFd:    m.fd.Uint(),
		Keys:     keyPtr,
		Keys:     keyBuf.Pointer(),
		Values:   valuePtr,
		Count:    uint32(count),
		OutBatch: nextBuf.Pointer(),
		InBatch:  sys.NewSlicePointer(inBatch),
		OutBatch: sys.NewSlicePointer(cursor.opaque),
	}

	if opts != nil {
@@ -980,30 +1151,13 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut
		attr.Flags = opts.Flags
	}

	var err error
	if startKey != nil {
		attr.InBatch, err = marshalMapSyscallInput(startKey, int(m.keySize))
		if err != nil {
			return 0, err
		}
	}

	_, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
	sysErr = wrapMapError(sysErr)
	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
		return 0, sysErr
	}

	err = nextBuf.Unmarshal(nextKeyOut)
	if err != nil {
		return 0, err
	}
	err = sysenc.Unmarshal(keysOut, keyBuf)
	if err != nil {
		return 0, err
	}
	err = sysenc.Unmarshal(valuesOut, valueBuf)
	if err != nil {
	if err := keyBuf.Unmarshal(keysOut); err != nil {
		return 0, err
	}
@@ -1016,29 +1170,24 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut
// to a slice or buffer will not work.
func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) {
	if m.typ.hasPerCPUValue() {
		return 0, ErrNotSupported
		return m.batchUpdatePerCPU(keys, values, opts)
	}
	keysValue := reflect.ValueOf(keys)
	if keysValue.Kind() != reflect.Slice {
		return 0, fmt.Errorf("keys must be a slice")
	}
	valuesValue := reflect.ValueOf(values)
	if valuesValue.Kind() != reflect.Slice {
		return 0, fmt.Errorf("values must be a slice")
	}
	var (
		count    = keysValue.Len()
		valuePtr sys.Pointer
		err      error
	)
	if count != valuesValue.Len() {
		return 0, fmt.Errorf("keys and values must be the same length")
	}
	keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))

	count, err := batchCount(keys, values)
	if err != nil {
		return 0, err
	}
	valuePtr, err = marshalMapSyscallInput(values, count*int(m.valueSize))

	valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize))
	if err != nil {
		return 0, err
	}

	return m.batchUpdate(count, keys, valuePtr, opts)
}

func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
	keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
	if err != nil {
		return 0, err
	}
@@ -1065,17 +1214,28 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er
	return int(attr.Count), nil
}

func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) {
	count, err := sliceLen(keys)
	if err != nil {
		return 0, fmt.Errorf("keys: %w", err)
	}

	valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize))
	if err != nil {
		return 0, err
	}

	return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts)
}
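Batch updates on per-CPU maps become possible with this change, and the values slice is expected flattened: one run of PossibleCPU() elements per key. A hedged sketch of the expected layout (map and value types are assumptions for illustration):

package example

import (
	"log"

	"github.com/cilium/ebpf"
)

// updatePerCPUBatch shows the flattened layout marshalBatchPerCPUValue expects:
// values[i*ncpu+c] belongs to keys[i] on CPU c.
func updatePerCPUBatch(m *ebpf.Map) {
	ncpu, err := ebpf.PossibleCPU()
	if err != nil {
		log.Fatal(err)
	}

	keys := []uint32{0, 1, 2}
	values := make([]uint64, len(keys)*ncpu)

	if _, err := m.BatchUpdate(keys, values, nil); err != nil {
		log.Fatal(err)
	}
}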
// BatchDelete batch deletes entries in the map by keys.
// "keys" must be of type slice, a pointer to a slice or buffer will not work.
func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
	if m.typ.hasPerCPUValue() {
		return 0, ErrNotSupported
	count, err := sliceLen(keys)
	if err != nil {
		return 0, fmt.Errorf("keys: %w", err)
	}
	keysValue := reflect.ValueOf(keys)
	if keysValue.Kind() != reflect.Slice {
		return 0, fmt.Errorf("keys must be a slice")
	}
	count := keysValue.Len()

	keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
	if err != nil {
		return 0, fmt.Errorf("cannot marshal keys: %v", err)
@@ -1102,6 +1262,24 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
	return int(attr.Count), nil
}

func batchCount(keys, values any) (int, error) {
	keysLen, err := sliceLen(keys)
	if err != nil {
		return 0, fmt.Errorf("keys: %w", err)
	}

	valuesLen, err := sliceLen(values)
	if err != nil {
		return 0, fmt.Errorf("values: %w", err)
	}

	if keysLen != valuesLen {
		return 0, fmt.Errorf("keys and values must have the same length")
	}

	return keysLen, nil
}
|
||||
// Iterate traverses a map.
|
||||
//
|
||||
// It's safe to create multiple iterators at the same time.
|
||||
@@ -1366,7 +1544,9 @@ func marshalMap(m *Map, length int) ([]byte, error) {
|
||||
// See Map.Iterate.
|
||||
type MapIterator struct {
|
||||
target *Map
|
||||
curKey []byte
|
||||
// Temporary storage to avoid allocations in Next(). This is any instead
|
||||
// of []byte to avoid allocations.
|
||||
cursor any
|
||||
count, maxEntries uint32
|
||||
done bool
|
||||
err error
|
||||
@@ -1394,34 +1574,30 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// For array-like maps NextKeyBytes returns nil only on after maxEntries
|
||||
// For array-like maps NextKey returns nil only after maxEntries
|
||||
// iterations.
|
||||
for mi.count <= mi.maxEntries {
|
||||
var nextKey []byte
|
||||
if mi.curKey == nil {
|
||||
// Pass nil interface to NextKeyBytes to make sure the Map's first key
|
||||
if mi.cursor == nil {
|
||||
// Pass nil interface to NextKey to make sure the Map's first key
|
||||
// is returned. If we pass an uninitialized []byte instead, it'll see a
|
||||
// non-nil interface and try to marshal it.
|
||||
nextKey, mi.err = mi.target.NextKeyBytes(nil)
|
||||
|
||||
mi.curKey = make([]byte, mi.target.keySize)
|
||||
mi.cursor = make([]byte, mi.target.keySize)
|
||||
mi.err = mi.target.NextKey(nil, mi.cursor)
|
||||
} else {
|
||||
nextKey, mi.err = mi.target.NextKeyBytes(mi.curKey)
|
||||
mi.err = mi.target.NextKey(mi.cursor, mi.cursor)
|
||||
}
|
||||
if mi.err != nil {
|
||||
|
||||
if errors.Is(mi.err, ErrKeyNotExist) {
|
||||
mi.done = true
|
||||
mi.err = nil
|
||||
return false
|
||||
} else if mi.err != nil {
|
||||
mi.err = fmt.Errorf("get next key: %w", mi.err)
|
||||
return false
|
||||
}
|
||||
|
||||
if nextKey == nil {
|
||||
mi.done = true
|
||||
return false
|
||||
}
|
||||
|
||||
mi.curKey = nextKey
|
||||
|
||||
mi.count++
|
||||
mi.err = mi.target.Lookup(nextKey, valueOut)
|
||||
mi.err = mi.target.Lookup(mi.cursor, valueOut)
|
||||
if errors.Is(mi.err, ErrKeyNotExist) {
|
||||
// Even though the key should be valid, we couldn't look up
|
||||
// its value. If we're iterating a hash map this is probably
|
||||
@@ -1438,10 +1614,11 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
buf := mi.cursor.([]byte)
|
||||
if ptr, ok := keyOut.(unsafe.Pointer); ok {
|
||||
copy(unsafe.Slice((*byte)(ptr), len(nextKey)), nextKey)
|
||||
copy(unsafe.Slice((*byte)(ptr), len(buf)), buf)
|
||||
} else {
|
||||
mi.err = sysenc.Unmarshal(keyOut, nextKey)
|
||||
mi.err = sysenc.Unmarshal(keyOut, buf)
|
||||
}
|
||||
|
||||
return mi.err == nil
|
||||
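The iterator refactor swaps NextKeyBytes for an in-place cursor buffer, but the external MapIterator behaviour is unchanged, so the usual consumption pattern still applies. A minimal sketch, assuming uint32 keys and uint64 values:

package example

import (
	"log"

	"github.com/cilium/ebpf"
)

// dumpMap walks all entries of a map with the unchanged iterator API.
func dumpMap(m *ebpf.Map) {
	var (
		key   uint32
		value uint64
	)
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		log.Printf("key=%d value=%d", key, value)
	}
	if err := iter.Err(); err != nil {
		log.Fatal(err) // e.g. a key deleted mid-iteration
	}
}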
@@ -1481,3 +1658,12 @@ func NewMapFromID(id MapID) (*Map, error) {

	return newMapFromFD(fd)
}

// sliceLen returns the length if the value is a slice or an error otherwise.
func sliceLen(slice any) (int, error) {
	sliceValue := reflect.ValueOf(slice)
	if sliceValue.Kind() != reflect.Slice {
		return 0, fmt.Errorf("%T is not a slice", slice)
	}
	return sliceValue.Len(), nil
}
149 vendor/github.com/cilium/ebpf/marshalers.go generated vendored
@@ -5,6 +5,7 @@ import (
	"errors"
	"fmt"
	"reflect"
	"slices"
	"unsafe"

	"github.com/cilium/ebpf/internal"
@@ -43,79 +44,125 @@ func makeMapSyscallOutput(dst any, length int) sysenc.Buffer {
	return sysenc.SyscallOutput(dst, length)
}

// marshalPerCPUValue encodes a slice containing one value per
// appendPerCPUSlice encodes a slice containing one value per
// possible CPU into a buffer of bytes.
//
// Values are initialized to zero if the slice has fewer elements than CPUs.
func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) {
func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) {
	sliceType := reflect.TypeOf(slice)
	if sliceType.Kind() != reflect.Slice {
		return sys.Pointer{}, errors.New("per-CPU value requires slice")
	}

	possibleCPUs, err := internal.PossibleCPUs()
	if err != nil {
		return sys.Pointer{}, err
		return nil, errors.New("per-CPU value requires slice")
	}

	sliceValue := reflect.ValueOf(slice)
	sliceLen := sliceValue.Len()
	if sliceLen > possibleCPUs {
		return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
		return nil, fmt.Errorf("per-CPU value greater than number of CPUs")
	}

	alignedElemLength := internal.Align(elemLength, 8)
	buf := make([]byte, alignedElemLength*possibleCPUs)

	// Grow increases the slice's capacity, if necessary.
	buf = slices.Grow(buf, alignedElemLength*possibleCPUs)
	for i := 0; i < sliceLen; i++ {
		elem := sliceValue.Index(i).Interface()
		elemBytes, err := sysenc.Marshal(elem, elemLength)
		if err != nil {
			return nil, err
		}

		buf = elemBytes.AppendTo(buf)
		buf = append(buf, make([]byte, alignedElemLength-elemLength)...)
	}

	// Ensure buf is zero-padded full size.
	buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...)

	return buf, nil
}

// marshalPerCPUValue encodes a slice containing one value per
// possible CPU into a buffer of bytes.
//
// Values are initialized to zero if the slice has fewer elements than CPUs.
func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) {
	possibleCPUs, err := PossibleCPU()
	if err != nil {
		return sys.Pointer{}, err
	}

		offset := i * alignedElemLength
		elemBytes.CopyTo(buf[offset : offset+elemLength])
	alignedElemLength := internal.Align(elemLength, 8)
	buf := make([]byte, 0, alignedElemLength*possibleCPUs)
	buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength)
	if err != nil {
		return sys.Pointer{}, err
	}

	return sys.NewSlicePointer(buf), nil
}

// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing
// one value per possible CPU into a buffer of bytes.
func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) {
	sliceType := reflect.TypeOf(slice)
	if sliceType.Kind() != reflect.Slice {
		return nil, fmt.Errorf("batch value requires a slice")
	}
	sliceValue := reflect.ValueOf(slice)

	possibleCPUs, err := PossibleCPU()
	if err != nil {
		return nil, err
	}
	if sliceValue.Len() != batchLen*possibleCPUs {
		return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
			batchLen*possibleCPUs, sliceValue.Len())
	}
	alignedElemLength := internal.Align(elemLength, 8)
	buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs)
	for i := 0; i < batchLen; i++ {
		batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
		buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength)
		if err != nil {
			return nil, fmt.Errorf("batch %d: %w", i, err)
		}
	}
	return buf, nil
}

// unmarshalPerCPUValue decodes a buffer into a slice containing one value per
// possible CPU.
//
// slicePtr must be a pointer to a slice.
func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error {
	slicePtrType := reflect.TypeOf(slicePtr)
	if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
		return fmt.Errorf("per-cpu value requires pointer to slice")
// slice must be a literal slice and not a pointer.
func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error {
	sliceType := reflect.TypeOf(slice)
	if sliceType.Kind() != reflect.Slice {
		return fmt.Errorf("per-CPU value requires a slice")
	}

	possibleCPUs, err := internal.PossibleCPUs()
	possibleCPUs, err := PossibleCPU()
	if err != nil {
		return err
	}

	sliceType := slicePtrType.Elem()
	slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)
	sliceValue := reflect.ValueOf(slice)
	if sliceValue.Len() != possibleCPUs {
		return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
			possibleCPUs, sliceValue.Len())
	}

	sliceElemType := sliceType.Elem()
	sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
	if sliceElemIsPointer {
		sliceElemType = sliceElemType.Elem()
	}

	stride := internal.Align(elemLength, 8)
	for i := 0; i < possibleCPUs; i++ {
		var elem any
		v := sliceValue.Index(i)
		if sliceElemIsPointer {
			newElem := reflect.New(sliceElemType)
			slice.Index(i).Set(newElem)
			elem = newElem.Interface()
		} else {
			elem = slice.Index(i).Addr().Interface()
			if !v.Elem().CanAddr() {
				return fmt.Errorf("per-CPU slice elements cannot be nil")
			}
			elem = v.Elem().Addr().Interface()
		} else {
			elem = v.Addr().Interface()
		}

		err := sysenc.Unmarshal(elem, buf[:elemLength])
		if err != nil {
			return fmt.Errorf("cpu %d: %w", i, err)
@@ -123,7 +170,41 @@ func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error {

		buf = buf[stride:]
	}

	reflect.ValueOf(slicePtr).Elem().Set(slice)
	return nil
}

// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice
// containing one value per possible CPU.
//
// slice must have length batchLen * PossibleCPUs().
func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error {
	sliceType := reflect.TypeOf(slice)
	if sliceType.Kind() != reflect.Slice {
		return fmt.Errorf("batch requires a slice")
	}

	sliceValue := reflect.ValueOf(slice)
	possibleCPUs, err := PossibleCPU()
	if err != nil {
		return err
	}
	if sliceValue.Len() != batchLen*possibleCPUs {
		return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
			sliceValue.Len(), batchLen*possibleCPUs)
	}

	fullValueSize := possibleCPUs * internal.Align(elemLength, 8)
	if len(buf) != batchLen*fullValueSize {
		return fmt.Errorf("input buffer has incorrect length, expected %d, got %d",
			len(buf), batchLen*fullValueSize)
	}

	for i := 0; i < batchLen; i++ {
		elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
		if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil {
			return fmt.Errorf("batch %d: %w", i, err)
		}
		buf = buf[fullValueSize:]
	}
	return nil
}
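The marshalers above pad each per-CPU element to an 8-byte stride, so the kernel-facing buffer is larger than elemLength times the CPU count. A standalone sketch of the same arithmetic; internal.Align is not exported, and the bit trick below is assumed to be its equivalent:

package main

import "fmt"

// perCPUBufferSize mirrors the stride computation used by the marshalers:
// each element is padded to an 8-byte boundary before the next CPU's value.
func perCPUBufferSize(elemLength, possibleCPUs int) int {
	stride := (elemLength + 7) &^ 7 // assumed equivalent of internal.Align(elemLength, 8)
	return stride * possibleCPUs
}

func main() {
	// A 12-byte value on a machine with 4 possible CPUs occupies 16*4 = 64 bytes.
	fmt.Println(perCPUBufferSize(12, 4))
}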
233 vendor/github.com/cilium/ebpf/prog.go generated vendored
@@ -15,6 +15,7 @@ import (
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/btf"
	"github.com/cilium/ebpf/internal"
	"github.com/cilium/ebpf/internal/kallsyms"
	"github.com/cilium/ebpf/internal/sys"
	"github.com/cilium/ebpf/internal/sysenc"
	"github.com/cilium/ebpf/internal/unix"
@@ -23,6 +24,18 @@ import (
// ErrNotSupported is returned whenever the kernel doesn't support a feature.
var ErrNotSupported = internal.ErrNotSupported

// errBadRelocation is returned when the verifier rejects a program due to a
// bad CO-RE relocation.
//
// This error is detected based on heuristics and therefore may not be reliable.
var errBadRelocation = errors.New("bad CO-RE relocation")

// errUnknownKfunc is returned when the verifier rejects a program due to an
// unknown kfunc.
//
// This error is detected based on heuristics and therefore may not be reliable.
var errUnknownKfunc = errors.New("unknown kfunc")

// ProgramID represents the unique ID of an eBPF program.
type ProgramID uint32

@@ -33,13 +46,13 @@ const (
	outputPad = 256 + 2
)

// DefaultVerifierLogSize is the default number of bytes allocated for the
// verifier log.
// Deprecated: the correct log size is now detected automatically and this
// constant is unused.
const DefaultVerifierLogSize = 64 * 1024

// maxVerifierLogSize is the maximum size of verifier log buffer the kernel
// will accept before returning EINVAL.
const maxVerifierLogSize = math.MaxUint32 >> 2
// minVerifierLogSize is the default number of bytes allocated for the
// verifier log.
const minVerifierLogSize = 64 * 1024

// ProgramOptions control loading a program into the kernel.
type ProgramOptions struct {
@@ -53,22 +66,15 @@ type ProgramOptions struct {
	// verifier output enabled. Upon error, the program load will be repeated
	// with LogLevelBranch and the given (or default) LogSize value.
	//
	// Setting this to a non-zero value will unconditionally enable the verifier
	// Unless LogDisabled is set, setting this to a non-zero value will enable the verifier
	// log, populating the [ebpf.Program.VerifierLog] field on successful loads
	// and including detailed verifier errors if the program is rejected. This
	// will always allocate an output buffer, but will result in only a single
	// attempt at loading the program.
	LogLevel LogLevel

	// Controls the output buffer size for the verifier log, in bytes. See the
	// documentation on ProgramOptions.LogLevel for details about how this value
	// is used.
	//
	// If this value is set too low to fit the verifier log, the resulting
	// [ebpf.VerifierError]'s Truncated flag will be true, and the error string
	// will also contain a hint to that effect.
	//
	// Defaults to DefaultVerifierLogSize.
	// Deprecated: the correct log buffer size is determined automatically
	// and this field is ignored.
	LogSize int

	// Disables the verifier log completely, regardless of other options.
@@ -80,6 +86,14 @@ type ProgramOptions struct {
	// (containers) or where it is in a non-standard location. Defaults to
	// use the kernel BTF from a well-known location if nil.
	KernelTypes *btf.Spec

	// Type information used for CO-RE relocations of kernel modules,
	// indexed by module name.
	//
	// This is useful in environments where the kernel BTF is not available
	// (containers) or where it is in a non-standard location. Defaults to
	// use the kernel module BTF from a well-known location if nil.
	KernelModuleTypes map[string]*btf.Spec
}
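The new KernelModuleTypes field lets a caller supply module BTF explicitly, alongside the existing KernelTypes override. A hedged configuration sketch; the file paths and module name are placeholders, not something this diff prescribes:

package example

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/btf"
)

// loadWithModuleBTF sketches supplying bundled BTF instead of reading it
// from /sys/kernel/btf. Paths and the module name are hypothetical.
func loadWithModuleBTF(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	kernelBTF, err := btf.LoadSpec("/etc/myapp/vmlinux.btf")
	if err != nil {
		return nil, err
	}
	modBTF, err := btf.LoadSpec("/etc/myapp/nf_conntrack.btf")
	if err != nil {
		return nil, err
	}

	return ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
		KernelTypes: kernelBTF,
		KernelModuleTypes: map[string]*btf.Spec{
			"nf_conntrack": modBTF, // keyed by kernel module name
		},
	})
}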
// ProgramSpec defines a Program.
@@ -148,6 +162,28 @@ func (ps *ProgramSpec) Tag() (string, error) {
	return ps.Instructions.Tag(internal.NativeEndian)
}

// KernelModule returns the kernel module, if any, the AttachTo function is contained in.
func (ps *ProgramSpec) KernelModule() (string, error) {
	if ps.AttachTo == "" {
		return "", nil
	}

	switch ps.Type {
	default:
		return "", nil
	case Tracing:
		switch ps.AttachType {
		default:
			return "", nil
		case AttachTraceFEntry:
		case AttachTraceFExit:
		}
		fallthrough
	case Kprobe:
		return kallsyms.KernelModule(ps.AttachTo)
	}
}
// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a
// program is rejected by the verifier.
//
@@ -197,6 +233,15 @@ func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
	return prog, err
}

var (
	coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel))
	// This log message was introduced by ebb676daa1a3 ("bpf: Print function name in
	// addition to function id") which first appeared in v4.10 and has remained
	// unchanged since.
	coreBadCall  = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel))
	kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase))
)

func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
	if len(spec.Instructions) == 0 {
		return nil, errors.New("instructions cannot be empty")
@@ -210,10 +255,6 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
		return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
	}

	if opts.LogSize < 0 {
		return nil, errors.New("ProgramOptions.LogSize must be a positive value; disable verifier logs using ProgramOptions.LogDisabled")
	}

	// Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load")
	// require the version field to be set to the value of the KERNEL_VERSION
	// macro for kprobe-type programs.
@@ -242,14 +283,41 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
	insns := make(asm.Instructions, len(spec.Instructions))
	copy(insns, spec.Instructions)

	handle, fib, lib, err := btf.MarshalExtInfos(insns)
	if err != nil && !errors.Is(err, btf.ErrNotSupported) {
		return nil, fmt.Errorf("load ext_infos: %w", err)
	kmodName, err := spec.KernelModule()
	if err != nil {
		return nil, fmt.Errorf("kernel module search: %w", err)
	}
	if handle != nil {
		defer handle.Close()

		attr.ProgBtfFd = uint32(handle.FD())
	var targets []*btf.Spec
	if opts.KernelTypes != nil {
		targets = append(targets, opts.KernelTypes)
	}
	if kmodName != "" && opts.KernelModuleTypes != nil {
		if modBTF, ok := opts.KernelModuleTypes[kmodName]; ok {
			targets = append(targets, modBTF)
		}
	}

	var b btf.Builder
	if err := applyRelocations(insns, targets, kmodName, spec.ByteOrder, &b); err != nil {
		return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
	}

	errExtInfos := haveProgramExtInfos()
	if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) {
		// There is at least one CO-RE relocation which relies on a stable local
		// type ID.
		// Return ErrNotSupported instead of E2BIG if there is no BTF support.
		return nil, errExtInfos
	}

	if errExtInfos == nil {
		// Only add func and line info if the kernel supports it. This allows
		// BPF compiled with modern toolchains to work on old kernels.
		fib, lib, err := btf.MarshalExtInfos(insns, &b)
		if err != nil {
			return nil, fmt.Errorf("marshal ext_infos: %w", err)
		}

		attr.FuncInfoRecSize = btf.FuncInfoSize
		attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
@@ -260,8 +328,14 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
		attr.LineInfo = sys.NewSlicePointer(lib)
	}

	if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder); err != nil {
		return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
	if !b.Empty() {
		handle, err := btf.NewHandle(&b)
		if err != nil {
			return nil, fmt.Errorf("load BTF: %w", err)
		}
		defer handle.Close()

		attr.ProgBtfFd = uint32(handle.FD())
	}

	kconfig, err := resolveKconfigReferences(insns)
@@ -319,39 +393,67 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
		}
	}

	if opts.LogSize == 0 {
		opts.LogSize = DefaultVerifierLogSize
	}

	// The caller requested a specific verifier log level. Set up the log buffer.
	// The caller requested a specific verifier log level. Set up the log buffer
	// so that there is a chance of loading the program in a single shot.
	var logBuf []byte
	if !opts.LogDisabled && opts.LogLevel != 0 {
		logBuf = make([]byte, opts.LogSize)
		logBuf = make([]byte, minVerifierLogSize)
		attr.LogLevel = opts.LogLevel
		attr.LogSize = uint32(len(logBuf))
		attr.LogBuf = sys.NewSlicePointer(logBuf)
	}

	fd, err := sys.ProgLoad(attr)
	for {
		var fd *sys.FD
		fd, err = sys.ProgLoad(attr)
		if err == nil {
			return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil
		}

		// An error occurred loading the program, but the caller did not explicitly
		// enable the verifier log. Re-run with branch-level verifier logs enabled to
		// obtain more info. Preserve the original error to return it to the caller.
		// An undersized log buffer will result in ENOSPC regardless of the underlying
		// cause.
		var err2 error
		if !opts.LogDisabled && opts.LogLevel == 0 {
			logBuf = make([]byte, opts.LogSize)
			attr.LogLevel = LogLevelBranch
			attr.LogSize = uint32(len(logBuf))
			attr.LogBuf = sys.NewSlicePointer(logBuf)

			_, err2 = sys.ProgLoad(attr)
		if opts.LogDisabled {
			break
		}

		if attr.LogTrueSize != 0 && attr.LogSize >= attr.LogTrueSize {
			// The log buffer already has the correct size.
			break
		}

		if attr.LogSize != 0 && !errors.Is(err, unix.ENOSPC) {
			// Logging is enabled and the error is not ENOSPC, so we can infer
			// that the log buffer is large enough.
			break
		}

		if attr.LogLevel == 0 {
			// Logging is not enabled but loading the program failed. Enable
			// basic logging.
			attr.LogLevel = LogLevelBranch
		}

		// Make an educated guess how large the buffer should be. Start
		// at minVerifierLogSize and then double the size.
		logSize := uint32(max(len(logBuf)*2, minVerifierLogSize))
		if int(logSize) < len(logBuf) {
			return nil, errors.New("overflow while probing log buffer size")
		}

		if attr.LogTrueSize != 0 {
			// The kernel has given us a hint how large the log buffer has to be.
			logSize = attr.LogTrueSize
		}

		logBuf = make([]byte, logSize)
		attr.LogSize = logSize
		attr.LogBuf = sys.NewSlicePointer(logBuf)
	}

	end := bytes.IndexByte(logBuf, 0)
	if end < 0 {
		end = len(logBuf)
	}

	tail := logBuf[max(end-256, 0):end]
	switch {
	case errors.Is(err, unix.EPERM):
		if len(logBuf) > 0 && logBuf[0] == 0 {
@@ -360,22 +462,31 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, er
			return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
		}

		fallthrough

	case errors.Is(err, unix.EINVAL):
		if hasFunctionReferences(spec.Instructions) {
		if bytes.Contains(tail, coreBadCall) {
			err = errBadRelocation
			break
		} else if bytes.Contains(tail, kfuncBadCall) {
			err = errUnknownKfunc
			break
		}

	case errors.Is(err, unix.EACCES):
		if bytes.Contains(tail, coreBadLoad) {
			err = errBadRelocation
			break
		}
	}

	// hasFunctionReferences may be expensive, so check it last.
	if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) &&
		hasFunctionReferences(spec.Instructions) {
		if err := haveBPFToBPFCalls(); err != nil {
			return nil, fmt.Errorf("load program: %w", err)
		}
	}

	if opts.LogSize > maxVerifierLogSize {
		return nil, fmt.Errorf("load program: %w (ProgramOptions.LogSize exceeds maximum value of %d)", err, maxVerifierLogSize)
	}
	}

	truncated := errors.Is(err, unix.ENOSPC) || errors.Is(err2, unix.ENOSPC)
	return nil, internal.ErrorWithLog("load program", err, logBuf, truncated)
	return nil, internal.ErrorWithLog("load program", err, logBuf)
}

// NewProgramFromFD creates a program from a raw fd.
@@ -554,7 +665,7 @@ type RunOptions struct {
}

// Test runs the Program in the kernel with the given input and returns the
// value returned by the eBPF program. outLen may be zero.
// value returned by the eBPF program.
//
// Note: the kernel expects at least 14 bytes input for an ethernet header for
// XDP and SKB programs.
@@ -703,10 +814,6 @@ func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) {
		Cpu: opts.CPU,
	}

	if attr.Repeat == 0 {
		attr.Repeat = 1
	}

retry:
	for {
		err := sys.ProgRun(&attr)
@@ -715,7 +822,7 @@ retry:
		}

		if errors.Is(err, unix.EINTR) {
			if attr.Repeat == 1 {
			if attr.Repeat <= 1 {
				// Older kernels check whether enough repetitions have been
				// executed only after checking for pending signals.
				//
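With the retry loop above, the library grows the verifier log until it fits, or until the kernel reports LogTrueSize, so LogSize no longer needs tuning by the caller. A hedged sketch of how a caller now observes the result (the helper name is illustrative):

package example

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
)

// loadAndReport shows the caller-visible effect of automatic log sizing:
// no LogSize to tune, and VerifierError carries the complete log on failure.
func loadAndReport(spec *ebpf.ProgramSpec) (*ebpf.Program, error) {
	prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
		// LogSize is deprecated and ignored; the buffer is sized automatically.
	})
	var verr *ebpf.VerifierError
	if errors.As(err, &verr) {
		// %+v prints the full verifier log, which is no longer truncated
		// by an undersized caller-chosen buffer.
		return nil, fmt.Errorf("verifier rejected program: %+v", verr)
	}
	return prog, err
}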
178 vendor/github.com/cilium/ebpf/run-tests.sh generated vendored
@@ -1,178 +0,0 @@
#!/usr/bin/env bash
# Test the current package under a different kernel.
# Requires virtme and qemu to be installed.
# Examples:
#     Run all tests on a 5.4 kernel
#     $ ./run-tests.sh 5.4
#     Run a subset of tests:
#     $ ./run-tests.sh 5.4 ./link
#     Run using a local kernel image
#     $ ./run-tests.sh /path/to/bzImage

set -euo pipefail

script="$(realpath "$0")"
readonly script

quote_env() {
	for var in "$@"; do
		if [ -v "$var" ]; then
			printf "%s=%q " "$var" "${!var}"
		fi
	done
}

declare -a preserved_env=(
	PATH
	CI_MAX_KERNEL_VERSION
	TEST_SEED
	KERNEL_VERSION
)

# This script is a bit like a Matryoshka doll since it keeps re-executing itself
# in various different contexts:
#
#   1. invoked by the user like run-tests.sh 5.4
#   2. invoked by go test like run-tests.sh --exec-vm
#   3. invoked by init in the vm like run-tests.sh --exec-test
#
# This allows us to use all available CPU on the host machine to compile our
# code, and then only use the VM to execute the test. This is because the VM
# is usually slower at compiling than the host.
if [[ "${1:-}" = "--exec-vm" ]]; then
	shift

	input="$1"
	shift

	# Use sudo if /dev/kvm isn't accessible by the current user.
	sudo=""
	if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
		sudo="sudo"
	fi
	readonly sudo

	testdir="$(dirname "$1")"
	output="$(mktemp -d)"
	printf -v cmd "%q " "$@"

	if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then
		# stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a
		# blocking substitute.
		mkfifo "${output}/fake-stdin"
		# Open for reading and writing to avoid blocking.
		exec 0<> "${output}/fake-stdin"
		rm "${output}/fake-stdin"
	fi

	for ((i = 0; i < 3; i++)); do
		if ! $sudo virtme-run --kimg "${input}/boot/vmlinuz" --memory 768M --pwd \
			--rwdir="${testdir}=${testdir}" \
			--rodir=/run/input="${input}" \
			--rwdir=/run/output="${output}" \
			--script-sh "$(quote_env "${preserved_env[@]}") \"$script\" --exec-test $cmd" \
			--kopt possible_cpus=2; then # need at least two CPUs for some tests
			exit 23
		fi

		if [[ -e "${output}/status" ]]; then
			break
		fi

		if [[ -v CI ]]; then
			echo "Retrying test run due to qemu crash"
			continue
		fi

		exit 42
	done

	rc=$(<"${output}/status")
	$sudo rm -r "$output"
	exit $rc
elif [[ "${1:-}" = "--exec-test" ]]; then
	shift

	mount -t bpf bpf /sys/fs/bpf
	mount -t tracefs tracefs /sys/kernel/debug/tracing

	if [[ -d "/run/input/bpf" ]]; then
		export KERNEL_SELFTESTS="/run/input/bpf"
	fi

	if [[ -d "/run/input/lib/modules" ]]; then
		find /run/input/lib/modules -type f -name bpf_testmod.ko -exec insmod {} \;
	fi

	dmesg --clear
	rc=0
	"$@" || rc=$?
	dmesg
	echo $rc > "/run/output/status"
	exit $rc # this return code is "swallowed" by qemu
fi

if [[ -z "${1:-}" ]]; then
	echo "Expecting kernel version or path as first argument"
	exit 1
fi

readonly input="$(mktemp -d)"
readonly tmp_dir="${TMPDIR:-/tmp}"

fetch() {
	echo Fetching "${1}"
	pushd "${tmp_dir}" > /dev/null
	curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${BRANCH:-master}/${1}"
	local ret=$?
	popd > /dev/null
	return $ret
}

machine="$(uname -m)"
readonly machine

if [[ -f "${1}" ]]; then
	readonly kernel="${1}"
	cp "${1}" "${input}/bzImage"
else
	# LINUX_VERSION_CODE test compares this to discovered value.
	export KERNEL_VERSION="${1}"

	if [ "${machine}" = "x86_64" ]; then
		readonly kernel="linux-${1}-amd64.tgz"
		readonly selftests="linux-${1}-amd64-selftests-bpf.tgz"
	elif [ "${machine}" = "aarch64" ]; then
		readonly kernel="linux-${1}-arm64.tgz"
		readonly selftests=""
	else
		echo "Arch ${machine} is not supported"
		exit 1
	fi

	fetch "${kernel}"
	tar xf "${tmp_dir}/${kernel}" -C "${input}"

	if [ -n "${selftests}" ] && fetch "${selftests}"; then
		echo "Decompressing selftests"
		mkdir "${input}/bpf"
		tar --strip-components=5 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
	else
		echo "No selftests found, disabling"
	fi
fi
shift

args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...)
if (( $# > 0 )); then
	args=("$@")
fi

export GOFLAGS=-mod=readonly
export CGO_ENABLED=0

echo Testing on "${kernel}"
go test -exec "$script --exec-vm $input" "${args[@]}"
echo "Test successful on ${kernel}"

rm -r "${input}"
33 vendor/github.com/cilium/ebpf/syscalls.go generated vendored
@@ -4,6 +4,7 @@ import (
	"bytes"
	"errors"
	"fmt"
	"math"
	"os"
	"runtime"

@@ -302,3 +303,35 @@ var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func

	return evt.Close()
})

var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 0),
		asm.Return(),
	}

	buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
	if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
		return err
	}
	bytecode := buf.Bytes()

	_, err := sys.ProgLoad(&sys.ProgLoadAttr{
		ProgType:    sys.ProgType(SocketFilter),
		License:     sys.NewStringPointer("MIT"),
		Insns:       sys.NewSlicePointer(bytecode),
		InsnCnt:     uint32(len(bytecode) / asm.InstructionSize),
		FuncInfoCnt: 1,
		ProgBtfFd:   math.MaxUint32,
	})

	if errors.Is(err, unix.EBADF) {
		return nil
	}

	if errors.Is(err, unix.E2BIG) {
		return ErrNotSupported
	}

	return err
})
164 vendor/github.com/cilium/ebpf/types.go generated vendored
@@ -125,38 +125,39 @@ type ProgramType uint32

// eBPF program types
const (
	UnspecifiedProgram ProgramType = iota
	SocketFilter
	Kprobe
	SchedCLS
	SchedACT
	TracePoint
	XDP
	PerfEvent
	CGroupSKB
	CGroupSock
	LWTIn
	LWTOut
	LWTXmit
	SockOps
	SkSKB
	CGroupDevice
	SkMsg
	RawTracepoint
	CGroupSockAddr
	LWTSeg6Local
	LircMode2
	SkReuseport
	FlowDissector
	CGroupSysctl
	RawTracepointWritable
	CGroupSockopt
	Tracing
	StructOps
	Extension
	LSM
	SkLookup
	Syscall
	UnspecifiedProgram    = ProgramType(sys.BPF_PROG_TYPE_UNSPEC)
	SocketFilter          = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER)
	Kprobe                = ProgramType(sys.BPF_PROG_TYPE_KPROBE)
	SchedCLS              = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS)
	SchedACT              = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT)
	TracePoint            = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT)
	XDP                   = ProgramType(sys.BPF_PROG_TYPE_XDP)
	PerfEvent             = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT)
	CGroupSKB             = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB)
	CGroupSock            = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK)
	LWTIn                 = ProgramType(sys.BPF_PROG_TYPE_LWT_IN)
	LWTOut                = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT)
	LWTXmit               = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT)
	SockOps               = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS)
	SkSKB                 = ProgramType(sys.BPF_PROG_TYPE_SK_SKB)
	CGroupDevice          = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE)
	SkMsg                 = ProgramType(sys.BPF_PROG_TYPE_SK_MSG)
	RawTracepoint         = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT)
	CGroupSockAddr        = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR)
	LWTSeg6Local          = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL)
	LircMode2             = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2)
	SkReuseport           = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT)
	FlowDissector         = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR)
	CGroupSysctl          = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL)
	RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE)
	CGroupSockopt         = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT)
	Tracing               = ProgramType(sys.BPF_PROG_TYPE_TRACING)
	StructOps             = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS)
	Extension             = ProgramType(sys.BPF_PROG_TYPE_EXT)
	LSM                   = ProgramType(sys.BPF_PROG_TYPE_LSM)
	SkLookup              = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP)
	Syscall               = ProgramType(sys.BPF_PROG_TYPE_SYSCALL)
	Netfilter             = ProgramType(sys.BPF_PROG_TYPE_NETFILTER)
)

// AttachType of the eBPF program, needed to differentiate allowed context accesses in
@@ -170,49 +171,62 @@ type AttachType uint32
const AttachNone AttachType = 0

const (
	AttachCGroupInetIngress AttachType = iota
	AttachCGroupInetEgress
	AttachCGroupInetSockCreate
	AttachCGroupSockOps
	AttachSkSKBStreamParser
	AttachSkSKBStreamVerdict
	AttachCGroupDevice
	AttachSkMsgVerdict
	AttachCGroupInet4Bind
	AttachCGroupInet6Bind
	AttachCGroupInet4Connect
	AttachCGroupInet6Connect
	AttachCGroupInet4PostBind
	AttachCGroupInet6PostBind
	AttachCGroupUDP4Sendmsg
	AttachCGroupUDP6Sendmsg
	AttachLircMode2
	AttachFlowDissector
	AttachCGroupSysctl
	AttachCGroupUDP4Recvmsg
	AttachCGroupUDP6Recvmsg
	AttachCGroupGetsockopt
	AttachCGroupSetsockopt
	AttachTraceRawTp
	AttachTraceFEntry
	AttachTraceFExit
	AttachModifyReturn
	AttachLSMMac
	AttachTraceIter
	AttachCgroupInet4GetPeername
	AttachCgroupInet6GetPeername
	AttachCgroupInet4GetSockname
	AttachCgroupInet6GetSockname
	AttachXDPDevMap
	AttachCgroupInetSockRelease
	AttachXDPCPUMap
	AttachSkLookup
	AttachXDP
	AttachSkSKBVerdict
	AttachSkReuseportSelect
	AttachSkReuseportSelectOrMigrate
	AttachPerfEvent
	AttachTraceKprobeMulti
	AttachCGroupInetIngress          = AttachType(sys.BPF_CGROUP_INET_INGRESS)
	AttachCGroupInetEgress           = AttachType(sys.BPF_CGROUP_INET_EGRESS)
	AttachCGroupInetSockCreate       = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE)
	AttachCGroupSockOps              = AttachType(sys.BPF_CGROUP_SOCK_OPS)
	AttachSkSKBStreamParser          = AttachType(sys.BPF_SK_SKB_STREAM_PARSER)
	AttachSkSKBStreamVerdict         = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT)
	AttachCGroupDevice               = AttachType(sys.BPF_CGROUP_DEVICE)
	AttachSkMsgVerdict               = AttachType(sys.BPF_SK_MSG_VERDICT)
	AttachCGroupInet4Bind            = AttachType(sys.BPF_CGROUP_INET4_BIND)
	AttachCGroupInet6Bind            = AttachType(sys.BPF_CGROUP_INET6_BIND)
	AttachCGroupInet4Connect         = AttachType(sys.BPF_CGROUP_INET4_CONNECT)
	AttachCGroupInet6Connect         = AttachType(sys.BPF_CGROUP_INET6_CONNECT)
	AttachCGroupInet4PostBind        = AttachType(sys.BPF_CGROUP_INET4_POST_BIND)
	AttachCGroupInet6PostBind        = AttachType(sys.BPF_CGROUP_INET6_POST_BIND)
	AttachCGroupUDP4Sendmsg          = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG)
	AttachCGroupUDP6Sendmsg          = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG)
	AttachLircMode2                  = AttachType(sys.BPF_LIRC_MODE2)
	AttachFlowDissector              = AttachType(sys.BPF_FLOW_DISSECTOR)
	AttachCGroupSysctl               = AttachType(sys.BPF_CGROUP_SYSCTL)
	AttachCGroupUDP4Recvmsg          = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG)
	AttachCGroupUDP6Recvmsg          = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG)
	AttachCGroupGetsockopt           = AttachType(sys.BPF_CGROUP_GETSOCKOPT)
	AttachCGroupSetsockopt           = AttachType(sys.BPF_CGROUP_SETSOCKOPT)
	AttachTraceRawTp                 = AttachType(sys.BPF_TRACE_RAW_TP)
	AttachTraceFEntry                = AttachType(sys.BPF_TRACE_FENTRY)
	AttachTraceFExit                 = AttachType(sys.BPF_TRACE_FEXIT)
	AttachModifyReturn               = AttachType(sys.BPF_MODIFY_RETURN)
	AttachLSMMac                     = AttachType(sys.BPF_LSM_MAC)
	AttachTraceIter                  = AttachType(sys.BPF_TRACE_ITER)
	AttachCgroupInet4GetPeername     = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME)
	AttachCgroupInet6GetPeername     = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME)
	AttachCgroupInet4GetSockname     = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME)
	AttachCgroupInet6GetSockname     = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME)
	AttachXDPDevMap                  = AttachType(sys.BPF_XDP_DEVMAP)
	AttachCgroupInetSockRelease      = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE)
	AttachXDPCPUMap                  = AttachType(sys.BPF_XDP_CPUMAP)
	AttachSkLookup                   = AttachType(sys.BPF_SK_LOOKUP)
	AttachXDP                        = AttachType(sys.BPF_XDP)
	AttachSkSKBVerdict               = AttachType(sys.BPF_SK_SKB_VERDICT)
	AttachSkReuseportSelect          = AttachType(sys.BPF_SK_REUSEPORT_SELECT)
	AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)
	AttachPerfEvent                  = AttachType(sys.BPF_PERF_EVENT)
	AttachTraceKprobeMulti           = AttachType(sys.BPF_TRACE_KPROBE_MULTI)
	AttachLSMCgroup                  = AttachType(sys.BPF_LSM_CGROUP)
	AttachStructOps                  = AttachType(sys.BPF_STRUCT_OPS)
	AttachNetfilter                  = AttachType(sys.BPF_NETFILTER)
	AttachTCXIngress                 = AttachType(sys.BPF_TCX_INGRESS)
	AttachTCXEgress                  = AttachType(sys.BPF_TCX_EGRESS)
	AttachTraceUprobeMulti           = AttachType(sys.BPF_TRACE_UPROBE_MULTI)
	AttachCgroupUnixConnect          = AttachType(sys.BPF_CGROUP_UNIX_CONNECT)
	AttachCgroupUnixSendmsg          = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG)
	AttachCgroupUnixRecvmsg          = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG)
	AttachCgroupUnixGetpeername      = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME)
	AttachCgroupUnixGetsockname      = AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME)
	AttachNetkitPrimary              = AttachType(sys.BPF_NETKIT_PRIMARY)
	AttachNetkitPeer                 = AttachType(sys.BPF_NETKIT_PEER)
)

// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
5 vendor/github.com/cilium/ebpf/types_string.go generated vendored
@@ -86,11 +86,12 @@ func _() {
	_ = x[LSM-29]
	_ = x[SkLookup-30]
	_ = x[Syscall-31]
	_ = x[Netfilter-32]
}

const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscall"
const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter"

var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301}
var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310}

func (i ProgramType) String() string {
	if i >= ProgramType(len(_ProgramType_index)-1) {
94 vendor/golang.org/x/exp/maps/maps.go generated vendored
@@ -1,94 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package maps defines various functions useful with maps of any type.
package maps

// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
	r := make([]K, 0, len(m))
	for k := range m {
		r = append(r, k)
	}
	return r
}

// Values returns the values of the map m.
// The values will be in an indeterminate order.
func Values[M ~map[K]V, K comparable, V any](m M) []V {
	r := make([]V, 0, len(m))
	for _, v := range m {
		r = append(r, v)
	}
	return r
}

// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
	if len(m1) != len(m2) {
		return false
	}
	for k, v1 := range m1 {
		if v2, ok := m2[k]; !ok || v1 != v2 {
			return false
		}
	}
	return true
}

// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
	if len(m1) != len(m2) {
		return false
	}
	for k, v1 := range m1 {
		if v2, ok := m2[k]; !ok || !eq(v1, v2) {
			return false
		}
	}
	return true
}

// Clear removes all entries from m, leaving it empty.
func Clear[M ~map[K]V, K comparable, V any](m M) {
	for k := range m {
		delete(m, k)
	}
}

// Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment.
func Clone[M ~map[K]V, K comparable, V any](m M) M {
	// Preserve nil in case it matters.
	if m == nil {
		return nil
	}
	r := make(M, len(m))
	for k, v := range m {
		r[k] = v
	}
	return r
}

// Copy copies all key/value pairs in src adding them to dst.
// When a key in src is already present in dst,
// the value in dst will be overwritten by the value associated
// with the key in src.
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
	for k, v := range src {
		dst[k] = v
	}
}

// DeleteFunc deletes any key/value pairs from m for which del returns true.
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
	for k, v := range m {
		if del(k, v) {
			delete(m, k)
		}
	}
}
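The deletion above works because Go 1.21 promoted most of this package into the standard-library maps package (Clone, Copy, DeleteFunc, Equal, EqualFunc; Keys and Values only arrived later, as iterators, in Go 1.23). A small sketch of the standard-library equivalents:

package main

import (
	"fmt"
	"maps"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	c := maps.Clone(m) // shallow copy, like x/exp/maps.Clone
	maps.DeleteFunc(c, func(k string, v int) bool { return v > 1 })
	fmt.Println(maps.Equal(m, c)) // false: c lost "b"
}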
258
vendor/golang.org/x/exp/slices/slices.go
generated
vendored
@@ -1,258 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package slices defines various functions useful with slices of any type.
// Unless otherwise specified, these functions all apply to the elements
// of a slice at index 0 <= i < len(s).
//
// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
// or the sorting may fail to sort correctly. A common case is when sorting slices of
// floating-point numbers containing NaN values.
package slices

import "golang.org/x/exp/constraints"

// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[E comparable](s1, s2 []E) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i := range s1 {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}

// EqualFunc reports whether two slices are equal using a comparison
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, v1 := range s1 {
		v2 := s2[i]
		if !eq(v1, v2) {
			return false
		}
	}
	return true
}

// Compare compares the elements of s1 and s2.
// The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
// Comparisons involving floating point NaNs are ignored.
func Compare[E constraints.Ordered](s1, s2 []E) int {
	s2len := len(s2)
	for i, v1 := range s1 {
		if i >= s2len {
			return +1
		}
		v2 := s2[i]
		switch {
		case v1 < v2:
			return -1
		case v1 > v2:
			return +1
		}
	}
	if len(s1) < s2len {
		return -1
	}
	return 0
}

// CompareFunc is like Compare but uses a comparison function
// on each pair of elements. The elements are compared in increasing
// index order, and the comparisons stop after the first time cmp
// returns non-zero.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
	s2len := len(s2)
	for i, v1 := range s1 {
		if i >= s2len {
			return +1
		}
		v2 := s2[i]
		if c := cmp(v1, v2); c != 0 {
			return c
		}
	}
	if len(s1) < s2len {
		return -1
	}
	return 0
}

// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[E comparable](s []E, v E) int {
	for i, vs := range s {
		if v == vs {
			return i
		}
	}
	return -1
}

// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[E any](s []E, f func(E) bool) int {
	for i, v := range s {
		if f(v) {
			return i
		}
	}
	return -1
}

// Contains reports whether v is present in s.
func Contains[E comparable](s []E, v E) bool {
	return Index(s, v) >= 0
}

// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[E any](s []E, f func(E) bool) bool {
	return IndexFunc(s, f) >= 0
}

// Insert inserts the values v... into s at index i,
// returning the modified slice.
// In the returned slice r, r[i] == v[0].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
	tot := len(s) + len(v)
	if tot <= cap(s) {
		s2 := s[:tot]
		copy(s2[i+len(v):], s[i:])
		copy(s2[i:], v)
		return s2
	}
	s2 := make(S, tot)
	copy(s2, s[:i])
	copy(s2[i:], v)
	copy(s2[i+len(v):], s[i:])
	return s2
}

// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete modifies the contents of the slice s; it does not create a new slice.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
// elements contain pointers you might consider zeroing those elements so that
// objects they reference can be garbage collected.
func Delete[S ~[]E, E any](s S, i, j int) S {
	_ = s[i:j] // bounds check

	return append(s[:i], s[j:]...)
}

// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
	_ = s[i:j] // verify that i:j is a valid subslice
	tot := len(s[:i]) + len(v) + len(s[j:])
	if tot <= cap(s) {
		s2 := s[:tot]
		copy(s2[i+len(v):], s[j:])
		copy(s2[i:], v)
		return s2
	}
	s2 := make(S, tot)
	copy(s2, s[:i])
	copy(s2[i:], v)
	copy(s2[i+len(v):], s[j:])
	return s2
}

// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
	// Preserve nil in case it matters.
	if s == nil {
		return nil
	}
	return append(S([]E{}), s...)
}

// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s; it does not create a new slice.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
func Compact[S ~[]E, E comparable](s S) S {
	if len(s) < 2 {
		return s
	}
	i := 1
	last := s[0]
	for _, v := range s[1:] {
		if v != last {
			s[i] = v
			i++
			last = v
		}
	}
	return s[:i]
}

// CompactFunc is like Compact but uses a comparison function.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
	if len(s) < 2 {
		return s
	}
	i := 1
	last := s[0]
	for _, v := range s[1:] {
		if !eq(v, last) {
			s[i] = v
			i++
			last = v
		}
	}
	return s[:i]
}

// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
	if n < 0 {
		panic("cannot be negative")
	}
	if n -= cap(s) - len(s); n > 0 {
		// TODO(https://go.dev/issue/53888): use []E instead of S here to work
		// around a compiler bug where the runtime.growslice optimization does
		// not take effect. Revert when the compiler is fixed.
		s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
	}
	return s
}

// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
	return s[:len(s):len(s)]
}
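As with maps, Go 1.21's standard-library slices package carries direct counterparts of nearly everything deleted above (Equal, Compare, Index, Contains, Insert, Delete, Replace, Clone, Compact, Grow, Clip), which is what lets the vendored copy go. A quick sketch against the standard library:

package main

import (
	"fmt"
	"slices"
)

func main() {
	s := []int{3, 1, 2, 2}
	s = slices.Compact(s) // [3 1 2]: collapses the consecutive 2s
	fmt.Println(slices.Contains(s, 3), slices.Index(s, 2)) // true 2
	s = slices.Insert(s, 1, 9) // [3 9 1 2]
	s = slices.Delete(s, 0, 1) // [9 1 2]
	fmt.Println(slices.Equal(s, []int{9, 1, 2})) // true
}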
126
vendor/golang.org/x/exp/slices/sort.go
generated
vendored
@@ -1,126 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import (
	"math/bits"

	"golang.org/x/exp/constraints"
)

// Sort sorts a slice of any ordered type in ascending order.
// Sort may fail to sort correctly when sorting slices of floating-point
// numbers containing Not-a-number (NaN) values.
// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
// instead if the input may contain NaNs.
func Sort[E constraints.Ordered](x []E) {
	n := len(x)
	pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}

// SortFunc sorts the slice x in ascending order as determined by the less function.
// This sort is not guaranteed to be stable.
//
// SortFunc requires that less is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
func SortFunc[E any](x []E, less func(a, b E) bool) {
	n := len(x)
	pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
}

// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using less to compare elements.
func SortStableFunc[E any](x []E, less func(a, b E) bool) {
	stableLessFunc(x, len(x), less)
}

// IsSorted reports whether x is sorted in ascending order.
func IsSorted[E constraints.Ordered](x []E) bool {
	for i := len(x) - 1; i > 0; i-- {
		if x[i] < x[i-1] {
			return false
		}
	}
	return true
}

// IsSortedFunc reports whether x is sorted in ascending order, with less as the
// comparison function.
func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
	for i := len(x) - 1; i > 0; i-- {
		if less(x[i], x[i-1]) {
			return false
		}
	}
	return true
}

// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
	// Inlining is faster than calling BinarySearchFunc with a lambda.
	n := len(x)
	// Define x[-1] < target and x[n] >= target.
	// Invariant: x[i-1] < target, x[j] >= target.
	i, j := 0, n
	for i < j {
		h := int(uint(i+j) >> 1) // avoid overflow when computing h
		// i ≤ h < j
		if x[h] < target {
			i = h + 1 // preserves x[i-1] < target
		} else {
			j = h // preserves x[j] >= target
		}
	}
	// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
	return i, i < n && x[i] == target
}

// BinarySearchFunc works like BinarySearch, but uses a custom comparison
// function. The slice must be sorted in increasing order, where "increasing" is
// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
// parameters: 0 if a == b, a negative number if a < b and a positive number if
// a > b.
func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
	n := len(x)
	// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0.
	// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
	i, j := 0, n
	for i < j {
		h := int(uint(i+j) >> 1) // avoid overflow when computing h
		// i ≤ h < j
		if cmp(x[h], target) < 0 {
			i = h + 1 // preserves cmp(x[i - 1], target) < 0
		} else {
			j = h // preserves cmp(x[j], target) >= 0
		}
	}
	// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
	return i, i < n && cmp(x[i], target) == 0
}

type sortedHint int // hint for pdqsort when choosing the pivot

const (
	unknownHint sortedHint = iota
	increasingHint
	decreasingHint
)

// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
type xorshift uint64

func (r *xorshift) Next() uint64 {
	*r ^= *r << 13
	*r ^= *r >> 17
	*r ^= *r << 5
	return uint64(*r)
}

func nextPowerOfTwo(length int) uint {
	return 1 << bits.Len(uint(length))
}
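One API difference worth noting when moving off this deleted file: the standard library's slices.SortFunc and slices.IsSortedFunc take a three-way cmp func(a, b E) int instead of the boolean less used here, which sidesteps the strict-weak-ordering footgun described in the comments above. A brief sketch:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

func main() {
	xs := []float64{3, 1, 2}
	// Stdlib variant: a three-way comparison function, not a less predicate.
	slices.SortFunc(xs, func(a, b float64) int { return cmp.Compare(a, b) })
	i, ok := slices.BinarySearch(xs, 2)
	fmt.Println(xs, i, ok) // [1 2 3] 1 true
}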
479
vendor/golang.org/x/exp/slices/zsortfunc.go
generated
vendored
@@ -1,479 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

// insertionSortLessFunc sorts data[a:b] using insertion sort.
func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
	for i := a + 1; i < b; i++ {
		for j := i; j > a && less(data[j], data[j-1]); j-- {
			data[j], data[j-1] = data[j-1], data[j]
		}
	}
}

// siftDownLessFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		if child+1 < hi && less(data[first+child], data[first+child+1]) {
			child++
		}
		if !less(data[first+root], data[first+child]) {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}

func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
	first := a
	lo := 0
	hi := b - a

	// Build heap with greatest element at top.
	for i := (hi - 1) / 2; i >= 0; i-- {
		siftDownLessFunc(data, i, hi, first, less)
	}

	// Pop elements, largest first, into end of data.
	for i := hi - 1; i >= 0; i-- {
		data[first], data[first+i] = data[first+i], data[first]
		siftDownLessFunc(data, lo, i, first, less)
	}
}

// pdqsortLessFunc sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
	const maxInsertion = 12

	var (
		wasBalanced    = true // whether the last partitioning was reasonably balanced
		wasPartitioned = true // whether the slice was already partitioned
	)

	for {
		length := b - a

		if length <= maxInsertion {
			insertionSortLessFunc(data, a, b, less)
			return
		}

		// Fall back to heapsort if too many bad choices were made.
		if limit == 0 {
			heapSortLessFunc(data, a, b, less)
			return
		}

		// If the last partitioning was imbalanced, we need to break patterns.
		if !wasBalanced {
			breakPatternsLessFunc(data, a, b, less)
			limit--
		}

		pivot, hint := choosePivotLessFunc(data, a, b, less)
		if hint == decreasingHint {
			reverseRangeLessFunc(data, a, b, less)
			// The chosen pivot was pivot-a elements after the start of the array.
			// After reversing it is pivot-a elements before the end of the array.
			// The idea came from Rust's implementation.
			pivot = (b - 1) - (pivot - a)
			hint = increasingHint
		}

		// The slice is likely already sorted.
		if wasBalanced && wasPartitioned && hint == increasingHint {
			if partialInsertionSortLessFunc(data, a, b, less) {
				return
			}
		}

		// Probably the slice contains many duplicate elements, partition the slice into
		// elements equal to and elements greater than the pivot.
		if a > 0 && !less(data[a-1], data[pivot]) {
			mid := partitionEqualLessFunc(data, a, b, pivot, less)
			a = mid
			continue
		}

		mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
		wasPartitioned = alreadyPartitioned

		leftLen, rightLen := mid-a, b-mid
		balanceThreshold := length / 8
		if leftLen < rightLen {
			wasBalanced = leftLen >= balanceThreshold
			pdqsortLessFunc(data, a, mid, limit, less)
			a = mid + 1
		} else {
			wasBalanced = rightLen >= balanceThreshold
			pdqsortLessFunc(data, mid+1, b, limit, less)
			b = mid
		}
	}
}

// partitionLessFunc does one quicksort partition.
// Let p = data[pivot].
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p.
func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
	data[a], data[pivot] = data[pivot], data[a]
	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

	for i <= j && less(data[i], data[a]) {
		i++
	}
	for i <= j && !less(data[j], data[a]) {
		j--
	}
	if i > j {
		data[j], data[a] = data[a], data[j]
		return j, true
	}
	data[i], data[j] = data[j], data[i]
	i++
	j--

	for {
		for i <= j && less(data[i], data[a]) {
			i++
		}
		for i <= j && !less(data[j], data[a]) {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
	data[j], data[a] = data[a], data[j]
	return j, false
}

// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
	data[a], data[pivot] = data[pivot], data[a]
	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

	for {
		for i <= j && !less(data[a], data[i]) {
			i++
		}
		for i <= j && less(data[a], data[j]) {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
	return i
}

// partialInsertionSortLessFunc partially sorts a slice, returning true if the slice is sorted at the end.
func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
	const (
		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
		shortestShifting = 50 // don't shift any elements on short arrays
	)
	i := a + 1
	for j := 0; j < maxSteps; j++ {
		for i < b && !less(data[i], data[i-1]) {
			i++
		}

		if i == b {
			return true
		}

		if b-a < shortestShifting {
			return false
		}

		data[i], data[i-1] = data[i-1], data[i]

		// Shift the smaller one to the left.
		if i-a >= 2 {
			for j := i - 1; j >= 1; j-- {
				if !less(data[j], data[j-1]) {
					break
				}
				data[j], data[j-1] = data[j-1], data[j]
			}
		}
		// Shift the greater one to the right.
		if b-i >= 2 {
			for j := i + 1; j < b; j++ {
				if !less(data[j], data[j-1]) {
					break
				}
				data[j], data[j-1] = data[j-1], data[j]
			}
		}
	}
	return false
}

// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
	length := b - a
	if length >= 8 {
		random := xorshift(length)
		modulus := nextPowerOfTwo(length)

		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
			other := int(uint(random.Next()) & (modulus - 1))
			if other >= length {
				other -= length
			}
			data[idx], data[a+other] = data[a+other], data[idx]
		}
	}
}

// choosePivotLessFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
	const (
		shortestNinther = 50
		maxSwaps        = 4 * 3
	)

	l := b - a

	var (
		swaps int
		i     = a + l/4*1
		j     = a + l/4*2
		k     = a + l/4*3
	)

	if l >= 8 {
		if l >= shortestNinther {
			// Tukey ninther method, the idea came from Rust's implementation.
			i = medianAdjacentLessFunc(data, i, &swaps, less)
			j = medianAdjacentLessFunc(data, j, &swaps, less)
			k = medianAdjacentLessFunc(data, k, &swaps, less)
		}
		// Find the median among i, j, k and store it into j.
		j = medianLessFunc(data, i, j, k, &swaps, less)
	}

	switch swaps {
	case 0:
		return j, increasingHint
	case maxSwaps:
		return j, decreasingHint
	default:
		return j, unknownHint
	}
}

// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
	if less(data[b], data[a]) {
		*swaps++
		return b, a
	}
	return a, b
}

// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
	a, b = order2LessFunc(data, a, b, swaps, less)
	b, c = order2LessFunc(data, b, c, swaps, less)
	a, b = order2LessFunc(data, a, b, swaps, less)
	return b
}

// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
	return medianLessFunc(data, a-1, a, a+1, swaps, less)
}

func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
	i := a
	j := b - 1
	for i < j {
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
}

func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
	blockSize := 20 // must be > 0
	a, b := 0, blockSize
	for b <= n {
		insertionSortLessFunc(data, a, b, less)
		a = b
		b += blockSize
	}
	insertionSortLessFunc(data, a, n, less)

	for blockSize < n {
		a, b = 0, 2*blockSize
		for b <= n {
			symMergeLessFunc(data, a, a+blockSize, b, less)
			a = b
			b += 2 * blockSize
		}
		if m := a + blockSize; m < n {
			symMergeLessFunc(data, a, m, n, less)
		}
		blockSize *= 2
	}
}

// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Wolog M < N.
// The recursion depth is bounded by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
	// Avoid unnecessary recursions of symMerge
	// by direct insertion of data[a] into data[m:b]
	// if data[a:m] only contains one element.
	if m-a == 1 {
		// Use binary search to find the lowest index i
		// such that data[i] >= data[a] for m <= i < b.
		// Exit the search loop with i == b in case no such index exists.
		i := m
		j := b
		for i < j {
			h := int(uint(i+j) >> 1)
			if less(data[h], data[a]) {
				i = h + 1
			} else {
				j = h
			}
		}
		// Swap values until data[a] reaches the position before i.
		for k := a; k < i-1; k++ {
			data[k], data[k+1] = data[k+1], data[k]
		}
		return
	}

	// Avoid unnecessary recursions of symMerge
	// by direct insertion of data[m] into data[a:m]
	// if data[m:b] only contains one element.
	if b-m == 1 {
		// Use binary search to find the lowest index i
		// such that data[i] > data[m] for a <= i < m.
		// Exit the search loop with i == m in case no such index exists.
		i := a
		j := m
		for i < j {
			h := int(uint(i+j) >> 1)
			if !less(data[m], data[h]) {
				i = h + 1
			} else {
				j = h
			}
		}
		// Swap values until data[m] reaches the position i.
		for k := m; k > i; k-- {
			data[k], data[k-1] = data[k-1], data[k]
		}
		return
	}

	mid := int(uint(a+b) >> 1)
	n := mid + m
	var start, r int
	if m > mid {
		start = n - b
		r = mid
	} else {
		start = a
		r = m
	}
	p := n - 1

	for start < r {
		c := int(uint(start+r) >> 1)
		if !less(data[p-c], data[c]) {
			start = c + 1
		} else {
			r = c
		}
	}

	end := n - start
	if start < m && m < end {
		rotateLessFunc(data, start, m, end, less)
	}
	if a < start && start < mid {
		symMergeLessFunc(data, a, start, mid, less)
	}
	if mid < end && end < b {
		symMergeLessFunc(data, mid, end, b, less)
	}
}

// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
	i := m - a
	j := b - m

	for i != j {
		if i > j {
			swapRangeLessFunc(data, m-i, m, j, less)
			i -= j
		} else {
			swapRangeLessFunc(data, m-i, m+j-i, i, less)
			j -= i
		}
	}
	// i == j
	swapRangeLessFunc(data, m-i, m, i, less)
}
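A side note on breakPatternsLessFunc above: it perturbs three elements near the middle of the range using the file's tiny xorshift generator, enough to defeat adversarial input patterns without pulling in a real RNG. The generator is self-contained and can be exercised on its own (the seed here is arbitrary; it must be non-zero or the state stays zero forever):

package main

import "fmt"

// Standalone copy of the xorshift generator from sort.go above
// (shift constants 13, 17, 5, per the cited xorshift paper).
type xorshift uint64

func (r *xorshift) Next() uint64 {
	*r ^= *r << 13
	*r ^= *r >> 17
	*r ^= *r << 5
	return uint64(*r)
}

func main() {
	r := xorshift(42)
	for i := 0; i < 3; i++ {
		fmt.Println(r.Next()) // deterministic pseudo-random sequence
	}
}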
481
vendor/golang.org/x/exp/slices/zsortordered.go
generated
vendored
@@ -1,481 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import "golang.org/x/exp/constraints"

// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
	for i := a + 1; i < b; i++ {
		for j := i; j > a && (data[j] < data[j-1]); j-- {
			data[j], data[j-1] = data[j-1], data[j]
		}
	}
}

// siftDownOrdered implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		if child+1 < hi && (data[first+child] < data[first+child+1]) {
			child++
		}
		if !(data[first+root] < data[first+child]) {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}

func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
	first := a
	lo := 0
	hi := b - a

	// Build heap with greatest element at top.
	for i := (hi - 1) / 2; i >= 0; i-- {
		siftDownOrdered(data, i, hi, first)
	}

	// Pop elements, largest first, into end of data.
	for i := hi - 1; i >= 0; i-- {
		data[first], data[first+i] = data[first+i], data[first]
		siftDownOrdered(data, lo, i, first)
	}
}

// pdqsortOrdered sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
	const maxInsertion = 12

	var (
		wasBalanced    = true // whether the last partitioning was reasonably balanced
		wasPartitioned = true // whether the slice was already partitioned
	)

	for {
		length := b - a

		if length <= maxInsertion {
			insertionSortOrdered(data, a, b)
			return
		}

		// Fall back to heapsort if too many bad choices were made.
		if limit == 0 {
			heapSortOrdered(data, a, b)
			return
		}

		// If the last partitioning was imbalanced, we need to break patterns.
		if !wasBalanced {
			breakPatternsOrdered(data, a, b)
			limit--
		}

		pivot, hint := choosePivotOrdered(data, a, b)
		if hint == decreasingHint {
			reverseRangeOrdered(data, a, b)
			// The chosen pivot was pivot-a elements after the start of the array.
			// After reversing it is pivot-a elements before the end of the array.
			// The idea came from Rust's implementation.
			pivot = (b - 1) - (pivot - a)
			hint = increasingHint
		}

		// The slice is likely already sorted.
		if wasBalanced && wasPartitioned && hint == increasingHint {
			if partialInsertionSortOrdered(data, a, b) {
				return
			}
		}

		// Probably the slice contains many duplicate elements, partition the slice into
		// elements equal to and elements greater than the pivot.
		if a > 0 && !(data[a-1] < data[pivot]) {
			mid := partitionEqualOrdered(data, a, b, pivot)
			a = mid
			continue
		}

		mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
		wasPartitioned = alreadyPartitioned

		leftLen, rightLen := mid-a, b-mid
		balanceThreshold := length / 8
		if leftLen < rightLen {
			wasBalanced = leftLen >= balanceThreshold
			pdqsortOrdered(data, a, mid, limit)
			a = mid + 1
		} else {
			wasBalanced = rightLen >= balanceThreshold
			pdqsortOrdered(data, mid+1, b, limit)
			b = mid
		}
	}
}

// partitionOrdered does one quicksort partition.
// Let p = data[pivot].
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p.
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
	data[a], data[pivot] = data[pivot], data[a]
	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

	for i <= j && (data[i] < data[a]) {
		i++
	}
	for i <= j && !(data[j] < data[a]) {
		j--
	}
	if i > j {
		data[j], data[a] = data[a], data[j]
		return j, true
	}
	data[i], data[j] = data[j], data[i]
	i++
	j--

	for {
		for i <= j && (data[i] < data[a]) {
			i++
		}
		for i <= j && !(data[j] < data[a]) {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
	data[j], data[a] = data[a], data[j]
	return j, false
}

// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
	data[a], data[pivot] = data[pivot], data[a]
	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

	for {
		for i <= j && !(data[a] < data[i]) {
			i++
		}
		for i <= j && (data[a] < data[j]) {
			j--
		}
		if i > j {
			break
		}
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
	return i
}

// partialInsertionSortOrdered partially sorts a slice, returning true if the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
	const (
		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
		shortestShifting = 50 // don't shift any elements on short arrays
	)
	i := a + 1
	for j := 0; j < maxSteps; j++ {
		for i < b && !(data[i] < data[i-1]) {
			i++
		}

		if i == b {
			return true
		}

		if b-a < shortestShifting {
			return false
		}

		data[i], data[i-1] = data[i-1], data[i]

		// Shift the smaller one to the left.
		if i-a >= 2 {
			for j := i - 1; j >= 1; j-- {
				if !(data[j] < data[j-1]) {
					break
				}
				data[j], data[j-1] = data[j-1], data[j]
			}
		}
		// Shift the greater one to the right.
		if b-i >= 2 {
			for j := i + 1; j < b; j++ {
				if !(data[j] < data[j-1]) {
					break
				}
				data[j], data[j-1] = data[j-1], data[j]
			}
		}
	}
	return false
}

// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
	length := b - a
	if length >= 8 {
		random := xorshift(length)
		modulus := nextPowerOfTwo(length)

		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
			other := int(uint(random.Next()) & (modulus - 1))
			if other >= length {
				other -= length
			}
			data[idx], data[a+other] = data[a+other], data[idx]
		}
	}
}

// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
	const (
		shortestNinther = 50
		maxSwaps        = 4 * 3
	)

	l := b - a

	var (
		swaps int
		i     = a + l/4*1
		j     = a + l/4*2
		k     = a + l/4*3
	)

	if l >= 8 {
		if l >= shortestNinther {
			// Tukey ninther method, the idea came from Rust's implementation.
			i = medianAdjacentOrdered(data, i, &swaps)
			j = medianAdjacentOrdered(data, j, &swaps)
			k = medianAdjacentOrdered(data, k, &swaps)
		}
		// Find the median among i, j, k and store it into j.
		j = medianOrdered(data, i, j, k, &swaps)
	}

	switch swaps {
	case 0:
		return j, increasingHint
	case maxSwaps:
		return j, decreasingHint
	default:
		return j, unknownHint
	}
}

// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
	if data[b] < data[a] {
		*swaps++
		return b, a
	}
	return a, b
}

// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
	a, b = order2Ordered(data, a, b, swaps)
	b, c = order2Ordered(data, b, c, swaps)
	a, b = order2Ordered(data, a, b, swaps)
	return b
}

// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
	return medianOrdered(data, a-1, a, a+1, swaps)
}

func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
	i := a
	j := b - 1
	for i < j {
		data[i], data[j] = data[j], data[i]
		i++
		j--
	}
}

func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

func stableOrdered[E constraints.Ordered](data []E, n int) {
	blockSize := 20 // must be > 0
	a, b := 0, blockSize
	for b <= n {
		insertionSortOrdered(data, a, b)
		a = b
		b += blockSize
	}
	insertionSortOrdered(data, a, n)

	for blockSize < n {
		a, b = 0, 2*blockSize
		for b <= n {
			symMergeOrdered(data, a, a+blockSize, b)
			a = b
			b += 2 * blockSize
		}
		if m := a + blockSize; m < n {
			symMergeOrdered(data, a, m, n)
		}
		blockSize *= 2
	}
}

// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Wolog M < N.
// The recursion depth is bounded by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
	// Avoid unnecessary recursions of symMerge
	// by direct insertion of data[a] into data[m:b]
	// if data[a:m] only contains one element.
	if m-a == 1 {
		// Use binary search to find the lowest index i
		// such that data[i] >= data[a] for m <= i < b.
		// Exit the search loop with i == b in case no such index exists.
		i := m
		j := b
		for i < j {
			h := int(uint(i+j) >> 1)
			if data[h] < data[a] {
				i = h + 1
			} else {
				j = h
			}
		}
		// Swap values until data[a] reaches the position before i.
		for k := a; k < i-1; k++ {
			data[k], data[k+1] = data[k+1], data[k]
		}
		return
	}

	// Avoid unnecessary recursions of symMerge
	// by direct insertion of data[m] into data[a:m]
	// if data[m:b] only contains one element.
	if b-m == 1 {
		// Use binary search to find the lowest index i
		// such that data[i] > data[m] for a <= i < m.
		// Exit the search loop with i == m in case no such index exists.
		i := a
		j := m
		for i < j {
			h := int(uint(i+j) >> 1)
			if !(data[m] < data[h]) {
				i = h + 1
			} else {
				j = h
			}
		}
		// Swap values until data[m] reaches the position i.
		for k := m; k > i; k-- {
			data[k], data[k-1] = data[k-1], data[k]
		}
		return
	}

	mid := int(uint(a+b) >> 1)
	n := mid + m
	var start, r int
	if m > mid {
		start = n - b
		r = mid
	} else {
		start = a
		r = m
	}
	p := n - 1

	for start < r {
		c := int(uint(start+r) >> 1)
		if !(data[p-c] < data[c]) {
			start = c + 1
		} else {
			r = c
		}
	}

	end := n - start
	if start < m && m < end {
		rotateOrdered(data, start, m, end)
	}
	if a < start && start < mid {
		symMergeOrdered(data, a, start, mid)
	}
	if mid < end && end < b {
		symMergeOrdered(data, mid, end, b)
	}
}

// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
	i := m - a
	j := b - m

	for i != j {
		if i > j {
			swapRangeOrdered(data, m-i, m, j)
			i -= j
		} else {
			swapRangeOrdered(data, m-i, m+j-i, i)
			j -= i
		}
	}
	// i == j
	swapRangeOrdered(data, m-i, m, i)
}
7
vendor/modules.txt
vendored
@@ -2,12 +2,13 @@
## explicit; go 1.16
github.com/checkpoint-restore/go-criu/v6
github.com/checkpoint-restore/go-criu/v6/rpc
# github.com/cilium/ebpf v0.12.3
## explicit; go 1.20
# github.com/cilium/ebpf v0.16.0
## explicit; go 1.21
github.com/cilium/ebpf
github.com/cilium/ebpf/asm
github.com/cilium/ebpf/btf
github.com/cilium/ebpf/internal
github.com/cilium/ebpf/internal/kallsyms
github.com/cilium/ebpf/internal/kconfig
github.com/cilium/ebpf/internal/sys
github.com/cilium/ebpf/internal/sysenc
@@ -77,8 +78,6 @@ github.com/vishvananda/netns
# golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
## explicit; go 1.18
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
# golang.org/x/net v0.24.0
## explicit; go 1.18
golang.org/x/net/bpf
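Taken together with the file deletions above, the vendor manifest tells the whole story: cilium/ebpf v0.16.0 declares go 1.21, so it can lean on the standard-library slices and maps packages, and the vendored golang.org/x/exp/maps and golang.org/x/exp/slices trees drop out of the module graph; golang.org/x/exp/constraints is still referenced and therefore stays.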