User space needs access to kernel BTF for many modern features of BPF.
Right now each process needs to read the BTF blob either in pieces or
as a whole. Allow mmapping the sysfs file so that processes can directly
access the memory allocated for it in the kernel.

Signed-off-by: Lorenz Bauer <lmb@isovalent.com>
---
 include/asm-generic/vmlinux.lds.h |  3 ++-
 kernel/bpf/sysfs_btf.c            | 36 ++++++++++++++++++++++++++++++++++--
 2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 58a635a6d5bdf0c53c267c2a3d21a5ed8678ce73..1750390735fac7637cc4d2fa05f96cb2a36aa448 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -667,10 +667,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
  */
 #ifdef CONFIG_DEBUG_INFO_BTF
 #define BTF								\
+	. = ALIGN(PAGE_SIZE);						\
 	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {				\
 		BOUNDED_SECTION_BY(.BTF, _BTF)				\
 	}								\
-	. = ALIGN(4);							\
+	. = ALIGN(PAGE_SIZE);						\
 	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {			\
 		*(.BTF_ids)						\
 	}
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index 81d6cf90584a7157929c50f62a5c6862e7a3d081..f4b59b1c2e5b11ffffa80662ad39334c730019ee 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -7,18 +7,50 @@
 #include <linux/kobject.h>
 #include <linux/init.h>
 #include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
 
 /* See scripts/link-vmlinux.sh, gen_btf() func for details */
 extern char __start_BTF[];
 extern char __stop_BTF[];
 
+struct kobject *btf_kobj;
+
+static int btf_vmlinux_mmap(struct file *filp, struct kobject *kobj,
+			    const struct bin_attribute *attr,
+			    struct vm_area_struct *vma)
+{
+	phys_addr_t start = virt_to_phys(__start_BTF);
+	size_t btf_size = __stop_BTF - __start_BTF;
+	size_t vm_size = vma->vm_end - vma->vm_start;
+	unsigned long pfn = start >> PAGE_SHIFT;
+	unsigned long pages = PAGE_ALIGN(btf_size) >> PAGE_SHIFT;
+
+	if (kobj != btf_kobj)
+		return -EINVAL;
+
+	if (vma->vm_pgoff)
+		return -EINVAL;
+
+	if (vma->vm_flags & (VM_WRITE|VM_EXEC|VM_MAYSHARE))
+		return -EACCES;
+
+	if (pfn + pages < pfn)
+		return -EINVAL;
+
+	if (vm_size >> PAGE_SHIFT > pages)
+		return -EINVAL;
+
+	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC|VM_MAYWRITE);
+	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
+}
+
 static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
 	.attr = { .name = "vmlinux", .mode = 0444, },
 	.read_new = sysfs_bin_attr_simple_read,
+	.mmap = btf_vmlinux_mmap,
 };
 
-struct kobject *btf_kobj;
-
 static int __init btf_vmlinux_init(void)
 {
 	bin_attr_btf_vmlinux.private = __start_BTF;
--
2.49.0
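
For context, a minimal sketch of how user space would consume this
interface (not part of the patch; assumes a kernel with it applied,
error handling trimmed; on older kernels the mmap() fails and callers
would fall back to read()):

/* Map the vmlinux BTF blob read-only. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	void *btf;
	int fd;

	fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	/* PROT_READ + MAP_PRIVATE: the handler above rejects
	 * VM_WRITE, VM_EXEC and VM_MAYSHARE with -EACCES.
	 */
	btf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (btf == MAP_FAILED)
		return 1;

	printf("mapped %lld bytes of BTF\n", (long long)st.st_size);
	munmap(btf, st.st_size);
	close(fd);
	return 0;
}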
On Fri, May 2, 2025 at 3:20 AM Lorenz Bauer <lmb@isovalent.com> wrote:
>
> User space needs access to kernel BTF for many modern features of BPF.
> Right now each process needs to read the BTF blob either in pieces or
> as a whole. Allow mmapping the sysfs file so that processes can directly
> access the memory allocated for it in the kernel.
>
> [...]
>
> +	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC|VM_MAYWRITE);
> +	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);

remap_pfn_range() should be avoided.
See big comment in kernel/events/core.c in map_range().
The following seems to work:
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index f4b59b1c2e5b..7d0fd28070d8 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -20,13 +20,13 @@ static int btf_vmlinux_mmap(struct file *filp, struct kobject *kobj,
 			    const struct bin_attribute *attr,
 			    struct vm_area_struct *vma)
 {
-	phys_addr_t start = virt_to_phys(__start_BTF);
+	unsigned long addr = (unsigned long)__start_BTF;
 	size_t btf_size = __stop_BTF - __start_BTF;
 	size_t vm_size = vma->vm_end - vma->vm_start;
-	unsigned long pfn = start >> PAGE_SHIFT;
 	unsigned long pages = PAGE_ALIGN(btf_size) >> PAGE_SHIFT;
+	int i, err = 0;
 
-	if (kobj != btf_kobj)
+	if (kobj != btf_kobj || !pages)
 		return -EINVAL;
 
 	if (vma->vm_pgoff)
@@ -35,14 +35,17 @@ static int btf_vmlinux_mmap(struct file *filp, struct kobject *kobj,
 	if (vma->vm_flags & (VM_WRITE|VM_EXEC|VM_MAYSHARE))
 		return -EACCES;
 
-	if (pfn + pages < pfn)
-		return -EINVAL;
-
 	if (vm_size >> PAGE_SHIFT > pages)
 		return -EINVAL;
 
 	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC|VM_MAYWRITE);
-	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
+
+	for (i = 0; i < pages && !err; i++, addr += PAGE_SIZE)
+		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+				     virt_to_page(addr));
+	if (err)
+		zap_page_range_single(vma, vma->vm_start, pages * PAGE_SIZE, NULL);
+	return err;
 }

Great that you added:
	/* Check padding is zeroed */
	for (int i = 0; i < trailing; i++) {
		if (((__u8 *)raw_data)[btf_size + i] != 0) {
			PRINT_FAIL("tail of BTF is not zero at page offset %d\n", i);
			goto cleanup;
		}
	}

but this part is puzzling:
trailing = page_size - (btf_size % page_size) % page_size;
On Fri, May 2, 2025 at 6:15 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
> remap_pfn_range() should be avoided.
> See big comment in kernel/events/core.c in map_range().
>
> The following seems to work:
Thanks, this helped a lot.
> but this part is puzzling:
> trailing = page_size - (btf_size % page_size) % page_size;
The intention is to calculate how many bytes of trailing zeroes to
expect while accounting for the case where btf_size % page_size == 0.
I could replace this with a check
end = btf_size + (page_size - 1) / page_size * page_size;
for (i = btf_size; i < end; i++) ...
Better?
In the meantime I've looked at allowing mmap of kmods. I'm not sure
it's worth the effort:
1. Allocations of btf->data in btf_parse_module() would have to use
vmalloc_user() so that allocations are page aligned and zeroed
appropriately. This will be a bit more expensive on systems with large
pages and / or many small kmod BTFs. We could only allow mmap of BTF
>= PAGE_SIZE, at additional complexity.
2. We need to hold a refcount on struct btf for each mmapped kernel
module, so that btf->data doesn't get freed. Taking the refcount can
happen in the sysfs mmap handler, but dropping it is tricky. kernfs /
sysfs doesn't allow using vm_ops->close (see kernfs_fop_mmap). It
seems possible to use struct kernfs_ops->release(), but I don't
understand at all how that deals with multiple mmaps of the same file
in a single process. Also makes me wonder what happens when a process
mmaps the kmod BTF, the module is unloaded and then the process
attempts to access the mmap. My cursory understanding is that this
would raise a fault, which isn't great at all.
If nobody objects / has solutions I'll send a v3 of my original patch
with reviews addressed but without being able to mmap kmods.
Thanks
Lorenz
On Mon, May 5, 2025 at 7:37 AM Lorenz Bauer <lmb@isovalent.com> wrote:
>
> On Fri, May 2, 2025 at 6:15 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> > remap_pfn_range() should be avoided.
> > See big comment in kernel/events/core.c in map_range().
> >
> > The following seems to work:
>
> Thanks, this helped a lot.
>
> > but this part is puzzling:
> > trailing = page_size - (btf_size % page_size) % page_size;
>
> The intention is to calculate how many bytes of trailing zeroes to
> expect while accounting for the case where btf_size % page_size == 0.

Well, if it was:

	trailing = page_size - (btf_size % page_size);

then it would be clear. The extra '% page_size' makes it odd.

> I could replace this with a check
>
> end = btf_size + (page_size - 1) / page_size * page_size;

It's equivalent to end = btf_size, since '(page_size - 1) / page_size'
is always zero.

> for (i = btf_size; i < end; i++) ...
>
> Better?
>
> In the meantime I've looked at allowing mmap of kmods. I'm not sure
> it's worth the effort:
>
> 1. Allocations of btf->data in btf_parse_module() would have to use
> vmalloc_user() so that allocations are page aligned and zeroed
> appropriately. This will be a bit more expensive on systems with large
> pages and / or many small kmod BTFs.

Since we kvmemdup() the BTF section now anyway, making it vmalloc-ed
isn't a big deal.

> We could only allow mmap of BTF
> >= PAGE_SIZE, at additional complexity.

I wouldn't go this route. Too much special casing for user space.
Unless you mean that 'if (btf_size < PAGE_SIZE) dont_vmalloc' would be
a kernel-internal decision that is invisible to user space, and
libbpf-like libraries would try to mmap first anyway and always fall
back to reading?

> 2. We need to hold a refcount on struct btf for each mmapped kernel
> module, so that btf->data doesn't get freed. Taking the refcount can
> happen in the sysfs mmap handler, but dropping it is tricky. kernfs /
> sysfs doesn't allow using vm_ops->close (see kernfs_fop_mmap). It
> seems possible to use struct kernfs_ops->release(), but I don't
> understand at all how that deals with multiple mmaps of the same file
> in a single process. Also makes me wonder what happens when a process
> mmaps the kmod BTF, the module is unloaded and then the process
> attempts to access the mmap. My cursory understanding is that this
> would raise a fault, which isn't great at all.

That gets tricky indeed.

> If nobody objects / has solutions I'll send a v3 of my original patch
> with reviews addressed but without being able to mmap kmods.

Makes sense to me. We can always follow up.
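
For the record, corrected forms of the two expressions debated above
(a sketch; variable names taken from the selftest snippet):

	/* Zero bytes expected after btf_size in a page-aligned mapping;
	 * the outer % makes this 0 when btf_size is already a multiple
	 * of page_size.
	 */
	size_t trailing = (page_size - btf_size % page_size) % page_size;

	/* Equivalent upper bound, with the parentheses the quoted
	 * version was missing around the numerator:
	 */
	size_t end = (btf_size + page_size - 1) / page_size * page_size;
	/* then: for (i = btf_size; i < end; i++) expect zero bytes */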
On Fri, May 2, 2025 at 11:20 AM Lorenz Bauer <lmb@isovalent.com> wrote:
>
> User space needs access to kernel BTF for many modern features of BPF.
> Right now each process needs to read the BTF blob either in pieces or
> as a whole. Allow mmapping the sysfs file so that processes can directly
> access the memory allocated for it in the kernel.

I just realised that there is also code which exposes module BTF via
sysfs, which my code currently doesn't handle. I'll send a v3.
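
As background for the v3 note: module BTF is exposed alongside vmlinux
under /sys/kernel/btf/<module>, and per the discussion above those
files stay read()-only. A hedged sketch of that fallback path (helper
name, example module, and error handling are illustrative):

#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

/* Read (not mmap) a module's BTF blob, e.g. /sys/kernel/btf/nf_tables. */
static void *read_module_btf(const char *path, size_t *size)
{
	struct stat st;
	void *buf = NULL;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return NULL;
	if (fstat(fd, &st) == 0) {
		buf = malloc(st.st_size);
		if (buf && read(fd, buf, st.st_size) == st.st_size) {
			*size = st.st_size;
		} else {
			free(buf);
			buf = NULL;
		}
	}
	close(fd);
	return buf;
}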