Introduce a bpf_has_frame_pointer() helper that unwinders can call to
determine whether a given instruction pointer is within the valid frame
pointer region of a BPF JIT program or trampoline (i.e., after the
prologue, before the epilogue).
This will enable livepatch (with the ORC unwinder) to reliably unwind
through BPF JIT frames.
Acked-by: Song Liu <song@kernel.org>
Acked-and-tested-by: Andrey Grodzovsky <andrey.grodzovsky@crowdstrike.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/net/bpf_jit_comp.c | 12 ++++++++++++
include/linux/bpf.h | 3 +++
kernel/bpf/core.c | 16 ++++++++++++++++
3 files changed, 31 insertions(+)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index de5083cb1d37..3ec4fa94086a 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1661,6 +1661,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
emit_prologue(&prog, image, stack_depth,
bpf_prog_was_classic(bpf_prog), tail_call_reachable,
bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
+
+ bpf_prog->aux->ksym.fp_start = prog - temp;
+
/* Exception callback will clobber callee regs for its own use, and
* restore the original callee regs from main prog's stack frame.
*/
@@ -2716,6 +2719,8 @@ st: if (is_imm8(insn->off))
pop_r12(&prog);
}
EMIT1(0xC9); /* leave */
+ bpf_prog->aux->ksym.fp_end = prog - temp;
+
emit_return(&prog, image + addrs[i - 1] + (prog - temp));
break;
@@ -3299,6 +3304,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
}
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
+ if (im)
+ im->ksym.fp_start = prog - (u8 *)rw_image;
+
if (!is_imm8(stack_size)) {
/* sub rsp, stack_size */
EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
@@ -3436,7 +3444,11 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
+
EMIT1(0xC9); /* leave */
+ if (im)
+ im->ksym.fp_end = prog - (u8 *)rw_image;
+
if (flags & BPF_TRAMP_F_SKIP_FRAME) {
/* skip our return address and return to parent */
EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d808253f2e94..e3f56e8443da 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1257,6 +1257,8 @@ struct bpf_ksym {
struct list_head lnode;
struct latch_tree_node tnode;
bool prog;
+ u32 fp_start;
+ u32 fp_end;
};
enum bpf_tramp_prog_type {
@@ -1483,6 +1485,7 @@ void bpf_image_ksym_add(struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
+bool bpf_has_frame_pointer(unsigned long ip);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d595fe512498..7cd8382d1152 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -760,6 +760,22 @@ struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
NULL;
}
+bool bpf_has_frame_pointer(unsigned long ip)
+{
+ struct bpf_ksym *ksym;
+ unsigned long offset;
+
+ guard(rcu)();
+
+ ksym = bpf_ksym_find(ip);
+ if (!ksym || !ksym->fp_start || !ksym->fp_end)
+ return false;
+
+ offset = ip - ksym->start;
+
+ return offset >= ksym->fp_start && offset < ksym->fp_end;
+}
+
const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
const struct exception_table_entry *e = NULL;
--
2.51.1
On Wed, Dec 03, 2025 at 07:32:15PM -0800, Josh Poimboeuf wrote:
> Introduce a bpf_has_frame_pointer() helper that unwinders can call to
> determine whether a given instruction pointer is within the valid frame
> pointer region of a BPF JIT program or trampoline (i.e., after the
> prologue, before the epilogue).
>
> This will enable livepatch (with the ORC unwinder) to reliably unwind
> through BPF JIT frames.
>
> Acked-by: Song Liu <song@kernel.org>
> Acked-and-tested-by: Andrey Grodzovsky<andrey.grodzovsky@crowdstrike.com>
> Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
> ---
> arch/x86/net/bpf_jit_comp.c | 12 ++++++++++++
> include/linux/bpf.h | 3 +++
> kernel/bpf/core.c | 16 ++++++++++++++++
> 3 files changed, 31 insertions(+)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index de5083cb1d37..3ec4fa94086a 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -1661,6 +1661,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
> emit_prologue(&prog, image, stack_depth,
> bpf_prog_was_classic(bpf_prog), tail_call_reachable,
> bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
> +
> + bpf_prog->aux->ksym.fp_start = prog - temp;
> +
> /* Exception callback will clobber callee regs for its own use, and
> * restore the original callee regs from main prog's stack frame.
> */
> @@ -2716,6 +2719,8 @@ st: if (is_imm8(insn->off))
> pop_r12(&prog);
> }
> EMIT1(0xC9); /* leave */
> + bpf_prog->aux->ksym.fp_end = prog - temp;
> +
> emit_return(&prog, image + addrs[i - 1] + (prog - temp));
> break;
>
> @@ -3299,6 +3304,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
> }
> EMIT1(0x55); /* push rbp */
> EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
> + if (im)
> + im->ksym.fp_start = prog - (u8 *)rw_image;
> +
> if (!is_imm8(stack_size)) {
> /* sub rsp, stack_size */
> EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
> @@ -3436,7 +3444,11 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
> emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
>
> emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
> +
> EMIT1(0xC9); /* leave */
> + if (im)
> + im->ksym.fp_end = prog - (u8 *)rw_image;
is the null check needed? there are other places in the function that
use 'im' without that
thanks,
jirka
> +
> if (flags & BPF_TRAMP_F_SKIP_FRAME) {
> /* skip our return address and return to parent */
> EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index d808253f2e94..e3f56e8443da 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1257,6 +1257,8 @@ struct bpf_ksym {
> struct list_head lnode;
> struct latch_tree_node tnode;
> bool prog;
> + u32 fp_start;
> + u32 fp_end;
> };
>
> enum bpf_tramp_prog_type {
> @@ -1483,6 +1485,7 @@ void bpf_image_ksym_add(struct bpf_ksym *ksym);
> void bpf_image_ksym_del(struct bpf_ksym *ksym);
> void bpf_ksym_add(struct bpf_ksym *ksym);
> void bpf_ksym_del(struct bpf_ksym *ksym);
> +bool bpf_has_frame_pointer(unsigned long ip);
> int bpf_jit_charge_modmem(u32 size);
> void bpf_jit_uncharge_modmem(u32 size);
> bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index d595fe512498..7cd8382d1152 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -760,6 +760,22 @@ struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
> NULL;
> }
>
> +bool bpf_has_frame_pointer(unsigned long ip)
> +{
> + struct bpf_ksym *ksym;
> + unsigned long offset;
> +
> + guard(rcu)();
> +
> + ksym = bpf_ksym_find(ip);
> + if (!ksym || !ksym->fp_start || !ksym->fp_end)
> + return false;
> +
> + offset = ip - ksym->start;
> +
> + return offset >= ksym->fp_start && offset < ksym->fp_end;
> +}
> +
> const struct exception_table_entry *search_bpf_extables(unsigned long addr)
> {
> const struct exception_table_entry *e = NULL;
> --
> 2.51.1
>
>
On Thu, Dec 04, 2025 at 03:04:23PM +0100, Jiri Olsa wrote:
> On Wed, Dec 03, 2025 at 07:32:15PM -0800, Josh Poimboeuf wrote:
> > 		EMIT1(0xC9); /* leave */
> > +		if (im)
> > +			im->ksym.fp_end = prog - (u8 *)rw_image;
>
> is the null check needed? there are other places in the function that
> use 'im' without that

That was a NULL pointer dereference found by BPF CI.
bpf_struct_ops_prepare_trampoline() calls arch_prepare_bpf_trampoline()
with NULL im.

-- Josh
© 2016 - 2025 Red Hat, Inc.