Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
to obtain better performance.
In the !CONFIG_SMP case, the percpu variable is just a normal variable, and
current_task can be read directly.
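Conceptually, the patched sequence computes the following (a rough
sketch of its semantics, not the exact JITed code; this_cpu_offset
stands in for the JIT's %gs-based per-CPU base):

	void *p = (void *)(u32)(unsigned long)&current_task;	/* per-CPU offset */
	p = p + this_cpu_offset;		/* BPF_MOV64_PERCPU_REG */
	return *(struct task_struct **)p;	/* the current task */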
Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
---
v5:
- don't support the !CONFIG_SMP case
v4:
- handle the !CONFIG_SMP case
v3:
- implement it in the verifier with BPF_MOV64_PERCPU_REG() instead of in
the x86_64 JIT.
---
kernel/bpf/verifier.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9de0ec0c3ed9..c4e2ffadfb1f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -17739,6 +17739,10 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
switch (imm) {
#ifdef CONFIG_X86_64
case BPF_FUNC_get_smp_processor_id:
+#ifdef CONFIG_SMP
+ case BPF_FUNC_get_current_task_btf:
+ case BPF_FUNC_get_current_task:
+#endif
return env->prog->jit_requested && bpf_jit_supports_percpu_insn();
#endif
default:
@@ -23319,6 +23323,24 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
insn = new_prog->insnsi + i + delta;
goto next_insn;
}
+
+ /* Implement bpf_get_current_task() and bpf_get_current_task_btf() inline. */
+ if ((insn->imm == BPF_FUNC_get_current_task || insn->imm == BPF_FUNC_get_current_task_btf) &&
+ verifier_inlines_helper_call(env, insn->imm)) {
+ insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&current_task);
+ insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
+ insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
+ cnt = 3;
+
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
+ }
#endif
/* Implement bpf_get_func_arg inline. */
if (prog_type == BPF_PROG_TYPE_TRACING &&
--
2.52.0
On Mon, 2026-01-19 at 15:02 +0800, Menglong Dong wrote:
> Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
^^^^^^
Nit: this change is no longer x86 specific.
> to obtain better performance.
>
> In the !CONFIG_SMP case, the percpu variable is just a normal variable, and
> current_task can be read directly.
>
> Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
[...]
> @@ -23319,6 +23323,24 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> insn = new_prog->insnsi + i + delta;
> goto next_insn;
> }
> +
> + /* Implement bpf_get_current_task() and bpf_get_current_task_btf() inline. */
> + if ((insn->imm == BPF_FUNC_get_current_task || insn->imm == BPF_FUNC_get_current_task_btf) &&
> + verifier_inlines_helper_call(env, insn->imm)) {
> + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&current_task);
> + insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
> + insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
^^^^^^^
Silly question:
This assumes a pointer size of 8 bytes, which is true for all archs
where bpf_jit_supports_percpu_insn() returns true at the moment.
Do we want an additional safety check here?
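For example (an untested sketch, maybe overkill), a build-time
assertion next to the fixup would document the assumption at no
runtime cost:

	/* The inlined sequence loads 'current' with a BPF_DW access. */
	BUILD_BUG_ON(sizeof(struct task_struct *) != sizeof(u64));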
> + cnt = 3;
> +
> + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
> + if (!new_prog)
> + return -ENOMEM;
> +
> + delta += cnt - 1;
> + env->prog = prog = new_prog;
> + insn = new_prog->insnsi + i + delta;
> + goto next_insn;
> + }
> #endif
> /* Implement bpf_get_func_arg inline. */
> if (prog_type == BPF_PROG_TYPE_TRACING &&