Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
to obtain better performance. The instruction we use here is:
65 48 8B 04 25 [offset] // mov rax, gs:[offset]
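
Decoded: 65 is the GS segment-override prefix, 48 is REX.W (64-bit
operand size), 8B is the MOV r64, r/m64 opcode, and 04 25 is the
ModRM/SIB pair selecting RAX as the destination and a 32-bit absolute
displacement, i.e. the per-CPU offset of current_task.
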
Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
---
v2:
- check the variable type in emit_ldx_percpu_r0 with __verify_pcpu_ptr
- remove the usage of const_current_task
---
arch/x86/net/bpf_jit_comp.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e3b1c4b1d550..f5ff7c77aad7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1300,6 +1300,25 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
 	emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
 }
 
+static void __emit_ldx_percpu_r0(u8 **pprog, __force unsigned long ptr)
+{
+	u8 *prog = *pprog;
+
+	/* mov rax, gs:[ptr] */
+	EMIT2(0x65, 0x48);
+	EMIT2(0x8B, 0x04);
+	EMIT1(0x25);
+	EMIT((u32)ptr, 4);
+
+	*pprog = prog;
+}
+
+#define emit_ldx_percpu_r0(prog, variable)				\
+	do {								\
+		__verify_pcpu_ptr(&(variable));				\
+		__emit_ldx_percpu_r0(&prog, (__force unsigned long)&(variable));\
+	} while (0)
+
 static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
 			   u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
 {
@@ -2441,6 +2460,12 @@ st: if (is_imm8(insn->off))
 		case BPF_JMP | BPF_CALL: {
 			u8 *ip = image + addrs[i - 1];
 
+			if (insn->src_reg == 0 && (insn->imm == BPF_FUNC_get_current_task ||
+						   insn->imm == BPF_FUNC_get_current_task_btf)) {
+				emit_ldx_percpu_r0(prog, current_task);
+				break;
+			}
+
 			func = (u8 *) __bpf_call_base + imm32;
 			if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
 				LOAD_TAIL_CALL_CNT_PTR(stack_depth);
@@ -4082,3 +4107,14 @@ bool bpf_jit_supports_timed_may_goto(void)
 {
 	return true;
 }
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+	switch (imm) {
+	case BPF_FUNC_get_current_task:
+	case BPF_FUNC_get_current_task_btf:
+		return true;
+	default:
+		return false;
+	}
+}
--
2.52.0
On Sun, Jan 4, 2026 at 5:17 AM Menglong Dong <menglong8.dong@gmail.com> wrote:
>
> Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
> to obtain better performance. The instruction we use here is:
>
> 65 48 8B 04 25 [offset] // mov rax, gs:[offset]
>
> Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> ---
> v2:
> - check the variable type in emit_ldx_percpu_r0 with __verify_pcpu_ptr
> - remove the usage of const_current_task
> ---
> arch/x86/net/bpf_jit_comp.c | 36 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 36 insertions(+)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index e3b1c4b1d550..f5ff7c77aad7 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -1300,6 +1300,25 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
> emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
> }
>
> +static void __emit_ldx_percpu_r0(u8 **pprog, __force unsigned long ptr)
> +{
> + u8 *prog = *pprog;
> +
> + /* mov rax, gs:[ptr] */
> + EMIT2(0x65, 0x48);
> + EMIT2(0x8B, 0x04);
> + EMIT1(0x25);
> + EMIT((u32)ptr, 4);
> +
> + *pprog = prog;
> +}
Why asm?
Let's use BPF_MOV64_PERCPU_REG() similar to the way
BPF_FUNC_get_smp_processor_id inlining is handled.
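
As a rough sketch of that direction (modeled on the existing
bpf_get_smp_processor_id() inlining in do_misc_fixups() in
kernel/bpf/verifier.c; the insn_buf/cnt plumbing is assumed from that
code, and the u32 cast relies on CONFIG_SMP per-CPU offsets being
small, so this is illustrative rather than the final patch):

	/* R0 = per-CPU offset of current_task (small under CONFIG_SMP) */
	insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&current_task);
	/* R0 = address of current_task for the executing CPU */
	insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	/* R0 = *(u64 *)R0, i.e. the current task_struct pointer */
	insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	cnt = 3;

The x86-64 JIT already lowers BPF_MOV64_PERCPU_REG() to the right
gs-relative arithmetic, so no raw instruction bytes need to be
open-coded in the JIT for this.
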
pw-bot: cr
On Sun, 2026-01-04 at 21:16 +0800, Menglong Dong wrote:
> Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
> to obtain better performance. The instruction we use here is:
>
> 65 48 8B 04 25 [offset] // mov rax, gs:[offset]
>
> Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> ---
> v2:
> - check the variable type in emit_ldx_percpu_r0 with __verify_pcpu_ptr
> - remove the usage of const_current_task
> ---
> arch/x86/net/bpf_jit_comp.c | 36 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 36 insertions(+)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index e3b1c4b1d550..f5ff7c77aad7 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -1300,6 +1300,25 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
> emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
> }
>
> +static void __emit_ldx_percpu_r0(u8 **pprog, __force unsigned long ptr)
> +{
> + u8 *prog = *pprog;
> +
> + /* mov rax, gs:[ptr] */
> + EMIT2(0x65, 0x48);
> + EMIT2(0x8B, 0x04);
> + EMIT1(0x25);
> + EMIT((u32)ptr, 4);
> +
> + *pprog = prog;
> +}
> +
> +#define emit_ldx_percpu_r0(prog, variable) \
> + do { \
> + __verify_pcpu_ptr(&(variable)); \
> + __emit_ldx_percpu_r0(&prog, (__force unsigned long)&(variable));\
> + } while (0)
> +
> static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
> u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
> {
> @@ -2441,6 +2460,12 @@ st: if (is_imm8(insn->off))
> case BPF_JMP | BPF_CALL: {
> u8 *ip = image + addrs[i - 1];
>
> + if (insn->src_reg == 0 && (insn->imm == BPF_FUNC_get_current_task ||
> + insn->imm == BPF_FUNC_get_current_task_btf)) {
I think this should be guarded by IS_ENABLED(CONFIG_SMP).
current.h:get_current() uses
arch/x86/include/asm/percpu.h:this_cpu_read_stable(), which expands to
__raw_cpu_read_stable(), which uses __force_percpu_arg(), which uses
__force_percpu_prefix, and that prefix is defined differently depending
on CONFIG_SMP.
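
Concretely, something like the following guard (illustrative only;
whether the !SMP case should simply keep emitting the normal helper
call is a separate question):

	if (IS_ENABLED(CONFIG_SMP) && insn->src_reg == 0 &&
	    (insn->imm == BPF_FUNC_get_current_task ||
	     insn->imm == BPF_FUNC_get_current_task_btf)) {
		emit_ldx_percpu_r0(prog, current_task);
		break;
	}
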
> + emit_ldx_percpu_r0(prog, current_task);
> + break;
> + }
> +
> func = (u8 *) __bpf_call_base + imm32;
> if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
> LOAD_TAIL_CALL_CNT_PTR(stack_depth);
> @@ -4082,3 +4107,14 @@ bool bpf_jit_supports_timed_may_goto(void)
> {
> return true;
> }
> +
> +bool bpf_jit_inlines_helper_call(s32 imm)
> +{
> + switch (imm) {
> + case BPF_FUNC_get_current_task:
> + case BPF_FUNC_get_current_task_btf:
> + return true;
> + default:
> + return false;
> + }
> +}
On 2026/1/6 01:45, Eduard Zingerman <eddyz87@gmail.com> wrote:
> On Sun, 2026-01-04 at 21:16 +0800, Menglong Dong wrote:
> > Inline bpf_get_current_task() and bpf_get_current_task_btf() for x86_64
> > to obtain better performance. The instruction we use here is:
> >
> > 65 48 8B 04 25 [offset] // mov rax, gs:[offset]
> >
> > Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> > ---
> > v2:
> > - check the variable type in emit_ldx_percpu_r0 with __verify_pcpu_ptr
> > - remove the usage of const_current_task
> > ---
> > arch/x86/net/bpf_jit_comp.c | 36 ++++++++++++++++++++++++++++++++++++
> > 1 file changed, 36 insertions(+)
> >
> > diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> > index e3b1c4b1d550..f5ff7c77aad7 100644
> > --- a/arch/x86/net/bpf_jit_comp.c
> > +++ b/arch/x86/net/bpf_jit_comp.c
> > @@ -1300,6 +1300,25 @@ static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
> > emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
> > }
> >
> > +static void __emit_ldx_percpu_r0(u8 **pprog, __force unsigned long ptr)
> > +{
> > + u8 *prog = *pprog;
> > +
> > + /* mov rax, gs:[ptr] */
> > + EMIT2(0x65, 0x48);
> > + EMIT2(0x8B, 0x04);
> > + EMIT1(0x25);
> > + EMIT((u32)ptr, 4);
> > +
> > + *pprog = prog;
> > +}
> > +
> > +#define emit_ldx_percpu_r0(prog, variable) \
> > + do { \
> > + __verify_pcpu_ptr(&(variable)); \
> > + __emit_ldx_percpu_r0(&prog, (__force unsigned long)&(variable));\
> > + } while (0)
> > +
> > static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
> > u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
> > {
> > @@ -2441,6 +2460,12 @@ st: if (is_imm8(insn->off))
> > case BPF_JMP | BPF_CALL: {
> > u8 *ip = image + addrs[i - 1];
> >
> > + if (insn->src_reg == 0 && (insn->imm == BPF_FUNC_get_current_task ||
> > + insn->imm == BPF_FUNC_get_current_task_btf)) {
>
> I think this should be guarded by IS_ENABLED(CONFIG_SMP).
> current.h:get_current() uses
> arch/x86/include/asm/percpu.h:this_cpu_read_stable(), which expands to
> __raw_cpu_read_stable(), which uses __force_percpu_arg(), which uses
> __force_percpu_prefix, and that prefix is defined differently depending
> on CONFIG_SMP.
Yeah, I missed this part. I'll use BPF_MOV64_PERCPU_REG() in
the next version, which should avoid this problem.
Thanks!
Menglong Dong
>
> > + emit_ldx_percpu_r0(prog, current_task);
> > + break;
> > + }
> > +
> > func = (u8 *) __bpf_call_base + imm32;
> > if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
> > LOAD_TAIL_CALL_CNT_PTR(stack_depth);
> > @@ -4082,3 +4107,14 @@ bool bpf_jit_supports_timed_may_goto(void)
> > {
> > return true;
> > }
> > +
> > +bool bpf_jit_inlines_helper_call(s32 imm)
> > +{
> > + switch (imm) {
> > + case BPF_FUNC_get_current_task:
> > + case BPF_FUNC_get_current_task_btf:
> > + return true;
> > + default:
> > + return false;
> > + }
> > +}
>