[PATCH RFC bpf-next 2/7] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP

Posted by Menglong Dong 2 months, 3 weeks ago
Implement DYNAMIC_FTRACE_WITH_JMP for x86_64. In ftrace_call_replace(),
generate a JMP32_INSN_OPCODE instead of a CALL_INSN_OPCODE when the
destination address is flagged to be reached with "jmp".

Also adjust the direct-call path in ftrace_regs_caller: when the direct
address carries the "jmp" flag, clear the flag and make the final RET land
in the trampoline instead of calling it.
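
ftrace_is_jmp() and ftrace_jmp_get() used below are expected to come from
the generic ftrace patch earlier in this series; the following is only a
rough sketch of the assumed encoding, with the "use jmp" flag kept in bit 0
of the address, matching the bit-0 test and mask done in ftrace_regs_caller:

	/* Sketch only -- assumed helpers, not part of this patch. */
	#define FTRACE_JMP_FLAG		1UL

	/* True if the low bit says "jmp to addr" rather than "call addr". */
	static inline bool ftrace_is_jmp(unsigned long addr)
	{
		return addr & FTRACE_JMP_FLAG;
	}

	/* Strip the flag bit to recover the real destination address. */
	static inline unsigned long ftrace_jmp_get(unsigned long addr)
	{
		return addr & ~FTRACE_JMP_FLAG;
	}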

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
---
 arch/x86/Kconfig            |  1 +
 arch/x86/kernel/ftrace.c    |  7 ++++++-
 arch/x86/kernel/ftrace_64.S | 12 +++++++++++-
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fa3b616af03a..462250a20311 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -230,6 +230,7 @@ config X86
 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS	if X86_64
 	select HAVE_FTRACE_REGS_HAVING_PT_REGS	if X86_64
 	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	select HAVE_DYNAMIC_FTRACE_WITH_JMP	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
 	select HAVE_EBPF_JIT
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4450acec9390..0543b57f54ee 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	 * No need to translate into a callthunk. The trampoline does
 	 * the depth accounting itself.
 	 */
-	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	if (ftrace_is_jmp(addr)) {
+		addr = ftrace_jmp_get(addr);
+		return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+	} else {
+		return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	}
 }
 
 static int ftrace_verify_code(unsigned long ip, const char *old_code)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 367da3638167..068242e9c857 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	ANNOTATE_NOENDBR
 	RET
 
+1:
+	testb	$1, %al
+	jz	2f
+	andq $0xfffffffffffffffe, %rax
+	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
+	restore_mcount_regs
+	/* Restore flags */
+	popfq
+	RET
+
 	/* Swap the flags with orig_rax */
-1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
+2:	movq MCOUNT_REG_SIZE(%rsp), %rdi
 	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
 	movq %rax, MCOUNT_REG_SIZE(%rsp)
 
-- 
2.51.2
Re: [PATCH RFC bpf-next 2/7] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP
Posted by Steven Rostedt 2 months, 3 weeks ago
On Fri, 14 Nov 2025 17:24:45 +0800
Menglong Dong <menglong8.dong@gmail.com> wrote:

> --- a/arch/x86/kernel/ftrace_64.S
> +++ b/arch/x86/kernel/ftrace_64.S
> @@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
>  	ANNOTATE_NOENDBR
>  	RET
>  
> +1:
> +	testb	$1, %al
> +	jz	2f
> +	andq $0xfffffffffffffffe, %rax
> +	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
> +	restore_mcount_regs
> +	/* Restore flags */
> +	popfq
> +	RET
> +
>  	/* Swap the flags with orig_rax */
> -1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
> +2:	movq MCOUNT_REG_SIZE(%rsp), %rdi
>  	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
>  	movq %rax, MCOUNT_REG_SIZE(%rsp)
>  

So in this case we have:

 original_caller:
 call foo -> foo:
             call fentry -> fentry:
                            [do ftrace callbacks ]
                            move tramp_addr to stack
                            RET -> tramp_addr
                                            tramp_addr:
                                            [..]
                                            call foo_body -> foo_body:
                                                             [..]
                                                             RET -> back to tramp_addr
                                            [..]
                                            RET -> back to original_caller

I guess that looks balanced.

-- Steve
Re: [PATCH RFC bpf-next 2/7] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP
Posted by Menglong Dong 2 months, 3 weeks ago
On Sat, Nov 15, 2025 at 12:39 AM Steven Rostedt <rostedt@goodmis.org> wrote:
>
> On Fri, 14 Nov 2025 17:24:45 +0800
> Menglong Dong <menglong8.dong@gmail.com> wrote:
>
> > --- a/arch/x86/kernel/ftrace_64.S
> > +++ b/arch/x86/kernel/ftrace_64.S
> > @@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
> >       ANNOTATE_NOENDBR
> >       RET
> >
> > +1:
> > +     testb   $1, %al
> > +     jz      2f
> > +     andq $0xfffffffffffffffe, %rax
> > +     movq %rax, MCOUNT_REG_SIZE+8(%rsp)
> > +     restore_mcount_regs
> > +     /* Restore flags */
> > +     popfq
> > +     RET
> > +
> >       /* Swap the flags with orig_rax */
> > -1:   movq MCOUNT_REG_SIZE(%rsp), %rdi
> > +2:   movq MCOUNT_REG_SIZE(%rsp), %rdi
> >       movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
> >       movq %rax, MCOUNT_REG_SIZE(%rsp)
> >
>
> So in this case we have:
>
>  original_caller:
>  call foo -> foo:
>              call fentry -> fentry:
>                             [do ftrace callbacks ]
>                             move tramp_addr to stack
>                             RET -> tramp_addr
>                                             tramp_addr:
>                                             [..]
>                                             call foo_body -> foo_body:
>                                                              [..]
>                                                              RET -> back to tramp_addr
>                                             [..]
>                                             RET -> back to original_caller

Nice flow chart, which I think we can put in the commit log.

>
> I guess that looks balanced.

Yes, it is balanced.
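
Spelling the pushes and pops out from your chart (labels are illustrative):

  original_caller: call foo       -> push &ret_to_original_caller
  foo:             call fentry    -> push &foo_body
  fentry:          RET            -> pop  (slot rewritten to tramp_addr)
  tramp_addr:      call foo_body  -> push &ret_to_tramp
  foo_body:        RET            -> pop  (back in tramp_addr)
  tramp_addr:      RET            -> pop  &ret_to_original_caller

Three pushes, three pops, so the stack ends up exactly where it started.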

>
> -- Steve
>
>