[PATCH bpf-next v3 2/6] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP

Menglong Dong posted 6 patches 1 week, 6 days ago
[PATCH bpf-next v3 2/6] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP
Posted by Menglong Dong 1 week, 6 days ago
Implement the DYNAMIC_FTRACE_WITH_JMP for x86_64. In ftrace_call_replace,
we will use JMP32_INSN_OPCODE instead of CALL_INSN_OPCODE if the address
should use "jmp".

Meanwhile, adjust the direct call in the ftrace_regs_caller. The RSB is
balanced in the "jmp" mode. Take the function "foo" for example:

 original_caller:
 call foo -> foo:
         call fentry -> fentry:
                 [do ftrace callbacks ]
                 move tramp_addr to stack
                 RET -> tramp_addr
                         tramp_addr:
                         [..]
                         call foo_body -> foo_body:
                                 [..]
                                 RET -> back to tramp_addr
                         [..]
                         RET -> back to original_caller

Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
---
 arch/x86/Kconfig            |  1 +
 arch/x86/kernel/ftrace.c    |  7 ++++++-
 arch/x86/kernel/ftrace_64.S | 12 +++++++++++-
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fa3b616af03a..462250a20311 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -230,6 +230,7 @@ config X86
 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS	if X86_64
 	select HAVE_FTRACE_REGS_HAVING_PT_REGS	if X86_64
 	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	select HAVE_DYNAMIC_FTRACE_WITH_JMP	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT	if X86_64
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
 	select HAVE_EBPF_JIT
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4450acec9390..0543b57f54ee 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	 * No need to translate into a callthunk. The trampoline does
 	 * the depth accounting itself.
 	 */
-	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	if (ftrace_is_jmp(addr)) {
+		addr = ftrace_jmp_get(addr);
+		return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
+	} else {
+		return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
+	}
 }
 
 static int ftrace_verify_code(unsigned long ip, const char *old_code)
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 823dbdd0eb41..a132608265f6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	ANNOTATE_NOENDBR
 	RET
 
+1:
+	testb	$1, %al
+	jz	2f
+	andq $0xfffffffffffffffe, %rax
+	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
+	restore_mcount_regs
+	/* Restore flags */
+	popfq
+	RET
+
 	/* Swap the flags with orig_rax */
-1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
+2:	movq MCOUNT_REG_SIZE(%rsp), %rdi
 	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
 	movq %rax, MCOUNT_REG_SIZE(%rsp)
 
-- 
2.51.2
Re: [PATCH bpf-next v3 2/6] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP
Posted by Jiri Olsa 1 week, 6 days ago
On Tue, Nov 18, 2025 at 08:36:30PM +0800, Menglong Dong wrote:
> Implement the DYNAMIC_FTRACE_WITH_JMP for x86_64. In ftrace_call_replace,
> we will use JMP32_INSN_OPCODE instead of CALL_INSN_OPCODE if the address
> should use "jmp".
> 
> Meanwhile, adjust the direct call in the ftrace_regs_caller. The RSB is
> balanced in the "jmp" mode. Take the function "foo" for example:
> 
>  original_caller:
>  call foo -> foo:
>          call fentry -> fentry:
>                  [do ftrace callbacks ]
>                  move tramp_addr to stack
>                  RET -> tramp_addr
>                          tramp_addr:
>                          [..]
>                          call foo_body -> foo_body:
>                                  [..]
>                                  RET -> back to tramp_addr
>                          [..]
>                          RET -> back to original_caller
> 
> Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> ---
>  arch/x86/Kconfig            |  1 +
>  arch/x86/kernel/ftrace.c    |  7 ++++++-
>  arch/x86/kernel/ftrace_64.S | 12 +++++++++++-
>  3 files changed, 18 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index fa3b616af03a..462250a20311 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -230,6 +230,7 @@ config X86
>  	select HAVE_DYNAMIC_FTRACE_WITH_ARGS	if X86_64
>  	select HAVE_FTRACE_REGS_HAVING_PT_REGS	if X86_64
>  	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
> +	select HAVE_DYNAMIC_FTRACE_WITH_JMP	if X86_64
>  	select HAVE_SAMPLE_FTRACE_DIRECT	if X86_64
>  	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
>  	select HAVE_EBPF_JIT
> diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
> index 4450acec9390..0543b57f54ee 100644
> --- a/arch/x86/kernel/ftrace.c
> +++ b/arch/x86/kernel/ftrace.c
> @@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
>  	 * No need to translate into a callthunk. The trampoline does
>  	 * the depth accounting itself.
>  	 */
> -	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
> +	if (ftrace_is_jmp(addr)) {
> +		addr = ftrace_jmp_get(addr);
> +		return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
> +	} else {
> +		return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
> +	}
>  }
>  
>  static int ftrace_verify_code(unsigned long ip, const char *old_code)
> diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
> index 823dbdd0eb41..a132608265f6 100644
> --- a/arch/x86/kernel/ftrace_64.S
> +++ b/arch/x86/kernel/ftrace_64.S
> @@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
>  	ANNOTATE_NOENDBR
>  	RET
>  
> +1:
> +	testb	$1, %al
> +	jz	2f
> +	andq $0xfffffffffffffffe, %rax
> +	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
> +	restore_mcount_regs
> +	/* Restore flags */
> +	popfq
> +	RET

is this hunk the reason for the 0x1 jmp-bit you set in the address?

I wonder if we introduced new flag in dyn_ftrace::flags for this,
then we'd need to have extra ftrace trampoline for jmp ftrace_ops

jirka
Re: [PATCH bpf-next v3 2/6] x86/ftrace: implement DYNAMIC_FTRACE_WITH_JMP
Posted by Menglong Dong 1 week, 6 days ago
On 2025/11/19 06:01, Jiri Olsa wrote:
> On Tue, Nov 18, 2025 at 08:36:30PM +0800, Menglong Dong wrote:
> > Implement the DYNAMIC_FTRACE_WITH_JMP for x86_64. In ftrace_call_replace,
> > we will use JMP32_INSN_OPCODE instead of CALL_INSN_OPCODE if the address
> > should use "jmp".
> > 
> > Meanwhile, adjust the direct call in the ftrace_regs_caller. The RSB is
> > balanced in the "jmp" mode. Take the function "foo" for example:
> > 
> >  original_caller:
> >  call foo -> foo:
> >          call fentry -> fentry:
> >                  [do ftrace callbacks ]
> >                  move tramp_addr to stack
> >                  RET -> tramp_addr
> >                          tramp_addr:
> >                          [..]
> >                          call foo_body -> foo_body:
> >                                  [..]
> >                                  RET -> back to tramp_addr
> >                          [..]
> >                          RET -> back to original_caller
> > 
> > Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
> > ---
> >  arch/x86/Kconfig            |  1 +
> >  arch/x86/kernel/ftrace.c    |  7 ++++++-
> >  arch/x86/kernel/ftrace_64.S | 12 +++++++++++-
> >  3 files changed, 18 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> > index fa3b616af03a..462250a20311 100644
> > --- a/arch/x86/Kconfig
> > +++ b/arch/x86/Kconfig
> > @@ -230,6 +230,7 @@ config X86
> >  	select HAVE_DYNAMIC_FTRACE_WITH_ARGS	if X86_64
> >  	select HAVE_FTRACE_REGS_HAVING_PT_REGS	if X86_64
> >  	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
> > +	select HAVE_DYNAMIC_FTRACE_WITH_JMP	if X86_64
> >  	select HAVE_SAMPLE_FTRACE_DIRECT	if X86_64
> >  	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI	if X86_64
> >  	select HAVE_EBPF_JIT
> > diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
> > index 4450acec9390..0543b57f54ee 100644
> > --- a/arch/x86/kernel/ftrace.c
> > +++ b/arch/x86/kernel/ftrace.c
> > @@ -74,7 +74,12 @@ static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
> >  	 * No need to translate into a callthunk. The trampoline does
> >  	 * the depth accounting itself.
> >  	 */
> > -	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
> > +	if (ftrace_is_jmp(addr)) {
> > +		addr = ftrace_jmp_get(addr);
> > +		return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
> > +	} else {
> > +		return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
> > +	}
> >  }
> >  
> >  static int ftrace_verify_code(unsigned long ip, const char *old_code)
> > diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
> > index 823dbdd0eb41..a132608265f6 100644
> > --- a/arch/x86/kernel/ftrace_64.S
> > +++ b/arch/x86/kernel/ftrace_64.S
> > @@ -285,8 +285,18 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
> >  	ANNOTATE_NOENDBR
> >  	RET
> >  
> > +1:
> > +	testb	$1, %al
> > +	jz	2f
> > +	andq $0xfffffffffffffffe, %rax
> > +	movq %rax, MCOUNT_REG_SIZE+8(%rsp)
> > +	restore_mcount_regs
> > +	/* Restore flags */
> > +	popfq
> > +	RET
> 
> is this hunk the reason for the 0x1 jmp-bit you set in the address?

Exactly!

> 
> I wonder if we introduced new flag in dyn_ftrace::flags for this,
> then we'd need to have extra ftrace trampoline for jmp ftrace_ops

We don't introduce a new dyn_ftrace::flags bit. I tried to introduce
FTRACE_FL_JMP and FTRACE_FL_JMP_EN for this purpose before I
added the jmp-bit to the address. It's hard to do it that way.

First, we need to introduce a ftrace_regs_jmp_caller, which will
be used for the "jmp" mode. However, it's difficult when we need
to change a call address to a jmp address in __modify_ftrace_direct(),
as it will change "entry->direct" directly. And maybe we would need
to reconstruct the direct call infrastructure to implement it this way.

I was almost giving up before I thought of the jmp-bit, which allows
us to update the address from call mode to jmp mode atomically.

Thanks!
Menglong Dong

> 
> jirka
> 
>