[PATCH v8 05/12] arm64: syscall: Rework el0_svc_common()

Posted by Jinjie Ruan 5 days, 16 hours ago
The generic entry code's syscall_exit_work() does the following:

| audit_syscall_exit(regs)
| trace_sys_exit(regs, ...)
| ptrace_report_syscall_exit(regs, step)
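
The "step" argument above reflects the single-step state; in the generic
entry code it is derived roughly as follows (paraphrased, exact details
vary by kernel version):

| step = report_single_step(work)
| if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
|	ptrace_report_syscall_exit(regs, step)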

The generic entry code's syscall_exit_to_user_mode_work() has the
following form:

| unsigned long work = READ_ONCE(current_thread_info()->syscall_work)
| rseq_syscall()
| if (unlikely(work & SYSCALL_WORK_EXIT))
|	syscall_exit_work(regs, work)
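
For reference, the generic syscall_exit_to_user_mode() ties these pieces
together roughly like this (paraphrased; the exact structure varies by
kernel version):

| syscall_exit_to_user_mode(regs)
|	syscall_exit_to_user_mode_work(regs)
|	exit_to_user_mode_prepare(regs)
|	exit_to_user_mode()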

In preparation for moving arm64 over to the generic entry code,
rework el0_svc_common() as below:

- Rename syscall_trace_exit() to syscall_exit_work().

- Add a syscall_exit_to_user_mode_prepare() helper to replace the
  combination of read_thread_flags() and syscall_exit_work(), and move
  the syscall exit check logic into it. Move the has_syscall_work()
  helper into asm/syscall.h so it can be reused.

- Since rseq_syscall() is now always called, and is itself a no-op
  unless CONFIG_DEBUG_RSEQ is enabled, drop the explicit
  CONFIG_DEBUG_RSEQ check; see the sketch below.
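
Paraphrasing the rseq declarations in the kernel headers (the exact
location and form vary by kernel version), rseq_syscall() compiles away
when the option is off:

| #ifdef CONFIG_DEBUG_RSEQ
| void rseq_syscall(struct pt_regs *regs);
| #else
| static inline void rseq_syscall(struct pt_regs *regs) { }
| #endif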

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/syscall.h |  7 ++++++-
 arch/arm64/kernel/ptrace.c       | 14 +++++++++++---
 arch/arm64/kernel/syscall.c      | 20 +-------------------
 3 files changed, 18 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index d69f590a989b..6225981fbbdb 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -114,7 +114,12 @@ static inline int syscall_get_arch(struct task_struct *task)
 	return AUDIT_ARCH_AARCH64;
 }
 
+static inline bool has_syscall_work(unsigned long flags)
+{
+	return unlikely(flags & _TIF_SYSCALL_WORK);
+}
+
 int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags);
-void syscall_trace_exit(struct pt_regs *regs, unsigned long flags);
+void syscall_exit_to_user_mode_prepare(struct pt_regs *regs);
 
 #endif	/* __ASM_SYSCALL_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index dfdd886dc0a9..233a7688ac94 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -2409,10 +2409,8 @@ int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags)
 	return syscall;
 }
 
-void syscall_trace_exit(struct pt_regs *regs, unsigned long flags)
+static void syscall_exit_work(struct pt_regs *regs, unsigned long flags)
 {
-	rseq_syscall(regs);
-
 	audit_syscall_exit(regs);
 
 	if (flags & _TIF_SYSCALL_TRACEPOINT)
@@ -2422,6 +2420,16 @@ void syscall_trace_exit(struct pt_regs *regs, unsigned long flags)
 		report_syscall_exit(regs);
 }
 
+void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	unsigned long flags = read_thread_flags();
+
+	rseq_syscall(regs);
+
+	if (has_syscall_work(flags) || flags & _TIF_SINGLESTEP)
+		syscall_exit_work(regs, flags);
+}
+
 /*
  * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
  * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index ec31f82d2e9f..65021d0f49e1 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -65,11 +65,6 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 	choose_random_kstack_offset(get_random_u16());
 }
 
-static inline bool has_syscall_work(unsigned long flags)
-{
-	return unlikely(flags & _TIF_SYSCALL_WORK);
-}
-
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 			   const syscall_fn_t syscall_table[])
 {
@@ -130,21 +125,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	}
 
 	invoke_syscall(regs, scno, sc_nr, syscall_table);
-
-	/*
-	 * The tracing status may have changed under our feet, so we have to
-	 * check again. However, if we were tracing entry, then we always trace
-	 * exit regardless, as the old entry assembly did.
-	 */
-	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
-		flags = read_thread_flags();
-		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
-			return;
-	}
-
 trace_exit:
-	flags = read_thread_flags();
-	syscall_trace_exit(regs, flags);
+	syscall_exit_to_user_mode_prepare(regs);
 }
 
 void do_el0_svc(struct pt_regs *regs)
-- 
2.34.1
Re: [PATCH v8 05/12] arm64: syscall: Rework el0_svc_common()
Posted by Kevin Brodsky 4 days, 10 hours ago
On 26/11/2025 08:14, Jinjie Ruan wrote:
> [...]
>
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>

Reviewed-by: Kevin Brodsky <kevin.brodsky@arm.com>