The generic entry code expands secure_computing() in place and calls
__secure_computing() directly.
In order to switch arm64 over to the generic entry code, refactor the
secure_computing() call in syscall_trace_enter() to match: check
_TIF_SECCOMP explicitly and call __secure_computing() directly.
No functional changes.
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
arch/arm64/kernel/ptrace.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index da9687d30bcf..72d4d987ba3b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -2392,8 +2392,11 @@ int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags)
}
/* Do the secure computing after ptrace; failures should be fast. */
- if (secure_computing() == -1)
- return NO_SYSCALL;
+ if (flags & _TIF_SECCOMP) {
+ ret = __secure_computing();
+ if (ret == -1)
+ return NO_SYSCALL;
+ }
/* Either of the above might have changed the syscall number */
syscall = syscall_get_nr(current, regs);
@@ -2411,7 +2414,7 @@ int syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long flags)
audit_syscall_entry(syscall, regs->orig_x0, regs->regs[1],
regs->regs[2], regs->regs[3]);
- return syscall;
+ return ret ? : syscall;
}
static void syscall_exit_work(struct pt_regs *regs, unsigned long flags)
--
2.34.1