From: Jinjie Ruan
Subject: [PATCH v2 2/3] arm64: Prepare to switch to generic entry
Date: Thu, 27 Jun 2024 16:12:08 +0800
Message-ID: <20240627081209.3511918-3-ruanjinjie@huawei.com>
In-Reply-To: <20240627081209.3511918-1-ruanjinjie@huawei.com>
References: <20240627081209.3511918-1-ruanjinjie@huawei.com>

Prepare to switch to generic entry for arm64:

- Implement regs_irqs_disabled() using the interrupts_enabled() macro.
- Make on_thread_stack() compatible with generic entry.
- Split report_syscall() into report_syscall_enter() and
  report_syscall_exit() to make the switch to generic entry clearer.

Signed-off-by: Jinjie Ruan
---
v2:
- Refactor report_syscall().
- Update the commit message.
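
Note on regs_irqs_disabled(): the generic entry code keys its exception-return
handling on whether the interrupted context had interrupts enabled, and it
spells that test as regs_irqs_disabled(regs), hence the new helper wrapping
interrupts_enabled(). The sketch below is illustration only, not part of this
patch; it is loosely modelled on the irqentry_exit() path in
kernel/entry/common.c, and the actual call sites after the conversion may
differ.

/*
 * Illustration only (not part of this patch): roughly how generic entry
 * is expected to consume the new arm64 regs_irqs_disabled() helper on
 * the irqentry exit path.
 */
#include <linux/entry-common.h>

static void irqentry_exit_sketch(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* Returning to userspace: run the exit-to-user work loop. */
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * Returning to a kernel context that had IRQs enabled: this
		 * is where generic entry re-enables IRQ tracing and may
		 * reschedule before restoring the interrupted context.
		 */
	}
	/* Interrupted context had IRQs disabled: nothing extra to do. */
}

The report_syscall() split points the same way: generic entry reports syscall
entry and syscall exit from separate hooks, so separate enter/exit helpers
avoid threading a direction flag through the common path.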
---
 arch/arm64/include/asm/ptrace.h     |  5 +++++
 arch/arm64/include/asm/stacktrace.h |  5 ++++-
 arch/arm64/kernel/ptrace.c          | 29 ++++++++++++++++++++---------
 3 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 47ec58031f11..1857748ff017 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -360,6 +360,11 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
 	return 0;
 }
 
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+	return !interrupts_enabled(regs);
+}
+
 /* We must avoid circular header include via sched.h */
 struct task_struct;
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 66ec8caa6ac0..36bc1831f906 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -57,7 +57,10 @@ static inline bool on_task_stack(const struct task_struct *tsk,
 	return stackinfo_on_stack(&info, sp, size);
 }
 
-#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
+static __always_inline bool on_thread_stack(void)
+{
+	return on_task_stack(current, current_stack_pointer, 1);
+}
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 0d022599eb61..60fd85d5119d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -2184,7 +2184,7 @@ enum ptrace_syscall_dir {
 	PTRACE_SYSCALL_EXIT,
 };
 
-static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
+static void report_syscall_enter(struct pt_regs *regs)
 {
 	int regno;
 	unsigned long saved_reg;
@@ -2207,13 +2207,24 @@ static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
 	 */
 	regno = (is_compat_task() ? 12 : 7);
 	saved_reg = regs->regs[regno];
-	regs->regs[regno] = dir;
+	regs->regs[regno] = PTRACE_SYSCALL_ENTER;
 
-	if (dir == PTRACE_SYSCALL_ENTER) {
-		if (ptrace_report_syscall_entry(regs))
-			forget_syscall(regs);
-		regs->regs[regno] = saved_reg;
-	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
+	if (ptrace_report_syscall_entry(regs))
+		forget_syscall(regs);
+	regs->regs[regno] = saved_reg;
+}
+
+static void report_syscall_exit(struct pt_regs *regs)
+{
+	int regno;
+	unsigned long saved_reg;
+
+	/* See comment for report_syscall_enter() */
+	regno = (is_compat_task() ? 12 : 7);
+	saved_reg = regs->regs[regno];
+	regs->regs[regno] = PTRACE_SYSCALL_EXIT;
+
+	if (!test_thread_flag(TIF_SINGLESTEP)) {
 		ptrace_report_syscall_exit(regs, 0);
 		regs->regs[regno] = saved_reg;
 	} else {
@@ -2233,7 +2244,7 @@ int syscall_trace_enter(struct pt_regs *regs)
 	unsigned long flags = read_thread_flags();
 
 	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
-		report_syscall(regs, PTRACE_SYSCALL_ENTER);
+		report_syscall_enter(regs);
 		if (flags & _TIF_SYSCALL_EMU)
 			return NO_SYSCALL;
 	}
@@ -2261,7 +2272,7 @@ void syscall_trace_exit(struct pt_regs *regs)
 		trace_sys_exit(regs, syscall_get_return_value(current, regs));
 
 	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
-		report_syscall(regs, PTRACE_SYSCALL_EXIT);
+		report_syscall_exit(regs);
 
 	rseq_syscall(regs);
 }
-- 
2.34.1