From: Sven Schnelle
To: Thomas Gleixner, Peter Zijlstra, Andy Lutomirski
Cc: linux-kernel@vger.kernel.org, Heiko Carstens
Subject: [PATCH v2 1/3] entry: move exit to usermode functions to header file
Date: Mon, 18 Dec 2023 08:45:18 +0100
Message-Id: <20231218074520.1998026-2-svens@linux.ibm.com>
In-Reply-To: <20231218074520.1998026-1-svens@linux.ibm.com>
References: <20231218074520.1998026-1-svens@linux.ibm.com>

To allow inlining, move exit_to_user_mode() to entry-common.h.

Signed-off-by: Sven Schnelle
---
 include/linux/entry-common.h | 52 +++++++++++++++++++++++++++++++++++-
 kernel/entry/common.c        | 50 +++++-----------------------------
 2 files changed, 58 insertions(+), 44 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d95ab85f96ba..b08aceb26e8e 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -7,6 +7,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 
 #include
 
@@ -258,6 +262,42 @@ static __always_inline void arch_exit_to_user_mode(void) { }
  */
 void arch_do_signal_or_restart(struct pt_regs *regs);
 
+/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+				     unsigned long ti_work);
+
+/**
+ * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ *    EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ */
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+	unsigned long ti_work;
+
+	lockdep_assert_irqs_disabled();
+
+	/* Flush pending rcuog wakeup before the last need_resched() check */
+	tick_nohz_user_enter_prepare();
+
+	ti_work = read_thread_flags();
+	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+		ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+	arch_exit_to_user_mode_prepare(regs, ti_work);
+
+	/* Ensure that kernel state is sane for a return to userspace */
+	kmap_assert_nomap();
+	lockdep_assert_irqs_disabled();
+	lockdep_sys_exit();
+}
+
 /**
  * exit_to_user_mode - Fixup state when exiting to user mode
  *
@@ -276,7 +316,17 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
  * non-instrumentable.
  * The caller has to invoke syscall_exit_to_user_mode_work() before this.
  */
-void exit_to_user_mode(void);
+static __always_inline void exit_to_user_mode(void)
+{
+	instrumentation_begin();
+	trace_hardirqs_on_prepare();
+	lockdep_hardirqs_on_prepare();
+	instrumentation_end();
+
+	user_enter_irqoff();
+	arch_exit_to_user_mode();
+	lockdep_hardirqs_on(CALLER_ADDR0);
+}
 
 /**
  * syscall_exit_to_user_mode_work - Handle work before returning to user mode
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index d7ee4bc3f2ba..113bd3e8e73e 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -123,29 +123,14 @@ noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
 	instrumentation_end();
 }
 
-/* See comment for exit_to_user_mode() in entry-common.h */
-static __always_inline void __exit_to_user_mode(void)
-{
-	instrumentation_begin();
-	trace_hardirqs_on_prepare();
-	lockdep_hardirqs_on_prepare();
-	instrumentation_end();
-
-	user_enter_irqoff();
-	arch_exit_to_user_mode();
-	lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-void noinstr exit_to_user_mode(void)
-{
-	__exit_to_user_mode();
-}
-
 /* Workaround to allow gradual conversion of architecture code */
 void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
 
-static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
-					    unsigned long ti_work)
+/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ */
+__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+						     unsigned long ti_work)
 {
 	/*
 	 * Before returning to user space ensure that all pending work
@@ -190,27 +175,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 	return ti_work;
 }
 
-static void exit_to_user_mode_prepare(struct pt_regs *regs)
-{
-	unsigned long ti_work;
-
-	lockdep_assert_irqs_disabled();
-
-	/* Flush pending rcuog wakeup before the last need_resched() check */
-	tick_nohz_user_enter_prepare();
-
-	ti_work = read_thread_flags();
-	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
-		ti_work = exit_to_user_mode_loop(regs, ti_work);
-
-	arch_exit_to_user_mode_prepare(regs, ti_work);
-
-	/* Ensure that kernel state is sane for a return to userspace */
-	kmap_assert_nomap();
-	lockdep_assert_irqs_disabled();
-	lockdep_sys_exit();
-}
-
 /*
  * If SYSCALL_EMU is set, then the only reason to report is when
  * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
@@ -295,7 +259,7 @@ __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
 	instrumentation_begin();
 	__syscall_exit_to_user_mode_work(regs);
 	instrumentation_end();
-	__exit_to_user_mode();
+	exit_to_user_mode();
 }
 
 noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
@@ -308,7 +272,7 @@ noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
 	instrumentation_begin();
 	exit_to_user_mode_prepare(regs);
 	instrumentation_end();
-	__exit_to_user_mode();
+	exit_to_user_mode();
 }
 
 noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
-- 
2.40.1
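
For illustration only, not part of the patch: once exit_to_user_mode_prepare() and exit_to_user_mode() are __always_inline, an architecture that wires up its own exit path can expand them directly at the call site instead of calling into kernel/entry/common.c. A minimal sketch that mirrors the irqentry_exit_to_user_mode() sequence shown in the hunk above; the function name arch_example_exit_to_user_mode() is hypothetical:

#include <linux/entry-common.h>

/*
 * Hypothetical architecture-private exit path.  The sequence matches
 * irqentry_exit_to_user_mode(): with interrupts disabled, run the exit
 * work inside instrumentation_begin()/end(), then do the final noinstr
 * state transition.  Both helpers now expand inline at this call site.
 */
static __always_inline void arch_example_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

The irqs-disabled requirement and the instrumentation_begin()/end() pairing are unchanged from the previous out-of-line variant.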

From: Sven Schnelle
To: Thomas Gleixner, Peter Zijlstra, Andy Lutomirski
Cc: linux-kernel@vger.kernel.org, Heiko Carstens
Subject: [PATCH v2 2/3] entry: move enter_from_user_mode() to header file
Date: Mon, 18 Dec 2023 08:45:19 +0100
Message-Id: <20231218074520.1998026-3-svens@linux.ibm.com>
In-Reply-To: <20231218074520.1998026-1-svens@linux.ibm.com>
References: <20231218074520.1998026-1-svens@linux.ibm.com>

To allow inlining of enter_from_user_mode(), move it to entry-common.h.

Signed-off-by: Sven Schnelle
---
 include/linux/entry-common.h | 15 ++++++++++++++-
 kernel/entry/common.c        | 26 +++-----------------------
 2 files changed, 17 insertions(+), 24 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index b08aceb26e8e..e8f1e4bba1c1 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -102,7 +103,19 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
  * done between establishing state and enabling interrupts. The caller must
  * enable interrupts before invoking syscall_enter_from_user_mode_work().
  */
-void enter_from_user_mode(struct pt_regs *regs);
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+	arch_enter_from_user_mode(regs);
+	lockdep_hardirqs_off(CALLER_ADDR0);
+
+	CT_WARN_ON(__ct_state() != CONTEXT_USER);
+	user_exit_irqoff();
+
+	instrumentation_begin();
+	kmsan_unpoison_entry_regs(regs);
+	trace_hardirqs_off_finish();
+	instrumentation_end();
+}
 
 /**
  * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 113bd3e8e73e..cd40cd1b4616 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -15,26 +15,6 @@
 #define CREATE_TRACE_POINTS
 #include
 
-/* See comment for enter_from_user_mode() in entry-common.h */
-static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
-{
-	arch_enter_from_user_mode(regs);
-	lockdep_hardirqs_off(CALLER_ADDR0);
-
-	CT_WARN_ON(__ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
-
-	instrumentation_begin();
-	kmsan_unpoison_entry_regs(regs);
-	trace_hardirqs_off_finish();
-	instrumentation_end();
-}
-
-void noinstr enter_from_user_mode(struct pt_regs *regs)
-{
-	__enter_from_user_mode(regs);
-}
-
 static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
 {
 	if (unlikely(audit_context())) {
@@ -105,7 +85,7 @@ noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
 {
 	long ret;
 
-	__enter_from_user_mode(regs);
+	enter_from_user_mode(regs);
 
 	instrumentation_begin();
 	local_irq_enable();
@@ -117,7 +97,7 @@ noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
 
 noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
 {
-	__enter_from_user_mode(regs);
+	enter_from_user_mode(regs);
 	instrumentation_begin();
 	local_irq_enable();
 	instrumentation_end();
@@ -264,7 +244,7 @@ __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
 
 noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
 {
-	__enter_from_user_mode(regs);
+	enter_from_user_mode(regs);
 }
 
 noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
-- 
2.40.1
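
For illustration only: with enter_from_user_mode() now inlinable as well, an architecture-specific noinstr interrupt handler for the user-mode case can establish state without an out-of-line call and pair it with the generic exit path. A minimal sketch under that assumption; arch_example_irq_from_user() and arch_example_do_irq() are hypothetical names:

#include <linux/entry-common.h>

/* Hypothetical instrumentable interrupt handling, provided elsewhere. */
void arch_example_do_irq(struct pt_regs *regs);

/*
 * Hypothetical noinstr entry point for an interrupt taken from user mode.
 * enter_from_user_mode() (lockdep, context tracking, KMSAN unpoisoning)
 * expands inline here; the real work runs inside an instrumentation
 * section, and the generic irqentry_exit_to_user_mode() handles the return.
 */
noinstr void arch_example_irq_from_user(struct pt_regs *regs)
{
	enter_from_user_mode(regs);

	instrumentation_begin();
	arch_example_do_irq(regs);
	instrumentation_end();

	irqentry_exit_to_user_mode(regs);
}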

From: Sven Schnelle
To: Thomas Gleixner, Peter Zijlstra, Andy Lutomirski
Cc: linux-kernel@vger.kernel.org, Heiko Carstens
Subject: [PATCH v2 3/3] entry: move syscall_enter_from_user_mode() to header file
Date: Mon, 18 Dec 2023 08:45:20 +0100
Message-Id: <20231218074520.1998026-4-svens@linux.ibm.com>
In-Reply-To: <20231218074520.1998026-1-svens@linux.ibm.com>
References: <20231218074520.1998026-1-svens@linux.ibm.com>

To allow inlining of syscall_enter_from_user_mode(), move it to entry-common.h.

Signed-off-by: Sven Schnelle
---
 include/linux/entry-common.h | 27 +++++++++++++++++++++++++--
 kernel/entry/common.c        | 32 +-------------------------------
 2 files changed, 26 insertions(+), 33 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index e8f1e4bba1c1..4f8e467eaf3c 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -134,6 +134,9 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
  */
 void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
 
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
+			 unsigned long work);
+
 /**
  * syscall_enter_from_user_mode_work - Check and handle work before invoking
  *				       a syscall
@@ -157,7 +160,15 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
  *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
  *  2) Invocation of audit_syscall_entry()
  */
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+	if (work & SYSCALL_WORK_ENTER)
+		syscall = syscall_trace_enter(regs, syscall, work);
+
+	return syscall;
+}
 
 /**
  * syscall_enter_from_user_mode - Establish state and check and handle work
@@ -176,7 +187,19 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
  * Returns: The original or a modified syscall number. See
  * syscall_enter_from_user_mode_work() for further explanation.
  */
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+	long ret;
+
+	enter_from_user_mode(regs);
+
+	instrumentation_begin();
+	local_irq_enable();
+	ret = syscall_enter_from_user_mode_work(regs, syscall);
+	instrumentation_end();
+
+	return ret;
+}
 
 /**
  * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index cd40cd1b4616..a35aaaa9f8bc 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -25,7 +25,7 @@ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
 	}
 }
 
-static long syscall_trace_enter(struct pt_regs *regs, long syscall,
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
 				unsigned long work)
 {
 	long ret = 0;
@@ -65,36 +65,6 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
 	return ret ? : syscall;
 }
 
-static __always_inline long
-__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
-{
-	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-
-	if (work & SYSCALL_WORK_ENTER)
-		syscall = syscall_trace_enter(regs, syscall, work);
-
-	return syscall;
-}
-
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
-{
-	return __syscall_enter_from_user_work(regs, syscall);
-}
-
-noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
-{
-	long ret;
-
-	enter_from_user_mode(regs);
-
-	instrumentation_begin();
-	local_irq_enable();
-	ret = __syscall_enter_from_user_work(regs, syscall);
-	instrumentation_end();
-
-	return ret;
-}
-
 noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);
-- 
2.40.1
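
For illustration only: taken together, the three patches allow a generic-entry architecture to build its syscall entry point with the entry-side work fully inlined into the noinstr caller. A minimal sketch; arch_example_do_syscall() and arch_example_invoke_syscall() are hypothetical, and how the syscall number and return value are passed differs per architecture:

#include <linux/entry-common.h>

/* Hypothetical instrumentable syscall dispatch, provided elsewhere. */
void arch_example_invoke_syscall(struct pt_regs *regs, long nr);

/*
 * Hypothetical noinstr syscall entry point: syscall_enter_from_user_mode()
 * (state setup, irq enable, entry work) now expands inline here; the
 * dispatch runs with instrumentation enabled, and the generic
 * syscall_exit_to_user_mode() handles exit work and the return transition.
 */
__visible noinstr void arch_example_do_syscall(struct pt_regs *regs, long nr)
{
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	arch_example_invoke_syscall(regs, nr);
	instrumentation_end();

	syscall_exit_to_user_mode(regs);
}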