From: Yeoreum Yun <yeoreum.yun@arm.com>
To: catalin.marinas@arm.com, will@kernel.org, broonie@kernel.org,
	maz@kernel.org, oliver.upton@linux.dev, joey.gouly@arm.com,
	james.morse@arm.com, ardb@kernel.org, scott@os.amperecomputing.com,
	suzuki.poulose@arm.com, yuzenghui@huawei.com, mark.rutland@arm.com
Cc: linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
	linux-kernel@vger.kernel.org, Yeoreum Yun <yeoreum.yun@arm.com>
Subject: [PATCH v9 4/5] arm64: futex: refactor futex atomic operation
Date: Mon, 22 Sep 2025 11:22:43 +0100
Message-Id: <20250922102244.2068414-5-yeoreum.yun@arm.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20250922102244.2068414-1-yeoreum.yun@arm.com>
References: <20250922102244.2068414-1-yeoreum.yun@arm.com>

Refactor the futex atomic operations, which use the LL/SC method with
PSTATE.PAN cleared, in preparation for applying FEAT_LSUI to them.
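Each LLSC_FUTEX_ATOMIC_OP(op, insn) invocation in the diff generates an
LL/SC helper named __llsc_futex_atomic_<op>(), and FUTEX_ATOMIC_OP(op)
generates a thin __futex_atomic_<op>() wrapper around it, so a later
patch can redirect the wrapper to an LSUI implementation without
touching the callers. As an illustration of the generated shape (this
is just the preprocessor expansion of the macros below, not new code),
FUTEX_ATOMIC_OP(add) expands to:

    static __always_inline int
    __futex_atomic_add(int oparg, u32 __user *uaddr, int *oval)
    {
            return __llsc_futex_atomic_add(oparg, uaddr, oval);
    }

__futex_cmpxchg() wraps __llsc_futex_cmpxchg() in the same way.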
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/futex.h | 128 +++++++++++++++++++++------------
 1 file changed, 82 insertions(+), 46 deletions(-)

diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index bc06691d2062..f8cb674bdb3f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -7,17 +7,21 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+#include
 
 #include <asm/errno.h>
 
 #define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
-do {									\
+#define LLSC_FUTEX_ATOMIC_OP(op, insn)					\
+static __always_inline int						\
+__llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)	\
+{									\
 	unsigned int loops = FUTEX_MAX_LOOPS;				\
+	int ret, oldval, tmp;						\
 									\
 	uaccess_enable_privileged();					\
-	asm volatile(							\
+	asm volatile("// __llsc_futex_atomic_" #op "\n"			\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
@@ -35,45 +39,103 @@ do {									\
 	: "r" (oparg), "Ir" (-EAGAIN)					\
 	: "memory");							\
 	uaccess_disable_privileged();					\
-} while (0)
+									\
+	if (!ret)							\
+		*oval = oldval;						\
+									\
+	return ret;							\
+}
+
+LLSC_FUTEX_ATOMIC_OP(add, "add	%w3, %w1, %w5")
+LLSC_FUTEX_ATOMIC_OP(or, "orr	%w3, %w1, %w5")
+LLSC_FUTEX_ATOMIC_OP(and, "and	%w3, %w1, %w5")
+LLSC_FUTEX_ATOMIC_OP(eor, "eor	%w3, %w1, %w5")
+LLSC_FUTEX_ATOMIC_OP(set, "mov	%w3, %w5")
+
+static __always_inline int
+__llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+	int ret = 0;
+	unsigned int loops = FUTEX_MAX_LOOPS;
+	u32 val, tmp;
+
+	uaccess_enable_privileged();
+	asm volatile("//__llsc_futex_cmpxchg\n"
+"	prfm	pstl1strm, %2\n"
+"1:	ldxr	%w1, %2\n"
+"	eor	%w3, %w1, %w5\n"
+"	cbnz	%w3, 4f\n"
+"2:	stlxr	%w3, %w6, %2\n"
+"	cbz	%w3, 3f\n"
+"	sub	%w4, %w4, %w3\n"
+"	cbnz	%w4, 1b\n"
+"	mov	%w0, %w7\n"
+"3:\n"
+"	dmb	ish\n"
+"4:\n"
+	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
+	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
+	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+	: "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
+	: "memory");
+	uaccess_disable_privileged();
+
+	if (!ret)
+		*oval = val;
+
+	return ret;
+}
+
+#define FUTEX_ATOMIC_OP(op)						\
+static __always_inline int						\
+__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)		\
+{									\
+	return __llsc_futex_atomic_##op(oparg, uaddr, oval);		\
+}
+
+FUTEX_ATOMIC_OP(add)
+FUTEX_ATOMIC_OP(or)
+FUTEX_ATOMIC_OP(and)
+FUTEX_ATOMIC_OP(eor)
+FUTEX_ATOMIC_OP(set)
+
+static __always_inline int
+__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+	return __llsc_futex_cmpxchg(uaddr, oldval, newval, oval);
+}
 
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
-	int oldval = 0, ret, tmp;
-	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+	int ret;
+	u32 __user *uaddr;
 
 	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	uaddr = __uaccess_mask_ptr(_uaddr);
+
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%w3, %w5",
-				  ret, oldval, uaddr, tmp, oparg);
+		ret = __futex_atomic_set(oparg, uaddr, oval);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%w3, %w1, %w5",
-				  ret, oldval, uaddr, tmp, oparg);
+		ret = __futex_atomic_add(oparg, uaddr, oval);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%w3, %w1, %w5",
-				  ret, oldval, uaddr, tmp, oparg);
+		ret = __futex_atomic_or(oparg, uaddr, oval);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%w3, %w1, %w5",
-				  ret, oldval, uaddr, tmp, ~oparg);
+		ret = __futex_atomic_and(~oparg, uaddr, oval);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%w3, %w1, %w5",
-				  ret, oldval, uaddr, tmp, oparg);
+		ret = __futex_atomic_eor(oparg, uaddr, oval);
 		break;
 	default:
 		ret = -ENOSYS;
 	}
 
-	if (!ret)
-		*oval = oldval;
-
 	return ret;
 }
 
@@ -81,40 +143,14 @@ static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 			      u32 oldval, u32 newval)
 {
-	int ret = 0;
-	unsigned int loops = FUTEX_MAX_LOOPS;
-	u32 val, tmp;
 	u32 __user *uaddr;
 
 	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
 
 	uaddr = __uaccess_mask_ptr(_uaddr);
-	uaccess_enable_privileged();
-	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%w1, %2\n"
-"	sub	%w3, %w1, %w5\n"
-"	cbnz	%w3, 4f\n"
-"2:	stlxr	%w3, %w6, %2\n"
-"	cbz	%w3, 3f\n"
-"	sub	%w4, %w4, %w3\n"
-"	cbnz	%w4, 1b\n"
-"	mov	%w0, %w7\n"
-"3:\n"
-"	dmb	ish\n"
-"4:\n"
-	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
-	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
-	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
-	: "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
-	: "memory");
-	uaccess_disable_privileged();
-
-	if (!ret)
-		*uval = val;
 
-	return ret;
+	return __futex_cmpxchg(uaddr, oldval, newval, uval);
 }
 
 #endif /* __ASM_FUTEX_H */
-- 