From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id D68172874E9 for ; Tue, 23 Sep 2025 17:49:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649762; cv=none; b=Ae+YTro7sqGF+0LlzR1DLrp26452vf0DLGaG7xdSWUmsY5AtrBAD1tjthcQelaHTovaO7ZiqZ3ayN103gCL+FOXeByjZqvtdxTNz8VTjlA0SEy6AmdI/ocyEMCLcsdHzqy+34ZRXXwIZUEiMrR3YTBfpokv6Jw5ycP1C3vRyLG4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649762; c=relaxed/simple; bh=128ZpLwaXADL1vNovgRJUTn6L7GJXWYdUh56fZ+sRYU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=SqI1cG6taM11rIJ+09RGLzkPYbPc3IC/CHQUbvgMajGG5igEvOoo42HbKETuxTBX4Y1kzYM/tPgxvzSe1CqH6fTDD7mddX663wzIqe/79Dy6qrz9ClRBCXdpZGZ1bg8yuO6/w0Y6rg33d4EIfKDV33smFQFyg55E4yk3ganrcLk= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id DBD86FEC; Tue, 23 Sep 2025 10:49:11 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 1734B3F5A1; Tue, 23 Sep 2025 10:49:15 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , 
linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 01/16] kasan: mark kasan_(hw_)tags_enabled() __always_inline Date: Tue, 23 Sep 2025 18:48:48 +0100 Message-ID: <20250923174903.76283-2-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" `kasan_enabled()` and `kasan_hw_tags_enabled()` are marked inline, except `kasan_enabled()` with `CONFIG_KASAN_HW_TAGS`, which is marked `__always_inline`. Those functions are called in the arm64 alternative callback `kasan_hw_tags_enable()`, which requires them to be inlined so that they are not instrumented and are safe for patching. For consistency between the four declarations and to make the arm64 alternative callback safe, mark them `__always_inline`. 
Signed-off-by: Ada Couprie Diaz --- include/linux/kasan-enabled.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h index 6f612d69ea0c..d3d5a2327e11 100644 --- a/include/linux/kasan-enabled.h +++ b/include/linux/kasan-enabled.h @@ -13,19 +13,19 @@ static __always_inline bool kasan_enabled(void) return static_branch_likely(&kasan_flag_enabled); } =20 -static inline bool kasan_hw_tags_enabled(void) +static __always_inline bool kasan_hw_tags_enabled(void) { return kasan_enabled(); } =20 #else /* CONFIG_KASAN_HW_TAGS */ =20 -static inline bool kasan_enabled(void) +static __always_inline bool kasan_enabled(void) { return IS_ENABLED(CONFIG_KASAN); } =20 -static inline bool kasan_hw_tags_enabled(void) +static __always_inline bool kasan_hw_tags_enabled(void) { return false; } --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id D46CB296BAA for ; Tue, 23 Sep 2025 17:49:24 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649766; cv=none; b=APinjv4QbX//3bNgrkSbxTChoMZApfZv/GhyinAVEgb6uEAhywfqE5VIBgCWaAMPhs2rUNw/0BbHh1L098rhncvnHqSPLCqg0GnadjjQ/FSXeV+l1mLOme/Wm7ORTNMjYa53uCy/ho5+S9pQQ12FXEQlzS2+4qZTPbsVe1rQ0O4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649766; c=relaxed/simple; bh=Jf411jvxYLsqPZur/yDlu4YD6Al+gJMzr3Rj3+S6nac=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=k4p6IHDjOO4hGFowkVtFo5YFoIg2JG6zOv1A0SMuYxRhmeqnJkWGByW6WhE17ngkqxgVSUN+qTXO0yzlHrsx9gSDBPavvCHWuIqXgOGFSdsyHWB0Wc4NGj0Z7lIPxHQU6sRMca+d5nbfMADYtorSsG2yHhltmbXcx47bNtPR6Xo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none 
smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id EEBD7267F; Tue, 23 Sep 2025 10:49:15 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 67B913F5A1; Tue, 23 Sep 2025 10:49:20 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 02/16] arm64: kasan: make kasan_hw_tags_enable() callback safe Date: Tue, 23 Sep 2025 18:48:49 +0100 Message-ID: <20250923174903.76283-3-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Alternative callback functions are regular functions, which means they or any function they call could get patched or instrumented by alternatives or other parts of the kernel. Given that applying alternatives does not guarantee a consistent state while patching, only once done, and handles cache maintenance manually, it could lead to nasty corruptions and execution of bogus code. Make `kasan_hw_tags_enable()` safe by preventing its instrumentation. 
This is possible thanks to a previous commit making `kasan_hw_tags_enabled()` always inlined, preventing any instrumentation in the callback. As `kasan_hw_tags_enable()` is already marked as `__init`, which has its own text section conflicting with the `noinstr` one, use `__noinstr_section(".init.text")` to add all the function attributes added by `noinstr`, without the section conflict. This can be an issue, as kprobes seems to only block the text sections, not based on function attributes. Signed-off-by: Ada Couprie Diaz --- arch/arm64/kernel/mte.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index e5e773844889..a525c1d0c26d 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -239,6 +239,7 @@ static void mte_update_gcr_excl(struct task_struct *tas= k) void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); =20 +__noinstr_section(".init.text") void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 97BCB29CB4D for ; Tue, 23 Sep 2025 17:49:28 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649770; cv=none; b=NQYczrddGXu9S2hlsVjVU+bQ9aSY5aPpwbQN6GSNoSxKjogWUFOAZoi+CeHEnwPC+DoQ1ovfpZpVV/dzUOHhEi4JvhuojIywxblj5f6IyFpOBEQOcG3CY0j1SBtOGEoj2uaxEaJu28Y4JHCW1Fh8F/5sM8OIbXlNilURiR/NNzs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649770; c=relaxed/simple; bh=xtIsC31tIAtKKB8qHnYm4xT7iB6zAT90RzN5yj2qix8=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; 
b=AIgDa7BOsNhSCGjXOBVio5fUI96vaAgtnOCdJryGN33QGi/e0m1F39ApEd6i5R6sZN8npg95QmIosn0j1Jh4BZnNJ3FlQYkhzJyscWRerQsKbTom3NPDTypnZO2TA+YdASAYrfhjsWyYZrs8Q2wkHcgqKuY0+nMC8XnDi9QW/5g= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id D92C0497; Tue, 23 Sep 2025 10:49:19 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 6F0BD3F5A1; Tue, 23 Sep 2025 10:49:24 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 03/16] arm64/insn: always inline aarch64_insn_decode_register() Date: Tue, 23 Sep 2025 18:48:50 +0100 Message-ID: <20250923174903.76283-4-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit register type, we can check for its validity at compile time and remove the runtime error print. 
This makes `aarch64_insn_decode_register()` self-contained and safe for inlining and usage from patching callbacks. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 32 ++++++++++++++++++++++++++++++-- arch/arm64/lib/insn.c | 29 ----------------------------- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 18c7811774d3..f6bce1a62dda 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -7,6 +7,7 @@ */ #ifndef __ASM_INSN_H #define __ASM_INSN_H +#include #include #include =20 @@ -558,8 +559,35 @@ enum aarch64_insn_encoding_class aarch64_get_insn_clas= s(u32 insn); u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n); u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 insn, u64 imm); -u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type, - u32 insn); +static __always_inline u32 aarch64_insn_decode_register( + enum aarch64_insn_register_type type, u32 insn) +{ + compiletime_assert(type >=3D AARCH64_INSN_REGTYPE_RT && + type <=3D AARCH64_INSN_REGTYPE_RS, "unknown register type encoding"); + int shift; + + switch (type) { + case AARCH64_INSN_REGTYPE_RT: + case AARCH64_INSN_REGTYPE_RD: + shift =3D 0; + break; + case AARCH64_INSN_REGTYPE_RN: + shift =3D 5; + break; + case AARCH64_INSN_REGTYPE_RT2: + case AARCH64_INSN_REGTYPE_RA: + shift =3D 10; + break; + case AARCH64_INSN_REGTYPE_RM: + case AARCH64_INSN_REGTYPE_RS: + shift =3D 16; + break; + default: + return 0; + } + + return (insn >> shift) & GENMASK(4, 0); +} u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, enum aarch64_insn_branch_type type); u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 4e298baddc2e..0fac78e542cf 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -144,35 +144,6 @@ u32 __kprobes 
aarch64_insn_encode_immediate(enum aarch= 64_insn_imm_type type, return insn; } =20 -u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type, - u32 insn) -{ - int shift; - - switch (type) { - case AARCH64_INSN_REGTYPE_RT: - case AARCH64_INSN_REGTYPE_RD: - shift =3D 0; - break; - case AARCH64_INSN_REGTYPE_RN: - shift =3D 5; - break; - case AARCH64_INSN_REGTYPE_RT2: - case AARCH64_INSN_REGTYPE_RA: - shift =3D 10; - break; - case AARCH64_INSN_REGTYPE_RM: - shift =3D 16; - break; - default: - pr_err("%s: unknown register type encoding %d\n", __func__, - type); - return 0; - } - - return (insn >> shift) & GENMASK(4, 0); -} - static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type ty= pe, u32 insn, enum aarch64_insn_register reg) --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 5BB252BDC32 for ; Tue, 23 Sep 2025 17:49:32 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649774; cv=none; b=ZCcaD0NluSZtD1Ob5wEkN01v/7SYSsjdtMkf7An/mrVcuBueLydmmAYS///t7WwU+LVWTMNz/k6ToyVgCiCa8Bcv9tfIS3G/xHsZbU6amrYqJToEunX59+BObWGF+Ag54NhgLGYr7zpzZZWAN8rxv6qlMbNilsFyhF+viU9cZhI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649774; c=relaxed/simple; bh=cZst8SaMQq9Nj4wU3NBzuqgsLJ5jKgBxDr0f5Dvt9Ds=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=KeJxtqlQt7vgXl+6zus4Y4GwgEIPLaEhMe9iw2yTQcoiusgLS2Lng3IcoIaMKuu3mdXwHoniuxHtdYJzVr6KxE7N5E1LcfaoyJBraX2cSUY9aXJmKAvkqLuJ8WQuZ6bdKadCygo0fVg9GpGgRYpmQNU4MbNgNAnotnQfr5GeTQE= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none 
dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 72CBAFEC; Tue, 23 Sep 2025 10:49:23 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 4AA563F5A1; Tue, 23 Sep 2025 10:49:28 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 04/16] arm64/insn: always inline aarch64_insn_encode_register() Date: Tue, 23 Sep 2025 18:48:51 +0100 Message-ID: <20250923174903.76283-5-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit register type, we can check for its validity at compile time and remove the runtime error print. The register and instruction checks cannot be made at compile time, as they are dynamically created. However, we can remove the error print as it should never appear in normal operation and will still lead to a fault BRK. This makes `aarch64_insn_encode_register()` self-contained and safe for inlining and usage from patching callbacks. This is a change of visibility, as previously the function was private to lib/insn.c. 
However, in order to inline more `aarch64_insn_...` functions and make patching callbacks safe, it needs to be accessible by those functions. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 42 +++++++++++++++++++++++++++++++++++ arch/arm64/lib/insn.c | 42 ----------------------------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index f6bce1a62dda..90f271483e5b 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -559,6 +559,48 @@ enum aarch64_insn_encoding_class aarch64_get_insn_clas= s(u32 insn); u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n); u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 insn, u64 imm); +static __always_inline u32 aarch64_insn_encode_register( + enum aarch64_insn_register_type type, + u32 insn, + enum aarch64_insn_register reg) +{ + compiletime_assert(type >=3D AARCH64_INSN_REGTYPE_RT && + type <=3D AARCH64_INSN_REGTYPE_RS, "unknown register type encoding"); + int shift; + + if (insn =3D=3D AARCH64_BREAK_FAULT) + return AARCH64_BREAK_FAULT; + + if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { + return AARCH64_BREAK_FAULT; + } + + switch (type) { + case AARCH64_INSN_REGTYPE_RT: + case AARCH64_INSN_REGTYPE_RD: + shift =3D 0; + break; + case AARCH64_INSN_REGTYPE_RN: + shift =3D 5; + break; + case AARCH64_INSN_REGTYPE_RT2: + case AARCH64_INSN_REGTYPE_RA: + shift =3D 10; + break; + case AARCH64_INSN_REGTYPE_RM: + case AARCH64_INSN_REGTYPE_RS: + shift =3D 16; + break; + default: + return AARCH64_BREAK_FAULT; + } + + insn &=3D ~(GENMASK(4, 0) << shift); + insn |=3D reg << shift; + + return insn; +} + static __always_inline u32 aarch64_insn_decode_register( enum aarch64_insn_register_type type, u32 insn) { diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 0fac78e542cf..1810e1ea64a7 100644 --- a/arch/arm64/lib/insn.c +++ 
b/arch/arm64/lib/insn.c @@ -144,48 +144,6 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch= 64_insn_imm_type type, return insn; } =20 -static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type ty= pe, - u32 insn, - enum aarch64_insn_register reg) -{ - int shift; - - if (insn =3D=3D AARCH64_BREAK_FAULT) - return AARCH64_BREAK_FAULT; - - if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { - pr_err("%s: unknown register encoding %d\n", __func__, reg); - return AARCH64_BREAK_FAULT; - } - - switch (type) { - case AARCH64_INSN_REGTYPE_RT: - case AARCH64_INSN_REGTYPE_RD: - shift =3D 0; - break; - case AARCH64_INSN_REGTYPE_RN: - shift =3D 5; - break; - case AARCH64_INSN_REGTYPE_RT2: - case AARCH64_INSN_REGTYPE_RA: - shift =3D 10; - break; - case AARCH64_INSN_REGTYPE_RM: - case AARCH64_INSN_REGTYPE_RS: - shift =3D 16; - break; - default: - pr_err("%s: unknown register type encoding %d\n", __func__, - type); - return AARCH64_BREAK_FAULT; - } - - insn &=3D ~(GENMASK(4, 0) << shift); - insn |=3D reg << shift; - - return insn; -} - static const u32 aarch64_insn_ldst_size[] =3D { [AARCH64_INSN_SIZE_8] =3D 0, [AARCH64_INSN_SIZE_16] =3D 1, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id D44452BE65B for ; Tue, 23 Sep 2025 17:49:35 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649778; cv=none; b=R5tFEk0/42RguptoyMk+EFO0y3G0dhl9IPuoUkkEQul8/3zyrBqoRArJNmWdWtztRjhfwMx6euZ9Bi22n3yvr/1cBb4EYAt4X5q10FUpSPvIqL5BE4uiTT3FMU1dqclJaZr5sAdFGBFn5p/fLqkq0fVlqfv7R+nRMFKZ34muhE0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649778; c=relaxed/simple; bh=utee+C1D9MBTxnYgylcBivUsPRX2koBM0Ut2XMYDZkU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; 
b=oHGg/J9VZlHVSbXrBkBEh92zENc9tyGtNxq9t0PWpCz/xPVury49EtuDBt1k1yOTVYbCeau90Je0sBbAnD6AkRI6+VIGaAIFwtLhCl8sDg3gvWgz9g5F+iKOuifgms2uzEH8ucajQxunxkWRnaMaM4LmiWr7YGQxu8FAw6/pg98= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 5738B25E0; Tue, 23 Sep 2025 10:49:27 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id DB98A3F5A1; Tue, 23 Sep 2025 10:49:31 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 05/16] arm64/insn: always inline aarch64_insn_encode_immediate() Date: Tue, 23 Sep 2025 18:48:52 +0100 Message-ID: <20250923174903.76283-6-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As type is passed dynamically at runtime we cannot check at compile time that is valid. 
However, in practice this should not happen and will still result in a fault BRK, so remove the error print. Pull `aarch64_get_imm_shift_mask()` in the header as well and make it `__always_inline` as it is needed for `aarch64_insn_encode_immediate()` and is already safe to inline. This is a change of visibility, so make sure to check the input pointers in case it is used in other places. Current callers do not care about -EINVAL, they just check for an error, so change the return to a boolean. This makes `aarch64_insn_encode_immediate()` safe for inlining and usage from patching callbacks. As both functions are now `__always_inline`, they do not need their `__kprobes` annotation anymore. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 103 +++++++++++++++++++++++++++++++++- arch/arm64/lib/insn.c | 102 +-------------------------------- 2 files changed, 102 insertions(+), 103 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 90f271483e5b..5f5f6a125b4e 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -9,6 +9,7 @@ #define __ASM_INSN_H #include #include +#include #include =20 #include @@ -555,10 +556,108 @@ static __always_inline bool aarch64_insn_uses_litera= l(u32 insn) aarch64_insn_is_prfm_lit(insn); } =20 +static __always_inline bool aarch64_get_imm_shift_mask( + enum aarch64_insn_imm_type type, u32 *maskp, int *shiftp) +{ + u32 mask; + int shift; + + if (maskp =3D=3D NULL || shiftp =3D=3D NULL) + return false; + + switch (type) { + case AARCH64_INSN_IMM_26: + mask =3D BIT(26) - 1; + shift =3D 0; + break; + case AARCH64_INSN_IMM_19: + mask =3D BIT(19) - 1; + shift =3D 5; + break; + case AARCH64_INSN_IMM_16: + mask =3D BIT(16) - 1; + shift =3D 5; + break; + case AARCH64_INSN_IMM_14: + mask =3D BIT(14) - 1; + shift =3D 5; + break; + case AARCH64_INSN_IMM_12: + mask =3D BIT(12) - 1; + shift =3D 10; + break; + case AARCH64_INSN_IMM_9: + mask =3D BIT(9) - 1; + shift =3D 12; + 
break; + case AARCH64_INSN_IMM_7: + mask =3D BIT(7) - 1; + shift =3D 15; + break; + case AARCH64_INSN_IMM_6: + case AARCH64_INSN_IMM_S: + mask =3D BIT(6) - 1; + shift =3D 10; + break; + case AARCH64_INSN_IMM_R: + mask =3D BIT(6) - 1; + shift =3D 16; + break; + case AARCH64_INSN_IMM_N: + mask =3D 1; + shift =3D 22; + break; + default: + return false; + } + + *maskp =3D mask; + *shiftp =3D shift; + + return true; +} + +#define ADR_IMM_HILOSPLIT 2 +#define ADR_IMM_SIZE SZ_2M +#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_LOSHIFT 29 +#define ADR_IMM_HISHIFT 5 + enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n); -u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, - u32 insn, u64 imm); + +static __always_inline u32 aarch64_insn_encode_immediate( + enum aarch64_insn_imm_type type, u32 insn, u64 imm) +{ + u32 immlo, immhi, mask; + int shift; + + if (insn =3D=3D AARCH64_BREAK_FAULT) + return AARCH64_BREAK_FAULT; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + shift =3D 0; + immlo =3D (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; + imm >>=3D ADR_IMM_HILOSPLIT; + immhi =3D (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; + imm =3D immlo | immhi; + mask =3D ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | + (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); + break; + default: + if (aarch64_get_imm_shift_mask(type, &mask, &shift) =3D=3D false) { + return AARCH64_BREAK_FAULT; + } + } + + /* Update the immediate field. 
*/ + insn &=3D ~(mask << shift); + insn |=3D (imm & mask) << shift; + + return insn; +} static __always_inline u32 aarch64_insn_encode_register( enum aarch64_insn_register_type type, u32 insn, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 1810e1ea64a7..d77aef7f84f1 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -13,7 +13,6 @@ #include =20 #include -#include #include #include =20 @@ -21,71 +20,6 @@ #define AARCH64_INSN_N_BIT BIT(22) #define AARCH64_INSN_LSL_12 BIT(22) =20 -static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type= type, - u32 *maskp, int *shiftp) -{ - u32 mask; - int shift; - - switch (type) { - case AARCH64_INSN_IMM_26: - mask =3D BIT(26) - 1; - shift =3D 0; - break; - case AARCH64_INSN_IMM_19: - mask =3D BIT(19) - 1; - shift =3D 5; - break; - case AARCH64_INSN_IMM_16: - mask =3D BIT(16) - 1; - shift =3D 5; - break; - case AARCH64_INSN_IMM_14: - mask =3D BIT(14) - 1; - shift =3D 5; - break; - case AARCH64_INSN_IMM_12: - mask =3D BIT(12) - 1; - shift =3D 10; - break; - case AARCH64_INSN_IMM_9: - mask =3D BIT(9) - 1; - shift =3D 12; - break; - case AARCH64_INSN_IMM_7: - mask =3D BIT(7) - 1; - shift =3D 15; - break; - case AARCH64_INSN_IMM_6: - case AARCH64_INSN_IMM_S: - mask =3D BIT(6) - 1; - shift =3D 10; - break; - case AARCH64_INSN_IMM_R: - mask =3D BIT(6) - 1; - shift =3D 16; - break; - case AARCH64_INSN_IMM_N: - mask =3D 1; - shift =3D 22; - break; - default: - return -EINVAL; - } - - *maskp =3D mask; - *shiftp =3D shift; - - return 0; -} - -#define ADR_IMM_HILOSPLIT 2 -#define ADR_IMM_SIZE SZ_2M -#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) -#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) -#define ADR_IMM_LOSHIFT 29 -#define ADR_IMM_HISHIFT 5 - u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n) { u32 immlo, immhi, mask; @@ -100,7 +34,7 @@ u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_= type type, u32 insn) mask =3D ADR_IMM_SIZE 
- 1; break; default: - if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { + if (aarch64_get_imm_shift_mask(type, &mask, &shift) =3D=3D false) { pr_err("%s: unknown immediate encoding %d\n", __func__, type); return 0; @@ -110,40 +44,6 @@ u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm= _type type, u32 insn) return (insn >> shift) & mask; } =20 -u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type typ= e, - u32 insn, u64 imm) -{ - u32 immlo, immhi, mask; - int shift; - - if (insn =3D=3D AARCH64_BREAK_FAULT) - return AARCH64_BREAK_FAULT; - - switch (type) { - case AARCH64_INSN_IMM_ADR: - shift =3D 0; - immlo =3D (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; - imm >>=3D ADR_IMM_HILOSPLIT; - immhi =3D (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; - imm =3D immlo | immhi; - mask =3D ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | - (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); - break; - default: - if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { - pr_err("%s: unknown immediate encoding %d\n", __func__, - type); - return AARCH64_BREAK_FAULT; - } - } - - /* Update the immediate field. 
*/ - insn &=3D ~(mask << shift); - insn |=3D (imm & mask) << shift; - - return insn; -} - static const u32 aarch64_insn_ldst_size[] =3D { [AARCH64_INSN_SIZE_8] =3D 0, [AARCH64_INSN_SIZE_16] =3D 1, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id D149D2C08CE for ; Tue, 23 Sep 2025 17:49:39 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649781; cv=none; b=A8S0A8VcnpYBX4G7+eI9R3Qj5IYqMgG8fjvqlzy65aR7qA9+ch30e4Wc7Wi251LJTwe0BsLKP85lX0k8NTZMgeBnYQYvUgVwllPZQrFK8AqdlWzKDEuwd7+LZV7QBEZobRypZQzHOUNOZCZqP30J7SNZ06TwdZ69U5EMV1tTlbY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649781; c=relaxed/simple; bh=CjDMdhk0G6tcrHt3kbkGU5RYLrcfJPwi8y6dzNI2K/I=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=cmX0KO7WwZdHDohcwPOWhKnVAKmhCVXl1I93AiFIJP4bc+Ouvp1jrOuBnL1jp/LeAa3W2m+Xhr3NoKWwA//kdtuzpDxkrJbZ2g92sURnGGWjceQkooFSGpWEm8QJadrL1imBYep9l1BUgQc+qUF9G9VHvEEqTGwv2DfCowklYyA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 55F86497; Tue, 23 Sep 2025 10:49:31 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id C991B3F5A1; Tue, 23 Sep 2025 10:49:35 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will 
Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 06/16] arm64/insn: always inline aarch64_insn_gen_movewide() Date: Tue, 23 Sep 2025 18:48:53 +0100 Message-ID: <20250923174903.76283-7-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit movewide type, we can check for its validity at compile time and remove the runtime error print. The other error prints cannot be verified at compile time, but should not occur in practice and will still lead to a fault BRK, so remove them. This makes `aarch64_insn_gen_movewide()` safe for inlining and usage from patching callbacks, as both `aarch64_insn_encode_register()` and `aarch64_insn_encode_immediate()` have been made safe in previous commits. 
Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 58 ++++++++++++++++++++++++++++++++--- arch/arm64/lib/insn.c | 56 --------------------------------- 2 files changed, 54 insertions(+), 60 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 5f5f6a125b4e..5a25e311717f 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -624,6 +624,8 @@ static __always_inline bool aarch64_get_imm_shift_mask( #define ADR_IMM_LOSHIFT 29 #define ADR_IMM_HISHIFT 5 =20 +#define AARCH64_INSN_SF_BIT BIT(31) + enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n); =20 @@ -796,10 +798,58 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_regis= ter dst, int immr, int imms, enum aarch64_insn_variant variant, enum aarch64_insn_bitfield_type type); -u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, - int imm, int shift, - enum aarch64_insn_variant variant, - enum aarch64_insn_movewide_type type); + +static __always_inline u32 aarch64_insn_gen_movewide( + enum aarch64_insn_register dst, + int imm, int shift, + enum aarch64_insn_variant variant, + enum aarch64_insn_movewide_type type) +{ + compiletime_assert(type >=3D AARCH64_INSN_MOVEWIDE_ZERO && + type <=3D AARCH64_INSN_MOVEWIDE_INVERSE, "unknown movewide encoding"); + u32 insn; + + switch (type) { + case AARCH64_INSN_MOVEWIDE_ZERO: + insn =3D aarch64_insn_get_movz_value(); + break; + case AARCH64_INSN_MOVEWIDE_KEEP: + insn =3D aarch64_insn_get_movk_value(); + break; + case AARCH64_INSN_MOVEWIDE_INVERSE: + insn =3D aarch64_insn_get_movn_value(); + break; + default: + return AARCH64_BREAK_FAULT; + } + + if (imm & ~(SZ_64K - 1)) { + return AARCH64_BREAK_FAULT; + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + if (shift !=3D 0 && shift !=3D 16) { + return AARCH64_BREAK_FAULT; + } + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |=3D 
AARCH64_INSN_SF_BIT; + if (shift !=3D 0 && shift !=3D 16 && shift !=3D 32 && shift !=3D 48) { + return AARCH64_BREAK_FAULT; + } + break; + default: + return AARCH64_BREAK_FAULT; + } + + insn |=3D (shift >> 4) << 21; + + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); +} + u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, enum aarch64_insn_register src, enum aarch64_insn_register reg, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index d77aef7f84f1..7530d51f9b2a 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -16,7 +16,6 @@ #include #include =20 -#define AARCH64_INSN_SF_BIT BIT(31) #define AARCH64_INSN_N_BIT BIT(22) #define AARCH64_INSN_LSL_12 BIT(22) =20 @@ -702,61 +701,6 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_regist= er dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); } =20 -u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst, - int imm, int shift, - enum aarch64_insn_variant variant, - enum aarch64_insn_movewide_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_MOVEWIDE_ZERO: - insn =3D aarch64_insn_get_movz_value(); - break; - case AARCH64_INSN_MOVEWIDE_KEEP: - insn =3D aarch64_insn_get_movk_value(); - break; - case AARCH64_INSN_MOVEWIDE_INVERSE: - insn =3D aarch64_insn_get_movn_value(); - break; - default: - pr_err("%s: unknown movewide encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - if (imm & ~(SZ_64K - 1)) { - pr_err("%s: invalid immediate encoding %d\n", __func__, imm); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if (shift !=3D 0 && shift !=3D 16) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |=3D AARCH64_INSN_SF_BIT; - if (shift !=3D 0 && shift !=3D 16 && shift !=3D 32 
&& shift !=3D 48) { - pr_err("%s: invalid shift encoding %d\n", __func__, - shift); - return AARCH64_BREAK_FAULT; - } - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn |=3D (shift >> 4) << 21; - - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); -} - u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst, enum aarch64_insn_register src, enum aarch64_insn_register reg, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 4B6B32C08CE for ; Tue, 23 Sep 2025 17:49:44 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649785; cv=none; b=qYHIBw4bawXZDfBS1wJwFLNUWnkFXbAKeRPE+t4bg8RdtqG/zXqqqcLr/W+QbE7PnvelGSfkL5QCD6RcWyHAHuL+t6ApJF7AwPw/AQYsnerzsIvONSIKEqKqQ9djikELkS2XHib5Qag0zp3ki5eINkP5YPdEgp9G38urlarM0u0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649785; c=relaxed/simple; bh=Py7l0YS0EW7fsBvD6gVVgfYxM8uX6Dlxs9iZ8oTPMeA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=HZtifZuNbmNZKwfYWLcFibgMyZr04hGDacDIVewNbFIIIIT5NMrnnryVcFlKXaovkM99+TEkMDI0AwRGPRfVfFKFJN87GsQH0zeqn+iZRoaGpQZ/jvcJTeShCkzYGTv10R5MQHFYJDJciAdQH8PwqFTTHH9oz2E8qnyvPa45NcQ= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by 
usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id B63E425E0; Tue, 23 Sep 2025 10:49:35 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id E078E3F5A1; Tue, 23 Sep 2025 10:49:39 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 07/16] arm64/proton-pack: make alternative callbacks safe Date: Tue, 23 Sep 2025 18:48:54 +0100 Message-ID: <20250923174903.76283-8-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Alternative callback functions are regular functions, which means they or any function they call could get patched or instrumented by alternatives or other parts of the kernel. Given that applying alternatives does not guarantee a consistent state while patching, only once done, and handles cache maintenance manually, it could lead to nasty corruptions and execution of bogus code. Make the Spectre mitigations alternative callbacks safe by marking them `noinstr` when they are not. This is possible thanks to previous commits making `aarch64_insn_...` functions used in the callbacks safe to inline. `spectre_bhb_patch_clearbhb()` is already marked as `__init`, which has its own text section conflicting with the `noinstr` one. 
Instead, use `__no_instr_section(".noinstr.text")` to add all the function attributes added by `noinstr`, without the section conflict. This can be an issue, as kprobes seems to only block the text sections, not based on function attributes. Signed-off-by: Ada Couprie Diaz --- This is missing `spectre_bhb_patch_wa3()` and `spectre_v4_patch_fw_mitigation_enable()` callbacks, which would need some more work : - `spectre_bhb_patch_wa3()` uses `WARN` which is instrumented, and I am not sure if it is safe to remove. It feels like something else should be done there ? - `spectre_v4_patch_fw_mitigation_enable()` calls into `spectre_v4_mitigations_off()` which calls `pr_info_once()` to notice the disabling of the mitigations on the command line, which is instrumentable but feels important to keep. I am not sure if there would be a better place to generate that message ? Interestingly, this was brought up recently[0]. It also calls `cpu_mitigations_off()` which checks a static variable against a static enum, in a common code C file, and is instrumentable. This one feels like it could be `__always_inline`'d, but given it is common code and the static nature of operands in the check, maybe marking it `noinstr` would be acceptable ? 
[0]: https://lore.kernel.org/all/aNF0gb1iZndz0-be@J2N7QTR9R3/ --- arch/arm64/kernel/proton-pack.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pac= k.c index edf1783ffc81..4ba8d24bf7ef 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -1174,6 +1174,7 @@ void noinstr spectre_bhb_patch_wa3(struct alt_instr *= alt, } =20 /* Patched to NOP when not supported */ +__noinstr_section(".init.text") void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 142A62D0C68 for ; Tue, 23 Sep 2025 17:49:48 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649789; cv=none; b=OgeTRvGMavLfQK9gBp6AxdZVWAEAOXBqQtGpYm/N0TCjzoegfmLlFKW+PxXQpi5CPRnIdqvqOD1oF1eOTvgMBPgq+nPCPpHjdB1ye8jJew/dhFzdI1n/kqQkdy9HvBAMWu4075vDOdlZxFeSPBXlTVKE0scw5VbSPEn5iZTqkmU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649789; c=relaxed/simple; bh=6Tr4kf7vFu7YuYjDqQ3S3owB0KlK2pmWRH9TBZyDAxc=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=L9JI6/ohj9RL8/Gj6xh+L1sYagwcq4YGxepk5az1wgOtF0ofl0HC6Raww2NSu0zqT23e7+7MX/sj8Zi8UklWiATscCZ1xaXTydXPjOvU+W7zDUYIlJABQjCz1wj9+Pr2PmXTnOse2tcpkYnmGKwKrHTzMpXT5Poysl1fi+LhZ7Q= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com 
(unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 86C7D497; Tue, 23 Sep 2025 10:49:39 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 4E63D3F5A1; Tue, 23 Sep 2025 10:49:44 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 08/16] arm64/insn: always inline aarch64_insn_gen_logical_immediate() Date: Tue, 23 Sep 2025 18:48:55 +0100 Message-ID: <20250923174903.76283-9-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit logic instruction type, we can check for its validity at compile time and remove the runtime error print. Pull its helper functions, `aarch64_encode_immediate()` and `range_of_ones()`, into the header and make them `__always_inline` as well. This is safe as they only call other `__always_inline` functions. This makes `aarch64_insn_gen_logical_immediate()` safe for inlining and usage from patching callbacks. 
Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 149 ++++++++++++++++++++++++++++++++-- arch/arm64/lib/insn.c | 136 ------------------------------- 2 files changed, 144 insertions(+), 141 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 5a25e311717f..a94ecc9140f1 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -16,6 +16,8 @@ =20 #ifndef __ASSEMBLY__ =20 +#include + enum aarch64_insn_hint_cr_op { AARCH64_INSN_HINT_NOP =3D 0x0 << 5, AARCH64_INSN_HINT_YIELD =3D 0x1 << 5, @@ -880,11 +882,148 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch6= 4_insn_register dst, u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst, enum aarch64_insn_register src, enum aarch64_insn_variant variant); -u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type, - enum aarch64_insn_variant variant, - enum aarch64_insn_register Rn, - enum aarch64_insn_register Rd, - u64 imm); + +static __always_inline bool range_of_ones(u64 val) +{ + /* Doesn't handle full ones or full zeroes */ + u64 sval =3D val >> __ffs64(val); + + /* One of Sean Eron Anderson's bithack tricks */ + return ((sval + 1) & (sval)) =3D=3D 0; +} + +static __always_inline u32 aarch64_encode_immediate(u64 imm, + enum aarch64_insn_variant variant, + u32 insn) +{ + unsigned int immr, imms, n, ones, ror, esz, tmp; + u64 mask; + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + esz =3D 32; + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |=3D AARCH64_INSN_SF_BIT; + esz =3D 64; + break; + default: + return AARCH64_BREAK_FAULT; + } + + mask =3D GENMASK(esz - 1, 0); + + /* Can't encode full zeroes, full ones, or value wider than the mask */ + if (!imm || imm =3D=3D mask || imm & ~mask) + return AARCH64_BREAK_FAULT; + + /* + * Inverse of Replicate(). Try to spot a repeating pattern + * with a pow2 stride. 
+ */ + for (tmp =3D esz / 2; tmp >=3D 2; tmp /=3D 2) { + u64 emask =3D BIT(tmp) - 1; + + if ((imm & emask) !=3D ((imm >> tmp) & emask)) + break; + + esz =3D tmp; + mask =3D emask; + } + + /* N is only set if we're encoding a 64bit value */ + n =3D esz =3D=3D 64; + + /* Trim imm to the element size */ + imm &=3D mask; + + /* That's how many ones we need to encode */ + ones =3D hweight64(imm); + + /* + * imms is set to (ones - 1), prefixed with a string of ones + * and a zero if they fit. Cap it to 6 bits. + */ + imms =3D ones - 1; + imms |=3D 0xf << ffs(esz); + imms &=3D BIT(6) - 1; + + /* Compute the rotation */ + if (range_of_ones(imm)) { + /* + * Pattern: 0..01..10..0 + * + * Compute how many rotate we need to align it right + */ + ror =3D __ffs64(imm); + } else { + /* + * Pattern: 0..01..10..01..1 + * + * Fill the unused top bits with ones, and check if + * the result is a valid immediate (all ones with a + * contiguous ranges of zeroes). + */ + imm |=3D ~mask; + if (!range_of_ones(~imm)) + return AARCH64_BREAK_FAULT; + + /* + * Compute the rotation to get a continuous set of + * ones, with the first bit set at position 0 + */ + ror =3D fls64(~imm); + } + + /* + * immr is the number of bits we need to rotate back to the + * original set of ones. Note that this is relative to the + * element size... 
+ */ + immr =3D (esz - ror) % esz; + + insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n); + insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); +} + +static __always_inline u32 aarch64_insn_gen_logical_immediate( + enum aarch64_insn_logic_type type, + enum aarch64_insn_variant variant, + enum aarch64_insn_register Rn, + enum aarch64_insn_register Rd, + u64 imm) +{ + compiletime_assert(type =3D=3D AARCH64_INSN_LOGIC_AND || + type =3D=3D AARCH64_INSN_LOGIC_ORR || + type =3D=3D AARCH64_INSN_LOGIC_EOR || + type =3D=3D AARCH64_INSN_LOGIC_AND_SETFLAGS, + "unknown logical encoding"); + u32 insn; + + switch (type) { + case AARCH64_INSN_LOGIC_AND: + insn =3D aarch64_insn_get_and_imm_value(); + break; + case AARCH64_INSN_LOGIC_ORR: + insn =3D aarch64_insn_get_orr_imm_value(); + break; + case AARCH64_INSN_LOGIC_EOR: + insn =3D aarch64_insn_get_eor_imm_value(); + break; + case AARCH64_INSN_LOGIC_AND_SETFLAGS: + insn =3D aarch64_insn_get_ands_imm_value(); + break; + default: + return AARCH64_BREAK_FAULT; + } + + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd); + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); + return aarch64_encode_immediate(imm, variant, insn); +} + + u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, enum aarch64_insn_register Rm, enum aarch64_insn_register Rn, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 7530d51f9b2a..15634094de05 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -1106,142 +1106,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn) return insn & CRM_MASK; } =20 -static bool range_of_ones(u64 val) -{ - /* Doesn't handle full ones or full zeroes */ - u64 sval =3D val >> __ffs64(val); - - /* One of Sean Eron Anderson's bithack tricks */ - return ((sval + 1) & (sval)) =3D=3D 0; -} - -static u32 aarch64_encode_immediate(u64 imm, - enum 
aarch64_insn_variant variant, - u32 insn) -{ - unsigned int immr, imms, n, ones, ror, esz, tmp; - u64 mask; - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - esz =3D 32; - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |=3D AARCH64_INSN_SF_BIT; - esz =3D 64; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - mask =3D GENMASK(esz - 1, 0); - - /* Can't encode full zeroes, full ones, or value wider than the mask */ - if (!imm || imm =3D=3D mask || imm & ~mask) - return AARCH64_BREAK_FAULT; - - /* - * Inverse of Replicate(). Try to spot a repeating pattern - * with a pow2 stride. - */ - for (tmp =3D esz / 2; tmp >=3D 2; tmp /=3D 2) { - u64 emask =3D BIT(tmp) - 1; - - if ((imm & emask) !=3D ((imm >> tmp) & emask)) - break; - - esz =3D tmp; - mask =3D emask; - } - - /* N is only set if we're encoding a 64bit value */ - n =3D esz =3D=3D 64; - - /* Trim imm to the element size */ - imm &=3D mask; - - /* That's how many ones we need to encode */ - ones =3D hweight64(imm); - - /* - * imms is set to (ones - 1), prefixed with a string of ones - * and a zero if they fit. Cap it to 6 bits. - */ - imms =3D ones - 1; - imms |=3D 0xf << ffs(esz); - imms &=3D BIT(6) - 1; - - /* Compute the rotation */ - if (range_of_ones(imm)) { - /* - * Pattern: 0..01..10..0 - * - * Compute how many rotate we need to align it right - */ - ror =3D __ffs64(imm); - } else { - /* - * Pattern: 0..01..10..01..1 - * - * Fill the unused top bits with ones, and check if - * the result is a valid immediate (all ones with a - * contiguous ranges of zeroes). - */ - imm |=3D ~mask; - if (!range_of_ones(~imm)) - return AARCH64_BREAK_FAULT; - - /* - * Compute the rotation to get a continuous set of - * ones, with the first bit set at position 0 - */ - ror =3D fls64(~imm); - } - - /* - * immr is the number of bits we need to rotate back to the - * original set of ones. Note that this is relative to the - * element size... 
- */ - immr =3D (esz - ror) % esz; - - insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n); - insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr); - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms); -} - -u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type, - enum aarch64_insn_variant variant, - enum aarch64_insn_register Rn, - enum aarch64_insn_register Rd, - u64 imm) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_LOGIC_AND: - insn =3D aarch64_insn_get_and_imm_value(); - break; - case AARCH64_INSN_LOGIC_ORR: - insn =3D aarch64_insn_get_orr_imm_value(); - break; - case AARCH64_INSN_LOGIC_EOR: - insn =3D aarch64_insn_get_eor_imm_value(); - break; - case AARCH64_INSN_LOGIC_AND_SETFLAGS: - insn =3D aarch64_insn_get_ands_imm_value(); - break; - default: - pr_err("%s: unknown logical encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd); - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); - return aarch64_encode_immediate(imm, variant, insn); -} - u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, enum aarch64_insn_register Rm, enum aarch64_insn_register Rn, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 313952D541B for ; Tue, 23 Sep 2025 17:49:52 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649795; cv=none; b=sB/FUEgJo/2+9Vghcs6m1DDDyYWKvlrbiTUweL/b342MSl3KAormFBztHN+eBYHdUIpy2nFIicDkG/QLNq0PTOUJMYHC3A9Db5ZINpvt5lyRjkbzxtdqP4j7A38SeOYyN6miaBrCflvu0CwZeyN0nmPRu6kH3U/Hi0+Pab06uzw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649795; c=relaxed/simple; 
bh=Urr8+VR0qOSdsILBhXsPbRbFoSlNV8lWhdKoJwi+ahE=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Zs/IK5WZQQhLREF7a7lGeeiGcZ1xZPYjt4Pe2Kf7muNWE03pKulCSs7Z6eG2Me9aBP2aIZC0LHSuhD5qQq3vr1pYfX8GiGW+3OZUwFIBirNSSx0C4nA54aXrd9NAyoNY1wYr1edPMQVY8fSE09S5hDpE1TMHAoJ5abSEk4gaNtc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id A8739497; Tue, 23 Sep 2025 10:49:43 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 1AE8B3F5A1; Tue, 23 Sep 2025 10:49:47 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 09/16] arm64/insn: always inline aarch64_insn_gen_add_sub_imm() Date: Tue, 23 Sep 2025 18:48:56 +0100 Message-ID: <20250923174903.76283-10-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with 
an explicit instruction adsb type and variant, we can check for their validity at compile time and remove the runtime error print. This is not the case for the immediate error print, as it checks dynamically. However, it should not occur in practice and will still generate a fault BRK, so remove it. This makes `aarch64_insn_gen_add_sub_imm()` safe for inlining and usage from patching callbacks, as both `aarch64_insn_encode_register()` and `aarch64_insn_encode_immediate()` have been made safe in previous commits. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 68 +++++++++++++++++++++++++++++++++-- arch/arm64/lib/insn.c | 62 -------------------------------- 2 files changed, 66 insertions(+), 64 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index a94ecc9140f1..a7caafd6f02b 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -627,6 +627,8 @@ static __always_inline bool aarch64_get_imm_shift_mask( #define ADR_IMM_HISHIFT 5 =20 #define AARCH64_INSN_SF_BIT BIT(31) +#define AARCH64_INSN_LSL_12 BIT(22) + =20 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n); @@ -788,10 +790,72 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_= register reg, enum aarch64_insn_register state, enum aarch64_insn_size_type size, enum aarch64_insn_ldst_type type); -u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, + +static __always_inline u32 aarch64_insn_gen_add_sub_imm( + enum aarch64_insn_register dst, enum aarch64_insn_register src, int imm, enum aarch64_insn_variant variant, - enum aarch64_insn_adsb_type type); + enum aarch64_insn_adsb_type type) +{ + compiletime_assert(type >=3D AARCH64_INSN_ADSB_ADD && + type <=3D AARCH64_INSN_ADSB_SUB_SETFLAGS, + "unknown add/sub encoding"); + compiletime_assert(variant =3D=3D AARCH64_INSN_VARIANT_32BIT || + variant =3D=3D 
AARCH64_INSN_VARIANT_64BIT, + "unknown variant encoding"); + u32 insn; + + switch (type) { + case AARCH64_INSN_ADSB_ADD: + insn =3D aarch64_insn_get_add_imm_value(); + break; + case AARCH64_INSN_ADSB_SUB: + insn =3D aarch64_insn_get_sub_imm_value(); + break; + case AARCH64_INSN_ADSB_ADD_SETFLAGS: + insn =3D aarch64_insn_get_adds_imm_value(); + break; + case AARCH64_INSN_ADSB_SUB_SETFLAGS: + insn =3D aarch64_insn_get_subs_imm_value(); + break; + default: + return AARCH64_BREAK_FAULT; + } + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + break; + case AARCH64_INSN_VARIANT_64BIT: + insn |=3D AARCH64_INSN_SF_BIT; + break; + default: + return AARCH64_BREAK_FAULT; + } + + /* We can't encode more than a 24bit value (12bit + 12bit shift) */ + if (imm & ~(BIT(24) - 1)) + goto out; + + /* If we have something in the top 12 bits... */ + if (imm & ~(SZ_4K - 1)) { + /* ... and in the low 12 bits -> error */ + if (imm & (SZ_4K - 1)) + goto out; + + imm >>=3D 12; + insn |=3D AARCH64_INSN_LSL_12; + } + + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); + + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); + + return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); + +out: + return AARCH64_BREAK_FAULT; +} + u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr, enum aarch64_insn_register reg, enum aarch64_insn_adr_type type); diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 15634094de05..34b6f1c692b4 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -17,7 +17,6 @@ #include =20 #define AARCH64_INSN_N_BIT BIT(22) -#define AARCH64_INSN_LSL_12 BIT(22) =20 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 ins= n) { @@ -585,67 +584,6 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register re= sult, } #endif =20 -u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, - enum aarch64_insn_register src, - int imm, enum aarch64_insn_variant variant, 
- enum aarch64_insn_adsb_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_ADSB_ADD: - insn =3D aarch64_insn_get_add_imm_value(); - break; - case AARCH64_INSN_ADSB_SUB: - insn =3D aarch64_insn_get_sub_imm_value(); - break; - case AARCH64_INSN_ADSB_ADD_SETFLAGS: - insn =3D aarch64_insn_get_adds_imm_value(); - break; - case AARCH64_INSN_ADSB_SUB_SETFLAGS: - insn =3D aarch64_insn_get_subs_imm_value(); - break; - default: - pr_err("%s: unknown add/sub encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - break; - case AARCH64_INSN_VARIANT_64BIT: - insn |=3D AARCH64_INSN_SF_BIT; - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - /* We can't encode more than a 24bit value (12bit + 12bit shift) */ - if (imm & ~(BIT(24) - 1)) - goto out; - - /* If we have something in the top 12 bits... */ - if (imm & ~(SZ_4K - 1)) { - /* ... and in the low 12 bits -> error */ - if (imm & (SZ_4K - 1)) - goto out; - - imm >>=3D 12; - insn |=3D AARCH64_INSN_LSL_12; - } - - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); - - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); - - return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); - -out: - pr_err("%s: invalid immediate encoding %d\n", __func__, imm); - return AARCH64_BREAK_FAULT; -} - u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst, enum aarch64_insn_register src, int immr, int imms, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id B42622D94A1 for ; Tue, 23 Sep 2025 17:49:56 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649798; cv=none; 
b=BXgCFo9cxjY1l0gDkZGdew5fEvjAx3XK6uQHWFq4UXGC9t8OEgQqJO26LFWAbgntjFSzZjgLcmNOZdySjBqy0ZkIkuiyPOuuOB5kgZRs988/tPDl5TusSvNuNIwoZ8OUTn1/99W2uYHSFbtGWVLnc7lCJ5BWT7TXT7JdVrqFtv4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649798; c=relaxed/simple; bh=LCKq7fLf7Qu3vSMK4wYqBuUbaQGUmC2XSGreBuSbVJE=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=mnb77I4w7BInR/AEuuulkxiN6EuJ9ro+e50MnNZ8FHj0m1iGSAPCbPf+HerN1lY5cg49zRocW1/qEbvcP1HU4VQsQxHJ0jdVxwmpWdlSrm6JTSIkUc1oF8ijDZXJi/6YDzWBN1bRgBQJxDjfeMLGU1hIwBwdcLIKntFwMle6MrU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id CDDE8497; Tue, 23 Sep 2025 10:49:47 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 333583F5A1; Tue, 23 Sep 2025 10:49:51 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 10/16] arm64/insn: always inline aarch64_insn_gen_branch_reg() Date: Tue, 23 Sep 2025 18:48:57 +0100 Message-ID: <20250923174903.76283-11-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: 
<20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit instruction branch type, we can check for its validity at compile time and remove the runtime error print. This makes `aarch64_insn_gen_branch_reg()` safe for inlining and usage from patching callbacks, as `aarch64_insn_encode_register()` has been made safe in a previous commit. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 28 ++++++++++++++++++++++++++-- arch/arm64/lib/insn.c | 23 ----------------------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index a7caafd6f02b..6e6a53d4d750 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -760,8 +760,32 @@ static __always_inline bool aarch64_insn_is_nop(u32 in= sn) return insn =3D=3D aarch64_insn_gen_nop(); } =20 -u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, - enum aarch64_insn_branch_type type); +static __always_inline u32 aarch64_insn_gen_branch_reg( + enum aarch64_insn_register reg, + enum aarch64_insn_branch_type type) +{ + compiletime_assert(type >=3D AARCH64_INSN_BRANCH_NOLINK && + type <=3D AARCH64_INSN_BRANCH_RETURN, + "unknown branch encoding"); + u32 insn; + + switch (type) { + case AARCH64_INSN_BRANCH_NOLINK: + insn =3D aarch64_insn_get_br_value(); + break; + case AARCH64_INSN_BRANCH_LINK: + insn =3D aarch64_insn_get_blr_value(); + break; + case AARCH64_INSN_BRANCH_RETURN: + insn =3D aarch64_insn_get_ret_value(); + break; + default: + return AARCH64_BREAK_FAULT; + } + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); +} + u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, enum aarch64_insn_register base, enum 
aarch64_insn_register offset, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 34b6f1c692b4..8d38bf4bf203 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -178,29 +178,6 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc,= unsigned long addr, offset >> 2); } =20 -u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, - enum aarch64_insn_branch_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_BRANCH_NOLINK: - insn =3D aarch64_insn_get_br_value(); - break; - case AARCH64_INSN_BRANCH_LINK: - insn =3D aarch64_insn_get_blr_value(); - break; - case AARCH64_INSN_BRANCH_RETURN: - insn =3D aarch64_insn_get_ret_value(); - break; - default: - pr_err("%s: unknown branch encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); -} - u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, enum aarch64_insn_register base, enum aarch64_insn_register offset, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 532A52DAFD7 for ; Tue, 23 Sep 2025 17:50:00 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649802; cv=none; b=bXdPkfaTElFxdst9zxBo/MJPjj9b6iXt+rV6dEEHeu4akMySuoGa0KS5SAtUZSv2/MbE+D+3f77OZKzqXETY90OSkLbUUxUiXnUs3f9naHPMoVHgvlCMZrnorUTN4VwFvrKxHZ/IjpNBvZb4f2gTzzKtoj8kTebK8JK/78j4Q4E= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649802; c=relaxed/simple; bh=RoPt2nnGJMaaDYrbv6yIXtOoFlsHlJGSMI3Dq37b3KY=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; 
b=h2Lrym+utwTOOX68/0zTW4Gr8G1Gon63UjtQgiAH2MbK+lwQ2uHXiSVrem7sZkQiB1dvfNlcBq94yOKxiDOkBXghXU+lPE3ZEoDOXaYrYKdbykLjDziZ93HZ2md9HGQZbJ13Jt3E2bgKsDPvxVv9keR8cfqLOQ5w+VmrUKf2hUM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 9AB86497; Tue, 23 Sep 2025 10:49:51 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 62B403F5A1; Tue, 23 Sep 2025 10:49:56 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 11/16] arm64/insn: always inline aarch64_insn_gen_extr() Date: Tue, 23 Sep 2025 18:48:58 +0100 Message-ID: <20250923174903.76283-12-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit variant, we can check for its validity at compile time and remove the runtime error print. 
This makes `aarch64_insn_gen_extr()` safe for inlining and usage from patching callbacks, as both `aarch64_insn_encode_immediate()` and `aarch64_insn_encode_register()` have been made safe in previous commits. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 39 ++++++++++++++++++++++++++++++----- arch/arm64/lib/insn.c | 32 ---------------------------- 2 files changed, 34 insertions(+), 37 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 6e6a53d4d750..4ba4d5c50137 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -1111,12 +1111,41 @@ static __always_inline u32 aarch64_insn_gen_logical= _immediate( return aarch64_encode_immediate(imm, variant, insn); } =20 +static __always_inline u32 aarch64_insn_gen_extr( + enum aarch64_insn_variant variant, + enum aarch64_insn_register Rm, + enum aarch64_insn_register Rn, + enum aarch64_insn_register Rd, + u8 lsb) +{ + compiletime_assert(variant =3D=3D AARCH64_INSN_VARIANT_32BIT || + variant =3D=3D AARCH64_INSN_VARIANT_64BIT, + "unknown variant encoding"); + u32 insn; + + insn =3D aarch64_insn_get_extr_value(); + + switch (variant) { + case AARCH64_INSN_VARIANT_32BIT: + if (lsb > 31) + return AARCH64_BREAK_FAULT; + break; + case AARCH64_INSN_VARIANT_64BIT: + if (lsb > 63) + return AARCH64_BREAK_FAULT; + insn |=3D AARCH64_INSN_SF_BIT; + insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1); + break; + default: + return AARCH64_BREAK_FAULT; + } + + insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb); + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd); + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); +} =20 -u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, - enum aarch64_insn_register Rm, - enum aarch64_insn_register Rn, - enum aarch64_insn_register Rd, - u8 lsb); #ifdef 
CONFIG_ARM64_LSE_ATOMICS u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result, enum aarch64_insn_register address, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 8d38bf4bf203..71df4d72ac81 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -1021,38 +1021,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn) return insn & CRM_MASK; } =20 -u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, - enum aarch64_insn_register Rm, - enum aarch64_insn_register Rn, - enum aarch64_insn_register Rd, - u8 lsb) -{ - u32 insn; - - insn =3D aarch64_insn_get_extr_value(); - - switch (variant) { - case AARCH64_INSN_VARIANT_32BIT: - if (lsb > 31) - return AARCH64_BREAK_FAULT; - break; - case AARCH64_INSN_VARIANT_64BIT: - if (lsb > 63) - return AARCH64_BREAK_FAULT; - insn |=3D AARCH64_INSN_SF_BIT; - insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1); - break; - default: - pr_err("%s: unknown variant encoding %d\n", __func__, variant); - return AARCH64_BREAK_FAULT; - } - - insn =3D aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb); - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd); - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn); - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); -} - static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type) { switch (type) { --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 144FC28B3E7 for ; Tue, 23 Sep 2025 17:50:04 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649805; cv=none; b=kJSbFhGnBFVI7OnAaA+XdADuJ2hJg3Jzdwq/KGPkj1LdcpNblTs/RblxMbPdK1YOofilNYJZppQkZFKYEQGB5moIedvvch9UvChqiVDhdEEDLmA2E0K+DWbYbncvQmY6vO1XBP435CC51c+PmTajg4E/c9V/fGogO0OOUMBSC0g= 
ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649805; c=relaxed/simple; bh=QjGU0Aksok9mK7+fVYuqIavVILfHnzN921TjOMeE/Qs=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=ibdLEiYHrJu+IgWwU8mjqZFVMU0vHzubc0JsfkjB4sdlemdL1yqSRfhjfnsyaF2EdmVNPibP24pRsfBza0XY5E0E2UhbIFFXELET5+wUjdd/rfTBEVZ6SFcKfhFYXIr4vRlhqcytonEg8819X/HSWAF8QqCzaPs4IEZLLWPIbX8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 66942497; Tue, 23 Sep 2025 10:49:55 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 2F2973F5A1; Tue, 23 Sep 2025 10:49:59 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 12/16] kvm/arm64: make alternative callbacks safe Date: Tue, 23 Sep 2025 18:48:59 +0100 Message-ID: <20250923174903.76283-13-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 
Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Alternative callback functions are regular functions, which means they or any function they call could get patched or instrumented by alternatives or other parts of the kernel. Given that applying alternatives does not guarantee a consistent state while patching, only once done, and handles cache maintenance manually, it could lead to nasty corruptions and execution of bogus code. Make the KVM alternative callbacks safe by marking them `noinstr` and `__always_inline`'ing their helpers. This is possible thanks to previous commits making `aarch64_insn_...` functions used in the callbacks safe to inline. `kvm_update_va_mask()` is already marked as `__init`, which has its own text section conflicting with the `noinstr` one. Instead, use `__no_instr_section(".noinstr.text")` to add all the function attributes added by `noinstr`, without the section conflict. This can be an issue, as kprobes seems to only block the text sections, not based on function attributes. Signed-off-by: Ada Couprie Diaz --- This is missing `kvm_patch_vector_branch()`, which could receive the same treatment, but the `WARN_ON_ONCE` in the early-exit check would make it call into instrumentable code. I do not currently know if this `WARN` can safely be removed or if it has some importance. 
--- arch/arm64/kvm/va_layout.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index 91b22a014610..3ebb7e0074f6 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -109,7 +109,7 @@ __init void kvm_apply_hyp_relocations(void) } } =20 -static u32 compute_instruction(int n, u32 rd, u32 rn) +static __always_inline u32 compute_instruction(int n, u32 rd, u32 rn) { u32 insn =3D AARCH64_BREAK_FAULT; =20 @@ -151,6 +151,7 @@ static u32 compute_instruction(int n, u32 rd, u32 rn) return insn; } =20 +__noinstr_section(".init.text") void __init kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { @@ -241,7 +242,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt, *updptr++ =3D cpu_to_le32(insn); } =20 -static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int n= r_inst) +static __always_inline void generate_mov_q(u64 val, __le32 *origptr, + __le32 *updptr, int nr_inst) { u32 insn, oinsn, rd; =20 @@ -284,15 +286,15 @@ static void generate_mov_q(u64 val, __le32 *origptr, = __le32 *updptr, int nr_inst *updptr++ =3D cpu_to_le32(insn); } =20 -void kvm_get_kimage_voffset(struct alt_instr *alt, +noinstr void kvm_get_kimage_voffset(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { generate_mov_q(kimage_voffset, origptr, updptr, nr_inst); } =20 -void kvm_compute_final_ctr_el0(struct alt_instr *alt, +noinstr void kvm_compute_final_ctr_el0(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { - generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0), + generate_mov_q(arm64_ftr_reg_ctrel0.sys_val, origptr, updptr, nr_inst); } --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 2C2A82DCF4D for ; Tue, 23 Sep 2025 17:50:07 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; 
arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649809; cv=none; b=NoTlzfvhsUrk/CFY1E5ecX8DZ/HfJW/C1uJiJHu61Nhbe2gvEX8U30kwmGmm455nJixVIHP5BuWytv4mUNydhiXT2B80wxEQG2OkgHtFEflo4m4qpiPGdYGDNqT9vO7arkTfEJJFb6yOtdb3hO4VECQXzLMkyrSvHYcgU9pu6MI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649809; c=relaxed/simple; bh=oUgQCxSQ8ScXU1gOufuTnRrF675d4ncv/V4PX7owEbU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=eLr5momYz99lj6zzc4dm4JRyg1+pkvH6TBwcjXR8k2lyANLccHFjnFqYmH5SoP0dnQILLqz3G7kBUx9Z0KokJO0qcvQZRoS+6SB4lZPre7kiy0C6jjichTgXoW+o1XDEHIu6uXiiVn9bRQ8sZBlNceIX+ag/5+c6LXawFU7g4UA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 65F85FEC; Tue, 23 Sep 2025 10:49:59 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id D6C233F5A1; Tue, 23 Sep 2025 10:50:03 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 13/16] arm64/insn: introduce missing is_store/is_load helpers Date: Tue, 23 Sep 2025 18:49:00 +0100 Message-ID: 
<20250923174903.76283-14-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" The current helpers only cover single and pair load/stores. Introduce new helpers to cover exclusive, load acquire, store release and the LSE atomics, as they both load and store. To gather all of them in one call : introduce `aarch64_insn_is_load()`, `aarch64_insn_is_store()`, and `aarch64_insn_is_ldst()` helpers which check if the instruction is a load, store or either. Signed-off-by: Ada Couprie Diaz --- Note: I made the LSE atomics part of the is_{load,store} helpers as they are used as such by `aarch64_insn_encode_ldst_size()`, but it could make sense to not have them in the helpers and just call them together where neeeded. 
--- arch/arm64/include/asm/insn.h | 53 +++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 4ba4d5c50137..44435eede1f3 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -520,6 +520,23 @@ static __always_inline bool aarch64_insn_is_barrier(u3= 2 insn) aarch64_insn_is_pssbb(insn); } =20 +#ifdef CONFIG_ARM64_LSE_ATOMICS +static __always_inline bool aarch64_insn_is_lse_atomic(u32 insn) +{ + return aarch64_insn_is_ldadd(insn) || + aarch64_insn_is_ldclr(insn) || + aarch64_insn_is_ldeor(insn) || + aarch64_insn_is_ldset(insn) || + aarch64_insn_is_swp(insn) || + aarch64_insn_is_cas(insn); +} +#else /* CONFIG_ARM64_LSE_ATOMICS */ +static __always_inline bool aarch64_insn_is_lse_atomic(u32 insn) +{ + return false; +} +#endif /* CONFIG_ARM64_LSE_ATOMICS */ + static __always_inline bool aarch64_insn_is_store_single(u32 insn) { return aarch64_insn_is_store_imm(insn) || @@ -534,6 +551,21 @@ static __always_inline bool aarch64_insn_is_store_pair= (u32 insn) aarch64_insn_is_stp_post(insn); } =20 +static __always_inline bool aarch64_insn_is_store_ex_or_rel(u32 insn) +{ + return aarch64_insn_is_store_ex(insn) || + aarch64_insn_is_store_ex(insn & (~BIT(15))) || + aarch64_insn_is_store_rel(insn); +} + +static __always_inline bool aarch64_insn_is_store(u32 insn) +{ + return aarch64_insn_is_store_single(insn) || + aarch64_insn_is_store_pair(insn) || + aarch64_insn_is_store_ex_or_rel(insn) || + aarch64_insn_is_lse_atomic(insn); +} + static __always_inline bool aarch64_insn_is_load_single(u32 insn) { return aarch64_insn_is_load_imm(insn) || @@ -548,6 +580,27 @@ static __always_inline bool aarch64_insn_is_load_pair(= u32 insn) aarch64_insn_is_ldp_post(insn); } =20 +static __always_inline bool aarch64_insn_is_load_ex_or_acq(u32 insn) +{ + return aarch64_insn_is_load_ex(insn) || + aarch64_insn_is_load_ex(insn & (~BIT(15))) || + aarch64_insn_is_load_acq(insn); +} + 
+static __always_inline bool aarch64_insn_is_load(u32 insn) +{ + return aarch64_insn_is_load_single(insn) || + aarch64_insn_is_load_pair(insn) || + aarch64_insn_is_load_ex_or_acq(insn) || + aarch64_insn_is_lse_atomic(insn); +} + +static __always_inline bool aarch64_insn_is_ldst(u32 insn) +{ + return aarch64_insn_is_load(insn) || + aarch64_insn_is_store(insn); +} + static __always_inline bool aarch64_insn_uses_literal(u32 insn) { /* ldr/ldrsw (literal), prfm */ --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 2A209292B4B for ; Tue, 23 Sep 2025 17:50:11 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649813; cv=none; b=eK7dJA+fEv9kMCnyyG9a6PoCTnW1FP6oaiGNVe9JkgjLCr/zOG5AzUcXGv5bLHoYBZpXEhrfwhjetmFHfIUVTuYC4SnOFkbDJNw5iVuzWTKFC3h7KuioV9E5Fl3AB5oK9SjwfEUUm5syJOwYiczJZZHs/smMMqkoe7yKG/K0X/E= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649813; c=relaxed/simple; bh=wiExZ4Fgng8BTYqoMIsC2nlF1tC8brtADbufO81BoN8=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=r6BBTHCVLOx5x29KRbO/klMIRpYW+/v0urrD9wnSPNh20gRovtBrpsvXuiX/FHgAwzlIVkKPNggt8cbDy1sHQTY4XFGwjo7/JOj2gx9GRzmI0XVxYyQdBh0wxWNZ+XM9KBFRMkQA4Qkkso0ZuhC4/o4uDSfxLjyghDNK9XNo7qs= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 60DD1497; Tue, 23 Sep 2025 10:50:03 -0700 (PDT) Received: 
from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id D65E03F5A1; Tue, 23 Sep 2025 10:50:07 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 14/16] arm64/insn: always inline aarch64_insn_encode_ldst_size() Date: Tue, 23 Sep 2025 18:49:01 +0100 Message-ID: <20250923174903.76283-15-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" The type and instruction checks cannot be made at compile time, as they are dynamically created. However, we can remove the error print as it should never appear in normal operation and will still lead to a fault BRK. This makes `aarch64_insn_encode_ldst_size()` safe for inlining and usage from patching callbacks. This is a change of visiblity, as previously the function was private to lib/insn.c. However, in order to inline more `aarch64_insn_` functions and make patching callbacks safe, it needs to be accessible by those functions. As it is more accessible than before, add a check so that only loads or stores can be affected by the size encoding. 
Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 24 ++++++++++++++++++++++++ arch/arm64/lib/insn.c | 19 +------------------ 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 44435eede1f3..46d4d452e2e2 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -717,6 +717,30 @@ static __always_inline u32 aarch64_insn_encode_immedia= te( =20 return insn; } + +extern const u32 aarch64_insn_ldst_size[]; +static __always_inline u32 aarch64_insn_encode_ldst_size( + enum aarch64_insn_size_type type, + u32 insn) +{ + u32 size; + + if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) { + return AARCH64_BREAK_FAULT; + } + + /* Don't corrput the top bits of other instructions which aren't a size. = */ + if (!aarch64_insn_is_ldst(insn)) { + return AARCH64_BREAK_FAULT; + } + + size =3D aarch64_insn_ldst_size[type]; + insn &=3D ~GENMASK(31, 30); + insn |=3D size << 30; + + return insn; +} + static __always_inline u32 aarch64_insn_encode_register( enum aarch64_insn_register_type type, u32 insn, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 71df4d72ac81..63564d236235 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -42,30 +42,13 @@ u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm= _type type, u32 insn) return (insn >> shift) & mask; } =20 -static const u32 aarch64_insn_ldst_size[] =3D { +const u32 aarch64_insn_ldst_size[] =3D { [AARCH64_INSN_SIZE_8] =3D 0, [AARCH64_INSN_SIZE_16] =3D 1, [AARCH64_INSN_SIZE_32] =3D 2, [AARCH64_INSN_SIZE_64] =3D 3, }; =20 -static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type, - u32 insn) -{ - u32 size; - - if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) { - pr_err("%s: unknown size encoding %d\n", __func__, type); - return AARCH64_BREAK_FAULT; - } - - size =3D aarch64_insn_ldst_size[type]; - insn &=3D ~GENMASK(31, 30); - insn |=3D size << 30; 
- - return insn; -} - static inline long label_imm_common(unsigned long pc, unsigned long addr, long range) { --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 39B2C2DEA95 for ; Tue, 23 Sep 2025 17:50:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649817; cv=none; b=VLbA2mE3yoH38BWOcudhexvMRpHsducsL3UJ9qUkCBY8pww1gXa0Ny9xUTBF1F7fjcgDxOBt7k8SSyoa2QxMwytCxr58oslZvFyvyInZow5nrJYWE9h7LSAts8QoAWkH3UpOaJ1ehUNIc7kfRJAft+MdNw30gB0JigTPS1v8AHw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649817; c=relaxed/simple; bh=odqMRWq0AVxn5mGgrhTcGJSh/oeRfsrm+r1/SiGdrSI=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=SuyRT2A9nZ3tQNm7OPFSRu41m5jy9cqqDGSJvxcSvRb5MQGaYq44/rfG3YD6b0AKEuX0pUPKPQMIHgFcRmG8pC+zty6W/RCV0jrVXz5pp4/ROByoqzpSW8Zi7utHg8/DbeG4yLG+EPtj6qFKuO5OoAGTWkHFh3gSHTx5I+Xd99U= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 8E640497; Tue, 23 Sep 2025 10:50:07 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id EC8333F5A1; Tue, 23 Sep 2025 10:50:11 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , 
Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 15/16] arm64/insn: always inline aarch64_insn_gen_load_acq_store_rel() Date: Tue, 23 Sep 2025 18:49:02 +0100 Message-ID: <20250923174903.76283-16-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" As it is always called with an explicit instruction type, we can check for its validity at compile time and remove the runtime error print. This makes `aarch64_insn_gen_load_acq_store_rel()` safe for inlining and usage from patching callbacks, as both `aarch64_insn_encode_ldst_size()` and `aarch64_insn_encode_register()` have been made safe in previous commits. 
Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/insn.h | 36 +++++++++++++++++++++++++++++++---- arch/arm64/lib/insn.c | 29 ---------------------------- 2 files changed, 32 insertions(+), 33 deletions(-) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 46d4d452e2e2..b7abc9b3e74c 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -882,10 +882,38 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_ins= n_register reg1, int offset, enum aarch64_insn_variant variant, enum aarch64_insn_ldst_type type); -u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg, - enum aarch64_insn_register base, - enum aarch64_insn_size_type size, - enum aarch64_insn_ldst_type type); + +static __always_inline u32 aarch64_insn_gen_load_acq_store_rel( + enum aarch64_insn_register reg, + enum aarch64_insn_register base, + enum aarch64_insn_size_type size, + enum aarch64_insn_ldst_type type) +{ + compiletime_assert(type =3D=3D AARCH64_INSN_LDST_LOAD_ACQ || + type =3D=3D AARCH64_INSN_LDST_STORE_REL, + "unknown load-acquire/store-release encoding"); + u32 insn; + + switch (type) { + case AARCH64_INSN_LDST_LOAD_ACQ: + insn =3D aarch64_insn_get_load_acq_value(); + break; + case AARCH64_INSN_LDST_STORE_REL: + insn =3D aarch64_insn_get_store_rel_value(); + break; + default: + return AARCH64_BREAK_FAULT; + } + + insn =3D aarch64_insn_encode_ldst_size(size, insn); + + insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, + reg); + + return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, + base); +} + u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, enum aarch64_insn_register base, enum aarch64_insn_register state, diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 63564d236235..6ee298f96d47 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -328,35 +328,6 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn= _register reg1, offset >> 
shift); } =20 -u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg, - enum aarch64_insn_register base, - enum aarch64_insn_size_type size, - enum aarch64_insn_ldst_type type) -{ - u32 insn; - - switch (type) { - case AARCH64_INSN_LDST_LOAD_ACQ: - insn =3D aarch64_insn_get_load_acq_value(); - break; - case AARCH64_INSN_LDST_STORE_REL: - insn =3D aarch64_insn_get_store_rel_value(); - break; - default: - pr_err("%s: unknown load-acquire/store-release encoding %d\n", - __func__, type); - return AARCH64_BREAK_FAULT; - } - - insn =3D aarch64_insn_encode_ldst_size(size, insn); - - insn =3D aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, - reg); - - return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, - base); -} - u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, enum aarch64_insn_register base, enum aarch64_insn_register state, --=20 2.43.0 From nobody Thu Oct 2 02:16:41 2025 Received: from foss.arm.com (foss.arm.com [217.140.110.172]) by smtp.subspace.kernel.org (Postfix) with ESMTP id 7BCBE2DECD8 for ; Tue, 23 Sep 2025 17:50:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=217.140.110.172 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649822; cv=none; b=cHHBWMnfyMdAurujek+6o8hk4VU40X+kIkcFvUcTy7eS7W0v4K/SA8+3fdtVbjFwlXQJycJOo5SoDW9miInJdCiNB1OnVebEt+6hSfxo0JJ95Yk3qH7DPItuqj9BJimnLebwrP2Rhn97YKi12YAjjUm55bro6xfKiWpI7mrtM4c= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1758649822; c=relaxed/simple; bh=2Skgx1MTi8lR+s4s9QJ+V8BEWAegpG4SfLkSTV2Coog=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=p1MTtWQVZomEm2Q5/LgpTxoA2EBpM7uv0+CF0BO7xE24qYngxqDyFvbYyfjbPpXMv6CTzfPSO6L1fEken0BxkwkqFat74rTUUwF0Kl3BEK3Ip+24h6BIFt4BYLVj9f5/qbipLCUSnkxCCtdeQiQKyCsHjwGKYj8gqr+6PPqdAL4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com; spf=pass 
smtp.mailfrom=arm.com; arc=none smtp.client-ip=217.140.110.172 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=arm.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=arm.com Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.121.207.14]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id E32FCFEC; Tue, 23 Sep 2025 10:50:11 -0700 (PDT) Received: from e137867.cambridge.arm.com (e137867.arm.com [10.1.30.204]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 0FD563F5A1; Tue, 23 Sep 2025 10:50:15 -0700 (PDT) From: Ada Couprie Diaz To: linux-arm-kernel@lists.infradead.org Cc: Catalin Marinas , Will Deacon , Marc Zyngier , Oliver Upton , Ard Biesheuvel , Joey Gouly , Suzuki K Poulose , Zenghui Yu , Andrey Ryabinin , Alexander Potapenko , Andrey Konovalov , Dmitry Vyukov , Vincenzo Frascino , linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev, kasan-dev@googlegroups.com, Mark Rutland , Ada Couprie Diaz Subject: [RFC PATCH 16/16] arm64/io: rework Cortex-A57 erratum 832075 to use callback Date: Tue, 23 Sep 2025 18:49:03 +0100 Message-ID: <20250923174903.76283-17-ada.coupriediaz@arm.com> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20250923174903.76283-1-ada.coupriediaz@arm.com> References: <20250923174903.76283-1-ada.coupriediaz@arm.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" The Cortex-A57 erratum 832075 fix implemented by the kernel replaces all device memory loads with their load-acquire versions. By using simple instruction-level alternatives to replace the 13k+ instances of such loads, we add more than 50kB of data to the `.altinstructions` section, and thus the kernel image. 
Implement `alt_cb_patch_ldr_to_ldar()` as the alternative callback to patch LDRs to device memory into LDARs and use it instead of the alternative instructions. This lightens the image by around 50kB as predicted, with the same result. The new callback is safe to be used for alternatives as it is `noinstr` and the `aarch64_insn_...` functions it uses have been made safe in previous commits. Add `alt_cb_patch_ldr_to_ldar()` to the nVHE namespace as `__vgic_v2_perform_cpuif_access()` uses one of the patched functions. Signed-off-by: Ada Couprie Diaz --- arch/arm64/include/asm/io.h | 27 +++++++++++++++------------ arch/arm64/kernel/image-vars.h | 1 + arch/arm64/kernel/io.c | 21 +++++++++++++++++++++ 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 9b96840fb979..ec75bd0a9d76 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -50,13 +50,16 @@ static __always_inline void __raw_writeq(u64 val, volat= ile void __iomem *addr) asm volatile("str %x0, %1" : : "rZ" (val), "Qo" (*ptr)); } =20 +void noinstr alt_cb_patch_ldr_to_ldar(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); + #define __raw_readb __raw_readb static __always_inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; - asm volatile(ALTERNATIVE("ldrb %w0, [%1]", - "ldarb %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + asm volatile(ALTERNATIVE_CB("ldrb %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, + alt_cb_patch_ldr_to_ldar) : "=3Dr" (val) : "r" (addr)); return val; } @@ -66,9 +69,9 @@ static __always_inline u16 __raw_readw(const volatile voi= d __iomem *addr) { u16 val; =20 - asm volatile(ALTERNATIVE("ldrh %w0, [%1]", - "ldarh %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + asm volatile(ALTERNATIVE_CB("ldrh %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, + alt_cb_patch_ldr_to_ldar) : "=3Dr" (val) : "r" (addr)); return val; } @@ -77,9 +80,9 @@ static 
__always_inline u16 __raw_readw(const volatile voi= d __iomem *addr) static __always_inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; - asm volatile(ALTERNATIVE("ldr %w0, [%1]", - "ldar %w0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + asm volatile(ALTERNATIVE_CB("ldr %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, + alt_cb_patch_ldr_to_ldar) : "=3Dr" (val) : "r" (addr)); return val; } @@ -88,9 +91,9 @@ static __always_inline u32 __raw_readl(const volatile voi= d __iomem *addr) static __always_inline u64 __raw_readq(const volatile void __iomem *addr) { u64 val; - asm volatile(ALTERNATIVE("ldr %0, [%1]", - "ldar %0, [%1]", - ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + asm volatile(ALTERNATIVE_CB("ldr %0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, + alt_cb_patch_ldr_to_ldar) : "=3Dr" (val) : "r" (addr)); return val; } diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 714b0b5ec5ac..43ac41f87229 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -91,6 +91,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable); KVM_NVHE_ALIAS(spectre_bhb_patch_wa3); KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb); KVM_NVHE_ALIAS(alt_cb_patch_nops); +KVM_NVHE_ALIAS(alt_cb_patch_ldr_to_ldar); =20 /* Global kernel state accessed by nVHE hyp code. 
*/ KVM_NVHE_ALIAS(kvm_vgic_global_state); diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index fe86ada23c7d..d4dff119f78c 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -9,6 +9,27 @@ #include #include =20 +noinstr void alt_cb_patch_ldr_to_ldar(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + u32 rt, rn, size, orinst, altinst; + + BUG_ON(nr_inst !=3D 1); + + orinst =3D le32_to_cpu(origptr[0]); + + rt =3D aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, orinst); + rn =3D aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, orinst); + /* The size field (31,30) matches the enum used in gen_load_acq below. */ + size =3D orinst >> 30; + + altinst =3D aarch64_insn_gen_load_acq_store_rel(rt, rn, size, + AARCH64_INSN_LDST_LOAD_ACQ); + + updptr[0] =3D cpu_to_le32(altinst); +} +EXPORT_SYMBOL(alt_cb_patch_ldr_to_ldar); + /* * This generates a memcpy that works on a from/to address which is aligne= d to * bits. Count is in terms of the number of bits sized quantities to copy.= It --=20 2.43.0