From: Clément Léger
To: Paul Walmsley, Palmer Dabbelt, Albert Ou, Stafford Horne, Brian Cain,
    Kefeng Wang, Russell King (Oracle), Michael Ellerman, Sunil V L,
    Anup Patel, Atish Patra, Andrew Jones, Conor Dooley, Heiko Stuebner,
    Guo Ren, Alexandre Ghiti, Masahiro Yamada, Xianting Tian, Sia Jee Heng,
    Li Zhengyu, Jisheng Zhang, Gautham R. Shenoy, Mark Rutland,
    Peter Zijlstra, Marc Zyngier, Björn Töpel, Krzysztof Kozlowski
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 1/6] riscv: remove unused functions in traps_misaligned.c
Date: Sat, 24 Jun 2023 14:20:44 +0200
Message-Id: <20230624122049.7886-2-cleger@rivosinc.com>
In-Reply-To: <20230624122049.7886-1-cleger@rivosinc.com>

Replace the type-generic load/store macros with the only two functions
that are actually called from this file, load_u8() and store_u8().

Signed-off-by: Clément Léger
---
 arch/riscv/kernel/traps_misaligned.c | 46 +++++-----------------------
 1 file changed, 7 insertions(+), 39 deletions(-)

diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 378f5b151443..e7bfb33089c1 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -151,51 +151,19 @@
 #define PRECISION_S 0
 #define PRECISION_D 1
 
-#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn)	\
-static inline type load_##type(const type *addr)	\
-{							\
-	type val;					\
-	asm (#insn " %0, %1"				\
-	: "=&r" (val) : "m" (*addr));			\
-	return val;					\
-}
+static inline u8 load_u8(const u8 *addr)
+{
+	u8 val;
 
-#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn)	\
-static inline void store_##type(type *addr, type val)	\
-{							\
-	asm volatile (#insn " %0, %1\n"			\
-	: : "r" (val), "m" (*addr));			\
-}
+	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
 
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
-#if defined(CONFIG_64BIT)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
-#else
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
-
-static inline u64 load_u64(const u64 *addr)
-{
-	return load_u32((u32 *)addr)
-		+ ((u64)load_u32((u32 *)addr + 1) << 32);
+	return val;
 }
 
-static inline void store_u64(u64 *addr, u64 val)
+static inline void store_u8(u8 *addr, u8 val)
 {
-	store_u32((u32 *)addr, val);
-	store_u32((u32 *)addr + 1, val >> 32);
+	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
 }
-#endif
 
 static inline ulong get_insn(ulong mepc)
 {
-- 
2.40.1
From: Clément Léger
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 2/6] riscv: add support for misaligned handling in S-mode
Date: Sat, 24 Jun 2023 14:20:45 +0200
Message-Id: <20230624122049.7886-3-cleger@rivosinc.com>
In-Reply-To: <20230624122049.7886-1-cleger@rivosinc.com>

Misalignment handling is currently only supported for M-mode and uses
direct accesses to user memory. In S-mode, accessing user memory
requires the get_user()/put_user() accessors. Implement load_u8(),
store_u8() and get_insn() using these accessors. Also, use CSR_TVAL
instead of the hardcoded mtval in the csr_read() call so that it works
for both S-mode and M-mode. When running in S-mode, we do not handle
misaligned accesses triggered from kernel mode.

Signed-off-by: Clément Léger
---
 arch/riscv/kernel/Makefile           |   2 +-
 arch/riscv/kernel/traps_misaligned.c | 111 +++++++++++++++++++++++----
 2 files changed, 99 insertions(+), 14 deletions(-)

diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 153864e4f399..61bad09280a6 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -56,9 +56,9 @@ obj-y	+= stacktrace.o
 obj-y	+= cacheinfo.o
 obj-y	+= patch.o
 obj-y	+= probes/
+obj-y	+= traps_misaligned.o
 obj-$(CONFIG_MMU)	+= vdso.o vdso/
 
-obj-$(CONFIG_RISCV_M_MODE)	+= traps_misaligned.o
 obj-$(CONFIG_FPU)	+= fpu.o
 obj-$(CONFIG_SMP)	+= smpboot.o
 obj-$(CONFIG_SMP)	+= smp.o
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index e7bfb33089c1..e4a273ab77c9 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 
 #define INSN_MATCH_LB 0x3
 #define INSN_MASK_LB 0x707f
@@ -151,21 +152,25 @@
 #define PRECISION_S 0
 #define PRECISION_D 1
 
-static inline u8 load_u8(const u8 *addr)
+#ifdef CONFIG_RISCV_M_MODE
+static inline int load_u8(const u8 *addr, u8 *r_val)
 {
 	u8 val;
 
 	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+	*r_val = val;
 
-	return val;
+	return 0;
 }
 
-static inline void store_u8(u8 *addr, u8 val)
+static inline int store_u8(u8 *addr, u8 val)
 {
 	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+
+	return 0;
 }
 
-static inline ulong get_insn(ulong mepc)
+static inline int get_insn(ulong mepc, ulong *r_insn)
 {
 	register ulong __mepc asm ("a2") = mepc;
 	ulong val, rvc_mask = 3, tmp;
@@ -194,9 +199,63 @@ static inline ulong get_insn(ulong mepc)
 	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
 	  [xlen_minus_16] "i" (XLEN_MINUS_16));
 
-	return val;
+	*r_insn = val;
+
+	return 0;
+}
+#else
+static inline int load_u8(const u8 *addr, u8 *r_val)
+{
+	return __get_user(*r_val, addr);
+}
+
+static inline int store_u8(u8 *addr, u8 val)
+{
+	return __put_user(val, addr);
 }
 
+static inline int get_insn(ulong mepc, ulong *r_insn)
+{
+	ulong insn = 0;
+
+	if (mepc & 0x2) {
+		ulong tmp = 0;
+		u16 __user *insn_addr = (u16 __user *)mepc;
+
+		if (__get_user(insn, insn_addr))
+			return -EFAULT;
+		/*
+		 * __get_user() uses a regular "lw" which sign extends the
+		 * loaded value. Make sure to clear the higher-order bits in
+		 * case we "or" it below with the upper 16-bit half.
+		 */
+		insn &= GENMASK(15, 0);
+		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
+			*r_insn = insn;
+			return 0;
+		}
+		insn_addr++;
+		if (__get_user(tmp, insn_addr))
+			return -EFAULT;
+		*r_insn = (tmp << 16) | insn;
+
+		return 0;
+	} else {
+		u32 __user *insn_addr = (u32 __user *)mepc;
+
+		if (__get_user(insn, insn_addr))
+			return -EFAULT;
+		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
+			*r_insn = insn;
+			return 0;
+		}
+		insn &= GENMASK(15, 0);
+		*r_insn = insn;
+
+		return 0;
+	}
+}
+#endif
+
 union reg_data {
 	u8 data_bytes[8];
 	ulong data_ulong;
@@ -207,10 +266,21 @@ int handle_misaligned_load(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
-	unsigned long insn = get_insn(epc);
-	unsigned long addr = csr_read(mtval);
+	unsigned long insn;
+	unsigned long addr;
 	int i, fp = 0, shift = 0, len = 0;
 
+	/*
+	 * When running in supervisor mode, we only handle misaligned accesses
+	 * triggered from user mode.
+	 */
+	if (!IS_ENABLED(CONFIG_RISCV_M_MODE) && !user_mode(regs))
+		return -1;
+
+	if (get_insn(epc, &insn))
+		return -1;
+
+	addr = csr_read(CSR_TVAL);
 	regs->epc = 0;
 
 	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -274,8 +344,10 @@ int handle_misaligned_load(struct pt_regs *regs)
 	}
 
 	val.data_u64 = 0;
-	for (i = 0; i < len; i++)
-		val.data_bytes[i] = load_u8((void *)(addr + i));
+	for (i = 0; i < len; i++) {
+		if (load_u8((void *)(addr + i), &val.data_bytes[i]))
+			return -1;
+	}
 
 	if (fp)
 		return -1;
@@ -290,10 +362,21 @@ int handle_misaligned_store(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
-	unsigned long insn = get_insn(epc);
-	unsigned long addr = csr_read(mtval);
+	unsigned long insn;
+	unsigned long addr;
 	int i, len = 0;
 
+	/*
+	 * When running in supervisor mode, we only handle misaligned accesses
+	 * triggered from user mode.
+	 */
+	if (!IS_ENABLED(CONFIG_RISCV_M_MODE) && !user_mode(regs))
+		return -1;
+
+	if (get_insn(epc, &insn))
+		return -1;
+
+	addr = csr_read(CSR_TVAL);
 	regs->epc = 0;
 
 	val.data_ulong = GET_RS2(insn, regs);
@@ -327,8 +410,10 @@ int handle_misaligned_store(struct pt_regs *regs)
 		return -1;
 	}
 
-	for (i = 0; i < len; i++)
-		store_u8((void *)(addr + i), val.data_bytes[i]);
+	for (i = 0; i < len; i++) {
+		if (store_u8((void *)(addr + i), val.data_bytes[i]))
+			return -1;
+	}
 
 	regs->epc = epc + INSN_LEN(insn);
 
-- 
2.40.1
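[For illustration only, a minimal user-space sketch, not part of the series:
it performs a misaligned 4-byte load. On hardware that traps on such accesses,
the S-mode handler above fetches the faulting instruction with __get_user(),
emulates the access byte by byte, and resumes the program.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[8];
	uint32_t v;

	memset(buf, 0xab, sizeof(buf));
	/* buf + 1 is misaligned for a 4-byte access; the volatile cast
	 * encourages the compiler to emit a single word load ("lw"). */
	v = *(volatile uint32_t *)(buf + 1);
	printf("loaded 0x%08x\n", v);
	return 0;
}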
From: Clément Léger
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 3/6] riscv: allow S-mode to handle misaligned traps
Date: Sat, 24 Jun 2023 14:20:46 +0200
Message-Id: <20230624122049.7886-4-cleger@rivosinc.com>
In-Reply-To: <20230624122049.7886-1-cleger@rivosinc.com>

Now that the SBI can be configured to delegate these traps to the
kernel, make the misalignment handling available to S-mode as well.

Signed-off-by: Clément Léger
---
 arch/riscv/kernel/traps.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 8c258b78c925..b544cdd52dd2 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -149,12 +149,6 @@ DO_ERROR_INFO(do_trap_insn_illegal,
 	SIGILL, ILL_ILLOPC, "illegal instruction");
 DO_ERROR_INFO(do_trap_load_fault,
 	SIGSEGV, SEGV_ACCERR, "load access fault");
-#ifndef CONFIG_RISCV_M_MODE
-DO_ERROR_INFO(do_trap_load_misaligned,
-	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
-DO_ERROR_INFO(do_trap_store_misaligned,
-	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
-#else
 int handle_misaligned_load(struct pt_regs *regs);
 int handle_misaligned_store(struct pt_regs *regs);
 
@@ -199,7 +193,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
 		irqentry_nmi_exit(regs, state);
 	}
 }
-#endif
 DO_ERROR_INFO(do_trap_store_fault,
 	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
 DO_ERROR_INFO(do_trap_ecall_s,
-- 
2.40.1
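[For context, a simplified sketch of the assumed shape of the trap path; the
full handler bodies are not visible in this hunk, and do_trap_error() is the
helper that the DO_ERROR_INFO() wrappers in traps.c expand to. The idea: try
to emulate, and only fall back to SIGBUS/BUS_ADRALN when emulation fails.]

/* Assumed shape (editor's sketch): handle_misaligned_load() returns 0 on
 * successful emulation and -1 otherwise, in which case the task is sent
 * SIGBUS exactly as the removed DO_ERROR_INFO() handlers did. */
static void misaligned_load_trap_sketch(struct pt_regs *regs)
{
	if (handle_misaligned_load(regs))
		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      "Oops - load address misaligned");
}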
From: Clément Léger
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 4/6] riscv: add support for SBI misalignment trap delegation
Date: Sat, 24 Jun 2023 14:20:47 +0200
Message-Id: <20230624122049.7886-5-cleger@rivosinc.com>
In-Reply-To: <20230624122049.7886-1-cleger@rivosinc.com>

Add support for misalignment trap delegation using the
SBI_EXT_FW_FEATURE SBI extension. This extension allows controlling SBI
behavior by requesting that specific features be set to a given value.
In order to implement the prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS)
behavior properly, we need the kernel to handle the misalignment errors
itself. This commit adds the sbi_delegate_misaligned() function to
check whether the SBI delegated misalignment handling to us. The value
returned by this function is initialized at init time from sbi_init().

Signed-off-by: Clément Léger
---
 arch/riscv/include/asm/sbi.h | 12 ++++++++++++
 arch/riscv/kernel/sbi.c      | 28 ++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 5b4a1bf5f439..97c2b5d6abff 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -30,6 +30,7 @@ enum sbi_ext_id {
 	SBI_EXT_HSM = 0x48534D,
 	SBI_EXT_SRST = 0x53525354,
 	SBI_EXT_PMU = 0x504D55,
+	SBI_EXT_FW_FEATURE = 0x46574654,
 
 	/* Experimentals extensions must lie within this range */
 	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -236,6 +237,16 @@ enum sbi_pmu_ctr_type {
 /* Flags defined for counter stop function */
 #define SBI_PMU_STOP_FLAG_RESET (1 << 0)
 
+/* SBI function IDs for FW feature extension */
+#define SBI_EXT_FW_FEATURE_SET	0x0
+#define SBI_EXT_FW_FEATURE_GET	0x1
+
+enum sbi_fw_features_t {
+	SBI_FW_FEATURE_MISALIGNED_DELEG = 0,
+
+	SBI_FW_FEATURE_MAX,
+};
+
 #define SBI_SPEC_VERSION_DEFAULT	0x1
 #define SBI_SPEC_VERSION_MAJOR_SHIFT	24
 #define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
@@ -269,6 +280,7 @@ int sbi_console_getchar(void);
 long sbi_get_mvendorid(void);
 long sbi_get_marchid(void);
 long sbi_get_mimpid(void);
+bool sbi_delegate_misaligned(void);
 void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
 void sbi_send_ipi(unsigned int cpu);
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index c672c8ba9a2a..18effd9e6ea9 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -22,6 +22,8 @@ static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
 			   unsigned long start, unsigned long size,
 			   unsigned long arg4, unsigned long arg5) __ro_after_init;
 
+static bool __sbi_misaligned_deleg __ro_after_init;
+
 struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
 			unsigned long arg1, unsigned long arg2,
 			unsigned long arg3, unsigned long arg4,
@@ -342,6 +344,11 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
 	return 0;
 }
 
+bool sbi_delegate_misaligned(void)
+{
+	return __sbi_misaligned_deleg;
+}
+
 /**
  * sbi_set_timer() - Program the timer for next timer event.
  * @stime_value: The value after which next timer event should fire.
@@ -494,6 +501,16 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
 }
 EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);
 
+static int sbi_fw_feature_set(enum sbi_fw_features_t feature, bool set)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_FW_FEATURE, SBI_EXT_FW_FEATURE_SET, feature,
+			set, 0, 0, 0, 0);
+
+	return sbi_err_map_linux_errno(ret.error);
+}
+
 static void sbi_srst_reset(unsigned long type, unsigned long reason)
 {
 	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
@@ -624,6 +641,17 @@ void __init sbi_init(void)
 			sbi_srst_reboot_nb.priority = 192;
 			register_restart_handler(&sbi_srst_reboot_nb);
 		}
+		/*
+		 * TODO: this will likely need to be updated when SBI extension
+		 * is ratified
+		 */
+		if ((sbi_spec_version >= sbi_mk_version(1, 0)) &&
+		    (sbi_probe_extension(SBI_EXT_FW_FEATURE) > 0)) {
+			pr_info("SBI FW_FEATURE extension detected\n");
+			if (!sbi_fw_feature_set(SBI_FW_FEATURE_MISALIGNED_DELEG,
+						true))
+				__sbi_misaligned_deleg = true;
+		}
 	} else {
 		__sbi_set_timer = __sbi_set_timer_v01;
 		__sbi_send_ipi = __sbi_send_ipi_v01;
-- 
2.40.1
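[As background, an editor's sketch of the standard SBI calling convention
that sbi_ecall() ultimately follows, not code from the patch: the extension
ID goes in a7, the function ID in a6, arguments in a0/a1, and error/value
come back in a0/a1. The extension ID 0x46574654 chosen above is ASCII for
"FWFT". A stripped-down two-argument version looks like this:]

struct sbiret_sketch {
	long error;
	long value;
};

static struct sbiret_sketch sbi_ecall_sketch(unsigned long eid,
					     unsigned long fid,
					     unsigned long arg0,
					     unsigned long arg1)
{
	register unsigned long a0 asm("a0") = arg0;	/* first argument */
	register unsigned long a1 asm("a1") = arg1;	/* second argument */
	register unsigned long a6 asm("a6") = fid;	/* function ID */
	register unsigned long a7 asm("a7") = eid;	/* extension ID */
	struct sbiret_sketch ret;

	/* Trap into the SBI firmware (M-mode). */
	asm volatile("ecall"
		     : "+r" (a0), "+r" (a1)
		     : "r" (a6), "r" (a7)
		     : "memory");
	ret.error = a0;
	ret.value = a1;
	return ret;
}

[Delegating misaligned traps then amounts to
sbi_ecall_sketch(0x46574654 /* FWFT */, 0x0 /* SET */,
SBI_FW_FEATURE_MISALIGNED_DELEG, 1), which is what the new
sbi_fw_feature_set() helper wraps.]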
From: Clément Léger
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 5/6] riscv: add support for PR_SET_UNALIGN and PR_GET_UNALIGN
Date: Sat, 24 Jun 2023 14:20:48 +0200
Message-Id: <20230624122049.7886-6-cleger@rivosinc.com>
In-Reply-To: <20230624122049.7886-1-cleger@rivosinc.com>

Now that trap support is ready to handle misalignment errors in S-mode,
allow the user to control the behavior of misaligned accesses using
prctl(). Add an align_ctl flag in thread_struct which will be used to
determine whether we should SIGBUS the process on such a fault.

Signed-off-by: Clément Léger
---
 arch/riscv/include/asm/processor.h   |  9 +++++++++
 arch/riscv/kernel/process.c          | 20 ++++++++++++++++++++
 arch/riscv/kernel/traps_misaligned.c |  7 +++++++
 3 files changed, 36 insertions(+)

diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 94a0590c6971..4e6667d5ca68 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_PROCESSOR_H
 
 #include
+#include
 
 #include
 
@@ -39,6 +40,7 @@ struct thread_struct {
 	unsigned long s[12];	/* s[0]: frame pointer */
 	struct __riscv_d_ext_state fstate;
 	unsigned long bad_cause;
+	unsigned long align_ctl;
 };
 
 /* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -51,6 +53,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 
 #define INIT_THREAD {					\
 	.sp = sizeof(init_stack) + (long)&init_stack,	\
+	.align_ctl = PR_UNALIGN_NOPRINT,		\
 }
 
 #define task_pt_regs(tsk)				\
@@ -80,6 +83,12 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);
 extern void riscv_fill_hwcap(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
+extern int get_unalign_ctl(struct task_struct *, unsigned long addr);
+extern int set_unalign_ctl(struct task_struct *, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
+#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e2a060066730..b8a41e3c1333 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -40,6 +41,25 @@ void arch_cpu_idle(void)
 	cpu_do_idle();
 }
 
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+	if (!sbi_delegate_misaligned())
+		return -EINVAL;
+#endif
+	tsk->thread.align_ctl = val;
+	return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+	if (!sbi_delegate_misaligned())
+		return -EINVAL;
+#endif
+	return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
 	show_regs_print_info(KERN_DEFAULT);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index e4a273ab77c9..b828a0f3d4f7 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -277,6 +278,9 @@ int handle_misaligned_load(struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_RISCV_M_MODE) && !user_mode(regs))
 		return -1;
 
+	if ((current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+		return -1;
+
 	if (get_insn(epc, &insn))
 		return -1;
 
@@ -373,6 +377,9 @@ int handle_misaligned_store(struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_RISCV_M_MODE) && !user_mode(regs))
 		return -1;
 
+	if ((current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+		return -1;
+
 	if (get_insn(epc, &insn))
 		return -1;
 
-- 
2.40.1
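[The user-visible effect can be demonstrated with a short program (an
editor's sketch, not part of the series): once the SBI has delegated
misaligned traps, a process can opt out of emulation and receive SIGBUS
instead. PR_SET_UNALIGN fails with EINVAL when delegation is not in place.]

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int cur;

	/* Ask for SIGBUS instead of in-kernel emulation. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS))
		perror("PR_SET_UNALIGN");	/* EINVAL if SBI did not delegate */

	/* Read the current setting back. */
	if (prctl(PR_GET_UNALIGN, &cur) == 0)
		printf("align_ctl = %u\n", cur);
	return 0;
}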
From: Clément Léger
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 6/6] riscv: add floating point insn support to misaligned access emulation
Date: Sat, 24 Jun 2023 14:20:49 +0200
Message-Id: <20230624122049.7886-7-cleger@rivosinc.com>
In-Reply-To: <20230624122049.7886-1-cleger@rivosinc.com>

This support is partially based on the OpenSBI misaligned emulation of
floating point instructions. It provides support for the existing
floating point load/store instructions (both the 32/64-bit ones and the
compressed ones). Since floating point registers are not part of the
pt_regs struct, we need to modify them directly using some assembly. We
also dirty the pt_regs status when we modify the registers to be sure
the context switch will save them. With this support, Linux is on par
with the OpenSBI support.

Signed-off-by: Clément Léger
---
 arch/riscv/kernel/fpu.S              | 118 ++++++++++++++++++++++++
 arch/riscv/kernel/traps_misaligned.c | 130 ++++++++++++++++++++++++++-
 2 files changed, 244 insertions(+), 4 deletions(-)

diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
index dd2205473de7..27f2e35ce8c2 100644
--- a/arch/riscv/kernel/fpu.S
+++ b/arch/riscv/kernel/fpu.S
@@ -104,3 +104,121 @@ ENTRY(__fstate_restore)
 	csrc CSR_STATUS, t1
 	ret
 ENDPROC(__fstate_restore)
+
+
+#define get_f32(which) fmv.x.s a0, which; j 2f
+#define put_f32(which) fmv.s.x which, a1; j 2f
+#if __riscv_xlen == 64
+# define get_f64(which) fmv.x.d a0, which; j 2f
+# define put_f64(which) fmv.d.x which, a1; j 2f
+#else
+# define get_f64(which) fsd which, 0(a1); j 2f
+# define put_f64(which) fld which, 0(a1); j 2f
+#endif
+
+.macro fp_access_prologue
+	/*
+	 * Compute jump offset to store the correct FP register since we don't
+	 * have indirect FP register access
+	 */
+	sll t0, a0, 3
+	la t2, 1f
+	add t0, t0, t2
+	li t1, SR_FS
+	csrs CSR_STATUS, t1
+	jr t0
+1:
+.endm
+
+.macro fp_access_epilogue
+2:
+	csrc CSR_STATUS, t1
+	ret
+.endm
+
+#define fp_access_body(__access_func) \
+	__access_func(f0); \
+	__access_func(f1); \
+	__access_func(f2); \
+	__access_func(f3); \
+	__access_func(f4); \
+	__access_func(f5); \
+	__access_func(f6); \
+	__access_func(f7); \
+	__access_func(f8); \
+	__access_func(f9); \
+	__access_func(f10); \
+	__access_func(f11); \
+	__access_func(f12); \
+	__access_func(f13); \
+	__access_func(f14); \
+	__access_func(f15); \
+	__access_func(f16); \
+	__access_func(f17); \
+	__access_func(f18); \
+	__access_func(f19); \
+	__access_func(f20); \
+	__access_func(f21); \
+	__access_func(f22); \
+	__access_func(f23); \
+	__access_func(f24); \
+	__access_func(f25); \
+	__access_func(f26); \
+	__access_func(f27); \
+	__access_func(f28); \
+	__access_func(f29); \
+	__access_func(f30); \
+	__access_func(f31)
+
+
+/*
+ * Disable compressed instructions set to keep a constant offset between FP
+ * load/store/move instructions
+ */
+.option norvc
+/*
+ * put_f32_reg - Set a FP register from a register containing the value
+ * a0 = FP register index to be set
+ * a1 = value to be loaded in the FP register
+ */
+SYM_FUNC_START(put_f32_reg)
+	fp_access_prologue
+	fp_access_body(put_f32)
+	fp_access_epilogue
+SYM_FUNC_END(put_f32_reg)
+
+/*
+ * get_f32_reg - Get a FP register value and return it
+ * a0 = FP register index to be retrieved
+ */
+SYM_FUNC_START(get_f32_reg)
+	fp_access_prologue
+	fp_access_body(get_f32)
+	fp_access_epilogue
+SYM_FUNC_END(get_f32_reg)
+
+/*
+ * put_f64_reg - Set a 64-bit FP register from a value or a pointer.
+ * a0 = FP register index to be set
+ * a1 = value/pointer to be loaded in the FP register (when xlen == 32 bits,
+ * we load the value through the pointer).
+ */
+SYM_FUNC_START(put_f64_reg)
+	fp_access_prologue
+	fp_access_body(put_f64)
+	fp_access_epilogue
+SYM_FUNC_END(put_f64_reg)
+
+/*
+ * get_f64_reg - Get a 64-bit FP register value and return it or store it to
+ * a pointer.
+ * a0 = FP register index to be retrieved
+ * a1 = if xlen == 32, pointer to be loaded with the FP register value;
+ * unused if xlen == 64, in which case the FP register value is returned
+ * through a0
+ */
+SYM_FUNC_START(get_f64_reg)
+	fp_access_prologue
+	fp_access_body(get_f64)
+	fp_access_epilogue
+SYM_FUNC_END(get_f64_reg)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index b828a0f3d4f7..0c02d6ef6a85 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -153,6 +153,93 @@
 #define PRECISION_S 0
 #define PRECISION_D 1
 
+#ifdef CONFIG_FPU
+
+#define FP_GET_RD(insn)		(insn >> 7 & 0x1F)
+
+extern void put_f32_reg(unsigned long fp_reg, unsigned long value);
+
+static inline void set_f32_rd(unsigned long insn, struct pt_regs *regs,
+			      unsigned long val)
+{
+	unsigned long fp_reg = FP_GET_RD(insn);
+
+	put_f32_reg(fp_reg, val);
+	regs->status |= SR_FS_DIRTY;
+}
+
+extern void put_f64_reg(unsigned long fp_reg, unsigned long value);
+
+static inline void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
+{
+	unsigned long fp_reg = FP_GET_RD(insn);
+	unsigned long value;
+
+#if __riscv_xlen == 32
+	value = (unsigned long) &val;
+#else
+	value = val;
+#endif
+	put_f64_reg(fp_reg, value);
+	regs->status |= SR_FS_DIRTY;
+}
+
+#if __riscv_xlen == 32
+extern void get_f64_reg(unsigned long fp_reg, u64 *value);
+
+static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+		      struct pt_regs *regs)
+{
+	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+	u64 val;
+
+	get_f64_reg(fp_reg, &val);
+	regs->status |= SR_FS_DIRTY;
+
+	return val;
+}
+#else
+
+extern unsigned long get_f64_reg(unsigned long fp_reg);
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+				struct pt_regs *regs)
+{
+	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+	unsigned long val;
+
+	val = get_f64_reg(fp_reg);
+	regs->status |= SR_FS_DIRTY;
+
+	return val;
+}
+
+#endif
+
+#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
+#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
+#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))
+
+extern unsigned long get_f32_reg(unsigned long fp_reg);
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+				struct pt_regs *regs)
+{
+	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+	unsigned long val;
+
+	val = get_f32_reg(fp_reg);
+	regs->status |= SR_FS_DIRTY;
+
+	return val;
+}
+
+#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
+#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
+#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
+
+#endif
+
 #ifdef CONFIG_RISCV_M_MODE
 static inline int load_u8(const u8 *addr, u8 *r_val)
 {
@@ -347,15 +434,21 @@ int handle_misaligned_load(struct pt_regs *regs)
 		return -1;
 	}
 
+	if (!IS_ENABLED(CONFIG_FPU) && fp)
+		return -1;
+
 	val.data_u64 = 0;
 	for (i = 0; i < len; i++) {
 		if (load_u8((void *)(addr + i), &val.data_bytes[i]))
 			return -1;
 	}
 
-	if (fp)
-		return -1;
-	SET_RD(insn, regs, val.data_ulong << shift >> shift);
+	if (!fp)
+		SET_RD(insn, regs, val.data_ulong << shift >> shift);
+	else if (len == 8)
+		set_f64_rd(insn, regs, val.data_u64);
+	else
+		set_f32_rd(insn, regs, val.data_ulong);
 
 	regs->epc = epc + INSN_LEN(insn);
 
@@ -368,7 +461,7 @@ int handle_misaligned_store(struct pt_regs *regs)
 	unsigned long epc = regs->epc;
 	unsigned long insn;
 	unsigned long addr;
-	int i, len = 0;
+	int i, len = 0, fp = 0;
 
 	/*
 	 * When running in supervisor mode, we only handle misaligned accesses
@@ -394,6 +487,14 @@ int handle_misaligned_store(struct pt_regs *regs)
 	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
 		len = 8;
 #endif
+	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
+		fp = 1;
+		len = 8;
+		val.data_u64 = GET_F64_RS2(insn, regs);
+	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
+		fp = 1;
+		len = 4;
+		val.data_ulong = GET_F32_RS2(insn, regs);
 	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
 		len = 2;
 #if defined(CONFIG_64BIT)
@@ -412,11 +513,32 @@ int handle_misaligned_store(struct pt_regs *regs)
 		   ((insn >> SH_RD) & 0x1f)) {
 		len = 4;
 		val.data_ulong = GET_RS2C(insn, regs);
+	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
+		fp = 1;
+		len = 8;
+		val.data_u64 = GET_F64_RS2S(insn, regs);
+	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
+		fp = 1;
+		len = 8;
+		val.data_u64 = GET_F64_RS2C(insn, regs);
+#if !defined(CONFIG_64BIT)
+	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
+		fp = 1;
+		len = 4;
+		val.data_ulong = GET_F32_RS2S(insn, regs);
+	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
+		fp = 1;
+		len = 4;
+		val.data_ulong = GET_F32_RS2C(insn, regs);
+#endif
 	} else {
 		regs->epc = epc;
 		return -1;
 	}
 
+	if (!IS_ENABLED(CONFIG_FPU) && fp)
+		return -1;
+
 	for (i = 0; i < len; i++) {
 		if (store_u8((void *)(addr + i), val.data_bytes[i]))
 			return -1;
-- 
2.40.1
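[For illustration only, a user-space sketch, not part of the series: a packed
struct forces a double onto an odd offset so the compiler may emit a
misaligned fld/fsd, which the emulation added by this patch can now fix up
when hardware traps on it.]

#include <stdio.h>

struct __attribute__((packed)) unaligned_double {
	char pad;
	double d;	/* offset 1: misaligned for fld/fsd */
};

int main(void)
{
	struct unaligned_double u = { .pad = 0, .d = 1.5 };
	volatile double v;

	v = u.d;	/* may compile to a misaligned fld */
	u.d = v * 2;	/* may compile to a misaligned fsd */
	printf("%f\n", u.d);
	return 0;
}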