From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7A665EB64D9 for ; Tue, 4 Jul 2023 14:10:14 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231370AbjGDOKN (ORCPT ); Tue, 4 Jul 2023 10:10:13 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:39438 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230246AbjGDOKL (ORCPT ); Tue, 4 Jul 2023 10:10:11 -0400 Received: from mail-pf1-x434.google.com (mail-pf1-x434.google.com [IPv6:2607:f8b0:4864:20::434]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 986C7E76 for ; Tue, 4 Jul 2023 07:10:10 -0700 (PDT) Received: by mail-pf1-x434.google.com with SMTP id d2e1a72fcca58-656bc570a05so1191169b3a.0 for ; Tue, 04 Jul 2023 07:10:10 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479810; x=1691071810; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=XQDAxN5g1vMcAJWLoIEgGtbs3vH3NsC3JS3eiLLd1tc=; b=x3kmJaGRhNFiyR+5vFpSXLIa9I/mlBp1FZIlHR7iUWNbemQ2AzC7VjP/ef5O2LNqQ3 gmLryl1+KAIA1T/8iQaZh1jUujXJtzNWUcyyQzos3podLdmJ+n403ueH+A3QJfYo9CVd s9C6UM+TXW8lzy9VxwBaLgt8VTxzsRJtNmuP5YRH+hefRCko72ghRzzDDSeMNcZ4OjiZ B/Og/WST6QGTSeWZkH/DtF4t4sWbs6qCSyD25ztKlA5EqvqGMRI0/9Z2tBkifZkGVn2v XPsBHp77Icmh++/Rxhwpx4Vf3RNw9lTC7M7FZ7ooLktOPvGrBhqi95HJO+fqd3F63xtK N3MA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479810; x=1691071810; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; 
bh=XQDAxN5g1vMcAJWLoIEgGtbs3vH3NsC3JS3eiLLd1tc=; b=eqNDcDx7RWBRREWwADUDZhVvCFU/VkRhRuj/gRWFAp72Mm+vIwfPlkme+6rJFpX59q LTlq1o41FYGT5NqCdS7M3mynm4mxdvKf09qKfflDSgJYrCIknMYlwvsowGnmHGFzF1tV 8O8PsBTILOMuWBLHHv5X+wrnBus/i0EJuNCR5boWmD/8z3gncHDz5/jjaTsMxDZ2x4kb 8Qppacij2063cDGTmBDeN3y0/n1pcFtnI94S1+mKCfX6gOIwnuwyRkdLyzB7pUXUFWgw YZDHabawwQfg1iNJfbromPp8cSW5/ha3Xwfk/JQKCTkHcaj2mr05KDiykn9j304uZm87 Uhmg== X-Gm-Message-State: AC+VfDxwDixkLH9wpydvPvrA04rXkx0fFoYKBbTRJz8bptXeGdMmX4TU KQFQ36okhmwm+LlVm+1EbXw3Tw== X-Google-Smtp-Source: ACHHUZ61pGxqIwJDLzs0Ls4ahG6yCuywecUKKdvw7/wiQX3e1lNH2vE+lgotJfY9hRYQbqxpXQYWYA== X-Received: by 2002:a05:6a20:8e19:b0:12d:77e:ba3 with SMTP id y25-20020a056a208e1900b0012d077e0ba3mr19091770pzj.0.1688479809970; Tue, 04 Jul 2023 07:10:09 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.09.54 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:10:09 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. 
Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 1/9] riscv: remove unused functions in traps_misaligned.c Date: Tue, 4 Jul 2023 16:09:16 +0200 Message-Id: <20230704140924.315594-2-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Replace macros by the only two function calls that are done from this file, store_u8() and load_u8(). Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/kernel/traps_misaligned.c | 46 +++++----------------------- 1 file changed, 7 insertions(+), 39 deletions(-) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 378f5b151443..e7bfb33089c1 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -151,51 +151,19 @@ #define PRECISION_S 0 #define PRECISION_D 1 =20 -#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \ -static inline type load_##type(const type *addr) \ -{ \ - type val; \ - asm (#insn " %0, %1" \ - : "=3D&r" (val) : "m" (*addr)); \ - return val; \ -} +static inline u8 load_u8(const u8 *addr) +{ + u8 val; =20 -#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \ -static inline void store_##type(type *addr, type val) \ -{ \ - asm volatile (#insn " %0, %1\n" \ - : : "r" (val), "m" (*addr)); \ -} + asm volatile("lbu %0, %1" : "=3D&r" (val) : "m" (*addr)); =20 -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw) 
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw) -#if defined(CONFIG_64BIT) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld) -#else -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw) - -static inline u64 load_u64(const u64 *addr) -{ - return load_u32((u32 *)addr) - + ((u64)load_u32((u32 *)addr + 1) << 32); + return val; } =20 -static inline void store_u64(u64 *addr, u64 val) +static inline void store_u8(u8 *addr, u8 val) { - store_u32((u32 *)addr, val); - store_u32((u32 *)addr + 1, val >> 32); + asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr)); } -#endif =20 static inline ulong get_insn(ulong mepc) { --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B3F48EB64DD for ; Tue, 4 Jul 2023 14:10:32 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230375AbjGDOKb (ORCPT ); Tue, 4 Jul 2023 10:10:31 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:39666 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231162AbjGDOK1 (ORCPT ); Tue, 4 Jul 2023 10:10:27 -0400 Received: from mail-pl1-x62c.google.com (mail-pl1-x62c.google.com [IPv6:2607:f8b0:4864:20::62c]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A803610A for ; Tue, 4 Jul 2023 07:10:26 -0700 (PDT) Received: by mail-pl1-x62c.google.com with SMTP id d9443c01a7336-1b867f9198dso5148375ad.0 for ; Tue, 04 Jul 2023 07:10:26 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; 
t=1688479826; x=1691071826; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=hP9LYFgOAjVow+h4CXKR68tZiOwggsdorDJAE/dJK2A=; b=pYjDpVSeZeTuytuJLr3GPhFpEWF9LCgMQRXoR5oOr/k5mdYt8DhDctQQKJnZASFmgT kdKEH2cTcqEIpm5lw1W754SPdG5AQDTOMkq3e0mrGXsrWkcVKCZ8m3d1pSAweRrvK62G ow9UIJEAjMznprBt+XIWaUjHOlVl+3M7ENW9lTuORu4pRtjBbLyyxw3mInyMscFrHA1y 4dqQWTl/CwSfDfTFBWHg7Hp9KR/AGkX3LAX24+MmF6Pydl1t0lxtVZyZ+qx1xLdQR/zO s0Sav4ta/c5+XMKQhfO1NqsLN+O8Nfh3Dn0EQNf66+zcWSUh5FVWnPvIzfkEeaK5X4++ L1MA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479826; x=1691071826; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=hP9LYFgOAjVow+h4CXKR68tZiOwggsdorDJAE/dJK2A=; b=S/kQey5vUj9nxcVmD84XuvKizKJJnxuovs1UEAQZ+rwCv8UkD2mvD9+rV8Rg5poci+ y1L8+xbUz8jWnyFkzNc7LMQ+17c6CQi8rTh6LKlP0GNDNCDnTdcqBmrmUCmgi2gXWmV2 /9p9YuCm5y6kBmFs58fWosZUxXxiHi2gy74sdfUAk3+9+wmH/54fRF2nO7YbnTcYN6B8 CbYARFd1dRKZcyBRTSPsDlgJeE+fuaVvvxxqyQFL/oSGAw0U8xXg6uwbB3e47D1MjVDh M4WaZYeKZcgWU5baiu2mrNNmZYsuwo16IQ5Ri543OzavcUwU9Qwr3y56uy5GxrptRo7X ZAoA== X-Gm-Message-State: ABy/qLaLwgKa1g10ebuF7ShElxaqFm7Nix94/S6ckSRxEYuqP8Uw4gdE D6hjxVH+PUiTFn5PKrUAt6jpmw== X-Google-Smtp-Source: APBJJlHicEwS4uP/iuupus1vVIadXFyTj6jwkGefEbPWSlascGFaY876ZMt3mvzrTKvWDbF92RyLFg== X-Received: by 2002:a17:902:ec88:b0:1a6:6bdb:b548 with SMTP id x8-20020a170902ec8800b001a66bdbb548mr13222771plg.1.1688479826153; Tue, 04 Jul 2023 07:10:26 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.10.10 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:10:25 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert 
Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 2/9] riscv: avoid missing prototypes warning Date: Tue, 4 Jul 2023 16:09:17 +0200 Message-Id: <20230704140924.315594-3-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Declare handle_misaligned_store/load() functions in entry-common.h and include that file in traps_misaligned.c file to avoid warnings. 
Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/include/asm/entry-common.h | 3 +++ arch/riscv/kernel/traps.c | 2 -- arch/riscv/kernel/traps_misaligned.c | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm= /entry-common.h index 6e4dee49d84b..58e9e2976e1b 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -8,4 +8,7 @@ void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); =20 +int handle_misaligned_load(struct pt_regs *regs); +int handle_misaligned_store(struct pt_regs *regs); + #endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 8c258b78c925..7fcaf2fd27a1 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -155,8 +155,6 @@ DO_ERROR_INFO(do_trap_load_misaligned, DO_ERROR_INFO(do_trap_store_misaligned, SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned"); #else -int handle_misaligned_load(struct pt_regs *regs); -int handle_misaligned_store(struct pt_regs *regs); =20 asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt= _regs *regs) { diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index e7bfb33089c1..0cccac4822a8 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -12,6 +12,7 @@ #include #include #include +#include =20 #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 4702EEB64D9 for ; Tue, 4 Jul 2023 14:10:50 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231406AbjGDOKs (ORCPT ); Tue, 4 Jul 
2023 10:10:48 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:39876 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231383AbjGDOKp (ORCPT ); Tue, 4 Jul 2023 10:10:45 -0400 Received: from mail-pl1-x62c.google.com (mail-pl1-x62c.google.com [IPv6:2607:f8b0:4864:20::62c]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 7808910C1 for ; Tue, 4 Jul 2023 07:10:43 -0700 (PDT) Received: by mail-pl1-x62c.google.com with SMTP id d9443c01a7336-1b7e0904a3aso6748235ad.0 for ; Tue, 04 Jul 2023 07:10:43 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479843; x=1691071843; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=tjQ2GzX04kf0h9GMnyJoWtp+hj7mtUQd1bUdoSwTimI=; b=2D84fmx8ldK1bX2AURYX4UfCnJ8PND8qZqnODVLh1ZPtRExDeFgT6muDDtq5fo/me2 KM1xs6xcI8E5R8wpwRry3UPICUkVmfPp7feQ8W3knFgty1UqbENYodnh+uSqomp6hSK/ llhsZvbc5a0wePJY30HYr9hdfkC7deKJnxNX2zz14X5NlnZiYftro2CpaOfbOBe1WOg5 D0s4qCelN8Q0YU8DHTKypQpN0thdgEyP+XhR0rEhDR1CqUNLK3GMZIObn5oNXJI61nDY zxXuY0BDhzq6PSX5LXN1sR/84VrWV47u5iKDQPLpW1iNLfTgo46+OrYFBY6Da+ETpsbj g3dw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479843; x=1691071843; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=tjQ2GzX04kf0h9GMnyJoWtp+hj7mtUQd1bUdoSwTimI=; b=Bvw99NvNzg/UnBx0HscSh1ROQwVrmBNFqeL1AU7OptTo604gUuG7cBzSVGFPMAywW0 wXG5zGRjs4NW0f1ay0zzxwDGkl/Otnft7pYhT1GTStRPGPuqNK+IhOYUNe8NR2w7JjmF q3+3upQL/TJELurKGfFFVG0KG+ZZD17qWv7bLKrapuAOMNx41TmAGNDDq1SbH7tGVwYJ HEJHhi/Y2dnkNe/zk+TfPKxCBI/xMbYh2CrVb24lnb1sYH1mX0QnEcSPrCJZmX05146v ylJlWBnxZV6+nqI2nk6TUuK8l2TTIiJ8YTwNxm5RML4j3KbyRgoOibU88V7ZgBfcOyg6 j/SA== X-Gm-Message-State: 
ABy/qLb8iw81sheaJVSvoQ8WU2yxnMaJ6scMxhHd3dyPG9AYoIBB1E2K I+A632c+uUdE0mfMOn7cIjEFYQ== X-Google-Smtp-Source: APBJJlHtqmN+8FBUFiHr/OGtEU+mlJXelWyy/CZVQGAvVawGZmoodlwRJaLU0Rd2kUcdu8kdGt9NRw== X-Received: by 2002:a17:902:720c:b0:1ae:3ff8:7fa7 with SMTP id ba12-20020a170902720c00b001ae3ff87fa7mr13945198plb.4.1688479842532; Tue, 04 Jul 2023 07:10:42 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.10.26 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:10:41 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 3/9] riscv: add support for misaligned handling in S-mode Date: Tue, 4 Jul 2023 16:09:18 +0200 Message-Id: <20230704140924.315594-4-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Misalignment handling is only supported for M-mode and uses direct accesses to user memory. in S-mode, this requires to use the get_user()/put_user() accessors. Implement load_u8(), store_u8() and get_insn() using these accessors. 
Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/kernel/Makefile | 2 +- arch/riscv/kernel/traps.c | 7 -- arch/riscv/kernel/traps_misaligned.c | 118 ++++++++++++++++++++++++--- 3 files changed, 106 insertions(+), 21 deletions(-) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 153864e4f399..79b8dafc699d 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -55,10 +55,10 @@ obj-y +=3D riscv_ksyms.o obj-y +=3D stacktrace.o obj-y +=3D cacheinfo.o obj-y +=3D patch.o +obj-y +=3D traps_misaligned.o obj-y +=3D probes/ obj-$(CONFIG_MMU) +=3D vdso.o vdso/ =20 -obj-$(CONFIG_RISCV_M_MODE) +=3D traps_misaligned.o obj-$(CONFIG_FPU) +=3D fpu.o obj-$(CONFIG_SMP) +=3D smpboot.o obj-$(CONFIG_SMP) +=3D smp.o diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 7fcaf2fd27a1..b2fb2266fb83 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -149,12 +149,6 @@ DO_ERROR_INFO(do_trap_insn_illegal, SIGILL, ILL_ILLOPC, "illegal instruction"); DO_ERROR_INFO(do_trap_load_fault, SIGSEGV, SEGV_ACCERR, "load access fault"); -#ifndef CONFIG_RISCV_M_MODE -DO_ERROR_INFO(do_trap_load_misaligned, - SIGBUS, BUS_ADRALN, "Oops - load address misaligned"); -DO_ERROR_INFO(do_trap_store_misaligned, - SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned"); -#else =20 asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt= _regs *regs) { @@ -197,7 +191,6 @@ asmlinkage __visible __trap_section void do_trap_store_= misaligned(struct pt_regs irqentry_nmi_exit(regs, state); } } -#endif DO_ERROR_INFO(do_trap_store_fault, SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault"); DO_ERROR_INFO(do_trap_ecall_s, diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 0cccac4822a8..9daed7d756ae 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -152,21 +152,25 @@ #define PRECISION_S 0 #define PRECISION_D 1 =20 -static 
inline u8 load_u8(const u8 *addr) +#ifdef CONFIG_RISCV_M_MODE +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) { u8 val; =20 asm volatile("lbu %0, %1" : "=3D&r" (val) : "m" (*addr)); + *r_val =3D val; =20 - return val; + return 0; } =20 -static inline void store_u8(u8 *addr, u8 val) +static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) { asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr)); + + return 0; } =20 -static inline ulong get_insn(ulong mepc) +static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn) { register ulong __mepc asm ("a2") =3D mepc; ulong val, rvc_mask =3D 3, tmp; @@ -195,9 +199,87 @@ static inline ulong get_insn(ulong mepc) : [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask), [xlen_minus_16] "i" (XLEN_MINUS_16)); =20 - return val; + *r_insn =3D val; + + return 0; +} +#else +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) +{ + if (user_mode(regs)) { + return __get_user(*r_val, addr); + } else { + *r_val =3D *addr; + return 0; + } } =20 +static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) +{ + if (user_mode(regs)) { + return __put_user(val, addr); + } else { + *addr =3D val; + return 0; + } +} + +#define __read_insn(regs, insn, insn_addr) \ +({ \ + int __ret; \ + \ + if (user_mode(regs)) { \ + __ret =3D __get_user(insn, insn_addr); \ + } else { \ + insn =3D *insn_addr; \ + __ret =3D 0; \ + } \ + \ + __ret; \ +}) + +static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn) +{ + ulong insn =3D 0; + + if (epc & 0x2) { + ulong tmp =3D 0; + u16 __user *insn_addr =3D (u16 __user *)epc; + + if (__read_insn(regs, insn, insn_addr)) + return -EFAULT; + /* __get_user() uses regular "lw" which sign extend the loaded + * value make sure to clear higher order bits in case we "or" it + * below with the upper 16 bits half. 
+ */ + insn &=3D GENMASK(15, 0); + if ((insn & __INSN_LENGTH_MASK) !=3D __INSN_LENGTH_32) { + *r_insn =3D insn; + return 0; + } + insn_addr++; + if (__read_insn(regs, tmp, insn_addr)) + return -EFAULT; + *r_insn =3D (tmp << 16) | insn; + + return 0; + } else { + u32 __user *insn_addr =3D (u32 __user *)epc; + + if (__read_insn(regs, insn, insn_addr)) + return -EFAULT; + if ((insn & __INSN_LENGTH_MASK) =3D=3D __INSN_LENGTH_32) { + *r_insn =3D insn; + return 0; + } + insn &=3D GENMASK(15, 0); + *r_insn =3D insn; + + return 0; + } +} +#endif + union reg_data { u8 data_bytes[8]; ulong data_ulong; @@ -208,10 +290,13 @@ int handle_misaligned_load(struct pt_regs *regs) { union reg_data val; unsigned long epc =3D regs->epc; - unsigned long insn =3D get_insn(epc); - unsigned long addr =3D csr_read(mtval); + unsigned long insn; + unsigned long addr =3D regs->badaddr; int i, fp =3D 0, shift =3D 0, len =3D 0; =20 + if (get_insn(regs, epc, &insn)) + return -1; + regs->epc =3D 0; =20 if ((insn & INSN_MASK_LW) =3D=3D INSN_MATCH_LW) { @@ -275,8 +360,10 @@ int handle_misaligned_load(struct pt_regs *regs) } =20 val.data_u64 =3D 0; - for (i =3D 0; i < len; i++) - val.data_bytes[i] =3D load_u8((void *)(addr + i)); + for (i =3D 0; i < len; i++) { + if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i])) + return -1; + } =20 if (fp) return -1; @@ -291,10 +378,13 @@ int handle_misaligned_store(struct pt_regs *regs) { union reg_data val; unsigned long epc =3D regs->epc; - unsigned long insn =3D get_insn(epc); - unsigned long addr =3D csr_read(mtval); + unsigned long insn; + unsigned long addr =3D regs->badaddr; int i, len =3D 0; =20 + if (get_insn(regs, epc, &insn)) + return -1; + regs->epc =3D 0; =20 val.data_ulong =3D GET_RS2(insn, regs); @@ -328,8 +418,10 @@ int handle_misaligned_store(struct pt_regs *regs) return -1; } =20 - for (i =3D 0; i < len; i++) - store_u8((void *)(addr + i), val.data_bytes[i]); + for (i =3D 0; i < len; i++) { + if (store_u8(regs, (void *)(addr + i), 
val.data_bytes[i])) + return -1; + } =20 regs->epc =3D epc + INSN_LEN(insn); =20 --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CB0FCEB64D9 for ; Tue, 4 Jul 2023 14:11:09 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231418AbjGDOLI (ORCPT ); Tue, 4 Jul 2023 10:11:08 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:40262 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231463AbjGDOLB (ORCPT ); Tue, 4 Jul 2023 10:11:01 -0400 Received: from mail-pg1-x532.google.com (mail-pg1-x532.google.com [IPv6:2607:f8b0:4864:20::532]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 785B211D for ; Tue, 4 Jul 2023 07:10:59 -0700 (PDT) Received: by mail-pg1-x532.google.com with SMTP id 41be03b00d2f7-55acbe0c7e4so548655a12.0 for ; Tue, 04 Jul 2023 07:10:59 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479859; x=1691071859; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=0nyDpPdQazxwk2pvs7+quB1sTF8r3wWKIala/GUDnoY=; b=CZLXwP5VfQFjNrQnljMV5Zy85jJG3k4y+dCeHiq43dFIAPlLI6+Aiw8u7Ewmj6ZKMI n+2VmRvDI146eRvT1T3tUKueL+wVMfiGNCkgHowSMdHmjUhXxC77nDSpU1HjavoSOI8Z 7s3o8t88db/r3H9oFk6dS28esOW0KptWw4EDcpiqhwINWZFpDI4YUDxEGLpp/R+Qg2+N r8R4zP+32ZhTE58O4FHU8SuQK6bg4pwmqJ9ozEq7N0kwMILYVuLSx5y3anN59D6P2hyJ 2c1rC7vxx/kZY9dKN6Y6OOZa+SeVG6fUBp4FnXLQ6YRLuHyOnoIl1dHz5pGdkSsPSEZU lVyA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479859; x=1691071859; h=content-transfer-encoding:mime-version:references:in-reply-to 
:message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=0nyDpPdQazxwk2pvs7+quB1sTF8r3wWKIala/GUDnoY=; b=Mo3P7jn/f0Gk5nj6yvXMcwijNTAmt3K3rLAAZo5yI0ObW8r1BDcl4eMlrN4+PKiZSh eKAge2qgLmYRYYenZGMtGiCwUxdji8PazDxiFZ1A0YHSNB2rgVf5GelVCKm9VHAEKWf1 p/mHMqLRKbNRHZybv1P8K5hNkTdra7pSfLU1erm1BUohJANwBh2696P+qf7Xy3NoV9BB X7KUm4ZVczLJL9ypJcsN1ftuxabUookl9YX7qfel+a3e8insvsaeu8yjM+sH+nIwyQb9 1GrKUEJ9FdlnQfyy96e3neECA/KYwWEGB8AFr8mme6RmlBy7YNNZpz32cLaZthL04TlY 7/gg== X-Gm-Message-State: ABy/qLbGry6nfjfjmiownFVfpyZkE7cZRAhgTGkTQWOXd8X6bf2xelvw F/eCBfmL1V7a+rwUnIQ4o4flKA== X-Google-Smtp-Source: APBJJlGlJHbQn1fjO2ojYVmiw4VmYk3Pe31P5xg00a17Fz/X6ELKBxWagGdfOYtxZ6KCYAit5lucQQ== X-Received: by 2002:a17:90b:2396:b0:262:f76d:b29c with SMTP id mr22-20020a17090b239600b00262f76db29cmr14800835pjb.2.1688479858934; Tue, 04 Jul 2023 07:10:58 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.10.43 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:10:58 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. 
Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 4/9] riscv: report perf event for misaligned fault Date: Tue, 4 Jul 2023 16:09:19 +0200 Message-Id: <20230704140924.315594-5-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Add missing calls to account for misaligned fault event using perf_sw_event(). Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/kernel/traps_misaligned.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 9daed7d756ae..804f6c5e0e44 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include =20 @@ -294,6 +295,8 @@ int handle_misaligned_load(struct pt_regs *regs) unsigned long addr =3D regs->badaddr; int i, fp =3D 0, shift =3D 0, len =3D 0; =20 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + if (get_insn(regs, epc, &insn)) return -1; =20 @@ -382,6 +385,8 @@ int handle_misaligned_store(struct pt_regs *regs) unsigned long addr =3D regs->badaddr; int i, len =3D 0; =20 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + if (get_insn(regs, epc, &insn)) return -1; =20 --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7A40BEB64D9 for ; Tue, 4 Jul 2023 
14:11:22 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231445AbjGDOLU (ORCPT ); Tue, 4 Jul 2023 10:11:20 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:40410 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231469AbjGDOLQ (ORCPT ); Tue, 4 Jul 2023 10:11:16 -0400 Received: from mail-pg1-x52d.google.com (mail-pg1-x52d.google.com [IPv6:2607:f8b0:4864:20::52d]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id DE1B5113 for ; Tue, 4 Jul 2023 07:11:15 -0700 (PDT) Received: by mail-pg1-x52d.google.com with SMTP id 41be03b00d2f7-55b741fd0c5so321327a12.0 for ; Tue, 04 Jul 2023 07:11:15 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479875; x=1691071875; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=JxIfI7L8QtMZ5B4oD7tB4oC/oAl/mItJTbe2vhSQwvU=; b=Q0HCtGy+m+yqgXytqQhl/oK/eRqrAUZnmyhcGucvrKtV+vyH0fl6JF9MeLRXDOdikh kPD+LCMVMPGrvC9PK21P8R53FGTVvNsLJqEqSrZNgIL2c4Qy00fTOc7ZMXjo8TuWc/K8 lnf7IimmS/UtfgM6Tvjl0MMcBjh0EmsM7LjhwDySn+LpOwCCGErHVJmOl8wYh5prwvUi xzfIJsrD6G0EJGc6a+GUHpRSdG15iOY51qIiQ/R5qosZ8nIO9Bs6/pTlva1ZQqaxUYck nP8AFGSPtsKDYQbqU6M+Wy+YpGcN0yNJQ9YSxw+hgmOGOhb8OyGBCe0sJXYtS0nMbjvL AvhQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479875; x=1691071875; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=JxIfI7L8QtMZ5B4oD7tB4oC/oAl/mItJTbe2vhSQwvU=; b=BpLyfZwEqGiT6CXpRHxUviP+JIZy+KaBI8dxXypkUqV3L+t5fkh0QOBovYJEBu72Vu m5w1yO49Qa5i/xLh2F/jCJvVKNt4KGeMAPh203WN1NQyqRnfqQsthejFkYAreeEqSfSp PhMwMlQeVIcVYWO77pxNIVcZKaGkBU/liER5JCo820Q1Gb7JKYwQIhnRtWBPWMi5+m/n Z8gYAZ8qDo4S5ePuRYc88MSwWPK9Ld6EPqZYH7OsEbY7IKoIEYCCTknuuUHSuf/eNDUW 
9FNBLhgZOc51vRDR2qDUw6ZYjNCdHsBH5oS6KLZMOdlMaIT48Ci8MVQXgDUWifLVgvoq DsKg== X-Gm-Message-State: ABy/qLZlSIXlEO2S6pw/BL4dOAc2riBC7e0+7BuCWBPKw9eCOTtHW/Yw R6tDl1U5kEuJslW/GfEOrk7flA== X-Google-Smtp-Source: APBJJlHa+Mx7cFJFRV4cvt3iY38HAoWgJj0SrcNe9F1wszSRZftWoIzwW9Ulq7ysrP6tB0kmo0UcdA== X-Received: by 2002:a17:903:28c7:b0:1b7:f3e6:5727 with SMTP id kv7-20020a17090328c700b001b7f3e65727mr13626965plb.2.1688479875116; Tue, 04 Jul 2023 07:11:15 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.10.59 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:11:14 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 5/9] riscv: add support for sysctl unaligned_enabled control Date: Tue, 4 Jul 2023 16:09:20 +0200 Message-Id: <20230704140924.315594-6-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org This sysctl tuning option allows the user to disable misaligned access handling globally on the system. 
This will also be used by misaligned detection code to temporarily disable misaligned access handling. Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/Kconfig | 1 + arch/riscv/kernel/traps_misaligned.c | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index c69572fbe613..99fd951def39 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -139,6 +139,7 @@ config RISCV select RISCV_TIMER if RISCV_SBI select SIFIVE_PLIC select SPARSE_IRQ + select SYSCTL_ARCH_UNALIGN_ALLOW select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select TRACE_IRQFLAGS_SUPPORT diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 804f6c5e0e44..39ec6caa6234 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -287,6 +287,9 @@ union reg_data { u64 data_u64; }; =20 +/* sysctl hooks */ +int unaligned_enabled __read_mostly =3D 1; /* Enabled by default */ + int handle_misaligned_load(struct pt_regs *regs) { union reg_data val; @@ -297,6 +300,9 @@ int handle_misaligned_load(struct pt_regs *regs) =20 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); =20 + if (!unaligned_enabled) + return -1; + if (get_insn(regs, epc, &insn)) return -1; =20 @@ -387,6 +393,9 @@ int handle_misaligned_store(struct pt_regs *regs) =20 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); =20 + if (!unaligned_enabled) + return -1; + if (get_insn(regs, epc, &insn)) return -1; =20 --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 513D0EB64D9 for ; Tue, 4 Jul 2023 14:11:49 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231305AbjGDOLr (ORCPT ); Tue, 4 Jul 2023 10:11:47 -0400 Received: from 
lindbergh.monkeyblade.net ([23.128.96.19]:40838 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231160AbjGDOLi (ORCPT ); Tue, 4 Jul 2023 10:11:38 -0400 Received: from mail-pg1-x531.google.com (mail-pg1-x531.google.com [IPv6:2607:f8b0:4864:20::531]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0E0AD10E9 for ; Tue, 4 Jul 2023 07:11:32 -0700 (PDT) Received: by mail-pg1-x531.google.com with SMTP id 41be03b00d2f7-55b741fd0c5so321374a12.0 for ; Tue, 04 Jul 2023 07:11:32 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479891; x=1691071891; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=Pj3KtNcskxGwBQpyjif+BbQZv3sqa1IZuxdbRGC5RTE=; b=JshnIAR8bl71rt3AS3UvaRseV7AvmKEBxHoNNg0/9Qhh8a1yTEPcGhFhTWqHfZeDXO zz5qgL/izraxd+lACmXYz5/XoieedX/a96bKFLIFs+C9JCUM+kgTJy45aunBrt8pI7wp s2BOdmMUUuGkOrDbLfk8RLtcHx6lF3lCNtAp600HpbSgFjPv+cyrTalGob0f6f5gBzut qlBTrFkFe3yTSMx8qWX2NoLmGuh7C0/5BcgvzluimZLJDauzVr1FdM725FboXPc4Q15I GLG4o+IDY/QgjFlIxKEesbCulbASs1+ri51nrsgg622pfAUSMMSHkrlr31t5c+jywUVM NmaA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479891; x=1691071891; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=Pj3KtNcskxGwBQpyjif+BbQZv3sqa1IZuxdbRGC5RTE=; b=KQ6cm0gSd7suE07OX/EFnGP1LNbp9+N+AdKD1blR3OmaSBMm8YY67bwpD44QaTTCvU +iXbqhVEeZdaMHa1TGWQwNQ0LwagfzxPk1GZb/c/TGDiiv5FlHGQ5rPk03DsCuD31720 HgTeuzlyQ4p7Do6UnmoaJX4tDi1xNhuxXBJi7+yFoeB1tmO3LLmS1NqlEtpyrR2EWb5N BfBOFqgrGiJpx7ru4QMUwmxkd8HJtoO67DTAk6/9oR3XmKfGzlHAERFpip41c4bLPZLc Z6ak8C1SLG6X9zQK/nF9x3JDdVB3rQv1M2XkKAANYTavpVDM2fCwLcL1DUIdgdeu9np/ ZHjg== X-Gm-Message-State: ABy/qLaaphwVuzOYC7Fw6Rp81wr5CCSLHkWjhMSnjw8rDjaU5At26BqF 
3nqHcb2noNCpOxjysnpdNlMLvQ== X-Google-Smtp-Source: APBJJlGJqqxOUkEZ0m69EgVitdcB3J4XS5rhZM+9Vu61VUJmXRK/Z/pitJwsMMHB6apqNoot6wAbCA== X-Received: by 2002:a17:902:e5c5:b0:1b8:17e8:5472 with SMTP id u5-20020a170902e5c500b001b817e85472mr13650058plf.1.1688479891435; Tue, 04 Jul 2023 07:11:31 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.11.15 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:11:31 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 6/9] riscv: add support for SBI misalignment trap delegation Date: Tue, 4 Jul 2023 16:09:21 +0200 Message-Id: <20230704140924.315594-7-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Add support for misalignment trap delegation by setting it with SBI_EXT_FW_FEATURE SBI extension. This extension allows to control SBI behavior by requesting to set them to specific value. 
In order to implement prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) behavior properly, we need to let the kernel handle the misalignment error by itself. Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/include/asm/sbi.h | 11 +++++++++++ arch/riscv/kernel/sbi.c | 21 +++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 5b4a1bf5f439..c1b74c7d0d56 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -30,6 +30,7 @@ enum sbi_ext_id { SBI_EXT_HSM =3D 0x48534D, SBI_EXT_SRST =3D 0x53525354, SBI_EXT_PMU =3D 0x504D55, + SBI_EXT_FW_FEATURE =3D 0x46574654, =20 /* Experimentals extensions must lie within this range */ SBI_EXT_EXPERIMENTAL_START =3D 0x08000000, @@ -236,6 +237,16 @@ enum sbi_pmu_ctr_type { /* Flags defined for counter stop function */ #define SBI_PMU_STOP_FLAG_RESET (1 << 0) =20 +/* SBI function IDs for FW feature extension */ +#define SBI_EXT_FW_FEATURE_SET 0x0 +#define SBI_EXT_FW_FEATURE_GET 0x1 + +enum sbi_fw_features_t { + SBI_FW_FEATURE_MISALIGNED_DELEG =3D 0, + + SBI_FW_FEATURE_MAX, +}; + #define SBI_SPEC_VERSION_DEFAULT 0x1 #define SBI_SPEC_VERSION_MAJOR_SHIFT 24 #define SBI_SPEC_VERSION_MAJOR_MASK 0x7f diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index c672c8ba9a2a..3be48791455a 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -494,6 +494,16 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *= cpu_mask, } EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid); =20 +static int sbi_fw_feature_set(enum sbi_fw_features_t feature, bool set) +{ + struct sbiret ret; + + ret =3D sbi_ecall(SBI_EXT_FW_FEATURE, SBI_EXT_FW_FEATURE_SET, feature, + set, 0, 0, 0, 0); + + return sbi_err_map_linux_errno(ret.error); +} + static void sbi_srst_reset(unsigned long type, unsigned long reason) { sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason, @@ -624,6 +634,17 @@ void __init sbi_init(void) sbi_srst_reboot_nb.priority =3D 192; 
register_restart_handler(&sbi_srst_reboot_nb); } + /* + * TODO: this will likely need to be updated when SBI extension + * is ratified + */ + if ((sbi_spec_version >=3D sbi_mk_version(1, 0)) && + (sbi_probe_extension(SBI_EXT_FW_FEATURE) > 0)) { + pr_info("SBI FW_FEATURE extension detected\n"); + /* Request misaligned handling delegation */ + sbi_fw_feature_set(SBI_FW_FEATURE_MISALIGNED_DELEG, + true); + } } else { __sbi_set_timer =3D __sbi_set_timer_v01; __sbi_send_ipi =3D __sbi_send_ipi_v01; --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 03379EB64DA for ; Tue, 4 Jul 2023 14:12:05 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231565AbjGDOMD (ORCPT ); Tue, 4 Jul 2023 10:12:03 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:40896 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231523AbjGDOL6 (ORCPT ); Tue, 4 Jul 2023 10:11:58 -0400 Received: from mail-pl1-x62d.google.com (mail-pl1-x62d.google.com [IPv6:2607:f8b0:4864:20::62d]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 20FE8FF for ; Tue, 4 Jul 2023 07:11:48 -0700 (PDT) Received: by mail-pl1-x62d.google.com with SMTP id d9443c01a7336-1b87b9a8652so3779775ad.1 for ; Tue, 04 Jul 2023 07:11:48 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479907; x=1691071907; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=UuTcFW+DQo5Mx+nRwuPd1unzmxWKunsazuyjaErlxZk=; b=NIi0ovTyku0azaEtW/PoaDIbjTtvXx9iGqBIWZyGJFx7jlmzPxLeWyWo4Z7Nvqe5GT PJ6J7AGKB2u7OCWCJCA2QnAKpKoG825dHzY44D/uGqudXs1r6VV6/L9nKs2skx7U2P/o 
GOkfg1mIAmkhd2xQfpF9HurFbZBkJKfIemOH3JcLYecUvwRQBT0xBrz1u7/UJAXGu/bL e/7Y9aDfDVkrW3qNx037QCRpPX/DugMi0ZVEgQYVB0kbjFXxoIDV9zCfdC3gYyG47X2h ZokJovqD2e6r8bypNTml+4wi0ALsby9mOCF7WN6cal2vXu3QsshKNoNX56BNB4T6nSqU JBEQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479907; x=1691071907; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=UuTcFW+DQo5Mx+nRwuPd1unzmxWKunsazuyjaErlxZk=; b=LeK4a/MZu54R3bW7E5GtIU1axHqrvZgC4WTaSkuP1Q2KtETmJsJUguNloRCtwDpb41 33rsS/V3QuMS4vA3W4mlZJDp/+CsLfiGbwvTpj5nQygrXxuXD9GBWJWmPHue45oz8GVn Hoc1dpkuw7MJU4jrv5zdiUbR+918w5wm2ROpn9UkZoOreX4SyHiCaXtOzdGeXugjLTs+ ql9WZT6v/3UUiyOAPVXtsCTC93QCRsdrDI0kP+GytmUfE5vcSGAdvhtD9IH3obIyNJW0 AK39jLP86iU7BzhIz7HBn+KODSLF9E/fRyoz+hdkl0ALNdX85ZT2ZKDz0RHvs6hmJySy crdw== X-Gm-Message-State: ABy/qLbMR8X2DH2ulNPZh5wuGcZD0rqpkeRk7RQlDgE6ItqW1rUepJeO hQQYxPUSUjW/jtxly0yidWnxRw== X-Google-Smtp-Source: APBJJlHMbxfzESqCVaJXPyMu3D8RPz7R0/e0tLfbYZwlMqUdJidHRdMR0LJoHlFFQnNAxcaIIBBZvg== X-Received: by 2002:a17:902:b210:b0:1b1:9272:55e2 with SMTP id t16-20020a170902b21000b001b1927255e2mr13376436plr.3.1688479907657; Tue, 04 Jul 2023 07:11:47 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.11.31 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:11:47 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. 
Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 7/9] riscv: report misaligned accesses emulation to hwprobe Date: Tue, 4 Jul 2023 16:09:22 +0200 Message-Id: <20230704140924.315594-8-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org hwprobe provides a way to report if misaligned access are emulated. In order to correctly populate that feature, if the SBI delegated us misaligned access handling, then we can check if it actually traps when doing a misaligned access. This can be checked using an exception table entry which will actually be used when a misaligned access is done from kernel mode. 
Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/include/asm/cpufeature.h | 2 ++ arch/riscv/kernel/setup.c | 2 ++ arch/riscv/kernel/traps_misaligned.c | 32 ++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/c= pufeature.h index 808d5403f2ac..7e968499db49 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -20,4 +20,6 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); =20 DECLARE_PER_CPU(long, misaligned_access_speed); =20 +void __init misaligned_emulation_init(void); + #endif diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 36b026057503..820a8158e4f7 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -23,6 +23,7 @@ =20 #include #include +#include #include #include #include @@ -284,6 +285,7 @@ void __init setup_arch(char **cmdline_p) =20 init_resources(); sbi_init(); + misaligned_emulation_init(); =20 #ifdef CONFIG_KASAN kasan_init(); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 39ec6caa6234..243ef9314734 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include =20 #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f @@ -441,3 +443,33 @@ int handle_misaligned_store(struct pt_regs *regs) =20 return 0; } + +void __init misaligned_emulation_init(void) +{ + int cpu; + unsigned long emulated =3D 1, tmp_var; + + /* Temporarily disable unaligned accesses support so that we fixup the + * exception for code below. 
+ */ + unaligned_enabled =3D 0; + + __asm__ __volatile__ ( + "1:\n" + " ld %[tmp], 1(%[ptr])\n" + " li %[emulated], 0\n" + "2:\n" + _ASM_EXTABLE(1b, 2b) + : [emulated] "+r" (emulated), [tmp] "=3Dr" (tmp_var) + : [ptr] "r" (&tmp_var) + : "memory" ); + + unaligned_enabled =3D 1; + if (!emulated) + return; + + for_each_possible_cpu(cpu) { + per_cpu(misaligned_access_speed, cpu) =3D + RISCV_HWPROBE_MISALIGNED_EMULATED; + } +} --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8172BEB64DD for ; Tue, 4 Jul 2023 14:12:16 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231154AbjGDOMP (ORCPT ); Tue, 4 Jul 2023 10:12:15 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:40944 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231616AbjGDOMH (ORCPT ); Tue, 4 Jul 2023 10:12:07 -0400 Received: from mail-pl1-x633.google.com (mail-pl1-x633.google.com [IPv6:2607:f8b0:4864:20::633]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 7599010DA for ; Tue, 4 Jul 2023 07:12:04 -0700 (PDT) Received: by mail-pl1-x633.google.com with SMTP id d9443c01a7336-1b7e0904a3aso6750085ad.0 for ; Tue, 04 Jul 2023 07:12:04 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479924; x=1691071924; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=AzBMq7R5xdVR7lBpakM9B1ewv3x8Aa9s+j6b6UY73u8=; b=hvWRzizVS0S/x2p6tjb3F48jVU/dzlutZSlw8XrL7FD9+thr9gbw2aaqzN6tsZKoSd 2Bjqk1qks5xIwsaJ5NMkcL3bSJO3KnFhsi8534y5jjbxdzdwq4hYpuTmR0c/k45hkPI2 SSdqBN/57ipAVGH1Gmk3Z0hN+ryPyEr6Ch9u/lhiHjq9W0Pgu99Ycd3dgy18oVMArTzR 
FDcNwx83BA3zeK//pwth3Bbji/waAPqqHxWVDJ1Vp+S5bLt98xaY0nfYjzC4wA1iq9J4 3Q8qbgzRw4CdhSpw1qzUM16Xpz1Oqy8g8bQnaf8MFIkxMvSrkE9rIuIugOSVUf1/Mp3k YBDw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479924; x=1691071924; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=AzBMq7R5xdVR7lBpakM9B1ewv3x8Aa9s+j6b6UY73u8=; b=ULOz/atDkVU7tYqPTi1QCQUAkmT7Zjo1TO2Qe700oIG0frLTQvlrbPsm3Hu4uN7Ttc OetnjhDurgNMbCbgyYlWc9JftKg+Z2Qy1INv8QNvcn98p5AB1Ug43Q/MdN6WyIIRNEAi X/Ty2mQcNWHl1ZU4vr04tn0/b/N52QsnFCkN22YbwFlWPugxPYVCVvs1/BYMgQY+iWl9 0vlTuSJLsF7JyZsSgxeoVwkHQxEgNVvxYyACW/Dx6RYfm3v8VymNS5VOXmIaEEWBFnPb tmp6teV9dtsUDoA7vZQWW0TqcQkesJ+lxpoODJrlmChGfbIEFU2pQrlNjQ8tVspJO9YX FmeA== X-Gm-Message-State: ABy/qLa+JO7pVulNeIThuseGGFC8eLcNq3Vr+/JHTET3o6nB/b7rMvt8 n0EzkJpqtEb7m9Zwy7D+qyxQdw== X-Google-Smtp-Source: APBJJlE8+jXKSctC4pk5eGzZC4+H+KcnNBUni6PU53miDEBuzspFdXv1MB6i2skH6gE/0p6Td5KxSA== X-Received: by 2002:a17:903:786:b0:1b3:ec39:f42c with SMTP id kn6-20020a170903078600b001b3ec39f42cmr13861775plb.5.1688479923889; Tue, 04 Jul 2023 07:12:03 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.11.48 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:12:03 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. 
Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 8/9] riscv: add support for PR_SET_UNALIGN and PR_GET_UNALIGN Date: Tue, 4 Jul 2023 16:09:23 +0200 Message-Id: <20230704140924.315594-9-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Now that trap support is ready to handle misalignment errors in S-mode, allow the user to control the behavior of misalignment accesses using prctl(). Add an align_ctl flag in thread_struct which will be used to determine if we should SIGBUS the process or not on such fault. Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/include/asm/cpufeature.h | 8 ++++++++ arch/riscv/include/asm/processor.h | 9 +++++++++ arch/riscv/kernel/process.c | 18 ++++++++++++++++++ arch/riscv/kernel/traps_misaligned.c | 7 +++++++ 4 files changed, 42 insertions(+) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/c= pufeature.h index 7e968499db49..e2fd6fc7157f 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -6,6 +6,8 @@ #ifndef _ASM_CPUFEATURE_H #define _ASM_CPUFEATURE_H =20 +#include + /* * These are probed via a device_initcall(), via either the SBI or directly * from the corresponding CSRs. 
@@ -20,6 +22,12 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); =20 DECLARE_PER_CPU(long, misaligned_access_speed); =20 +static inline bool misaligned_access_emulated(void) +{ + return per_cpu(misaligned_access_speed, 0) =3D=3D + RISCV_HWPROBE_MISALIGNED_EMULATED; +} + void __init misaligned_emulation_init(void); =20 #endif diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/pr= ocessor.h index 94a0590c6971..4e6667d5ca68 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -7,6 +7,7 @@ #define _ASM_RISCV_PROCESSOR_H =20 #include +#include =20 #include =20 @@ -39,6 +40,7 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; + unsigned long align_ctl; }; =20 /* Whitelist the fstate from the task_struct for hardened usercopy */ @@ -51,6 +53,7 @@ static inline void arch_thread_struct_whitelist(unsigned = long *offset, =20 #define INIT_THREAD { \ .sp =3D sizeof(init_stack) + (long)&init_stack, \ + .align_ctl =3D PR_UNALIGN_NOPRINT, \ } =20 #define task_pt_regs(tsk) \ @@ -80,6 +83,12 @@ int riscv_of_parent_hartid(struct device_node *node, uns= igned long *hartid); extern void riscv_fill_hwcap(void); extern int arch_dup_task_struct(struct task_struct *dst, struct task_struc= t *src); =20 +extern int get_unalign_ctl(struct task_struct *, unsigned long addr); +extern int set_unalign_ctl(struct task_struct *, unsigned int val); + +#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr)) +#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val)) + #endif /* __ASSEMBLY__ */ =20 #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index e2a060066730..88a71359396b 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -24,6 +24,7 @@ #include #include #include +#include =20 register unsigned long gp_in_global __asm__("gp"); =20 @@ -40,6 +41,23 
@@ void arch_cpu_idle(void) cpu_do_idle(); } =20 +int set_unalign_ctl(struct task_struct *tsk, unsigned int val) +{ + if (!misaligned_access_emulated()) + return -EINVAL; + + tsk->thread.align_ctl =3D val; + return 0; +} + +int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) +{ + if (!misaligned_access_emulated()) + return -EINVAL; + + return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr); +} + void __show_regs(struct pt_regs *regs) { show_regs_print_info(KERN_DEFAULT); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 243ef9314734..5fb6758b0bf9 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -9,6 +9,7 @@ #include #include #include +#include =20 #include #include @@ -305,6 +306,9 @@ int handle_misaligned_load(struct pt_regs *regs) if (!unaligned_enabled) return -1; =20 + if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS)) + return -1; + if (get_insn(regs, epc, &insn)) return -1; =20 @@ -398,6 +402,9 @@ int handle_misaligned_store(struct pt_regs *regs) if (!unaligned_enabled) return -1; =20 + if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS)) + return -1; + if (get_insn(regs, epc, &insn)) return -1; =20 --=20 2.40.1 From nobody Sun Feb 8 09:10:55 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CEFAAEB64D9 for ; Tue, 4 Jul 2023 14:12:38 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231635AbjGDOMh (ORCPT ); Tue, 4 Jul 2023 10:12:37 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:41696 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231603AbjGDOMa (ORCPT ); Tue, 4 Jul 2023 10:12:30 -0400 Received: from mail-pl1-x629.google.com 
(mail-pl1-x629.google.com [IPv6:2607:f8b0:4864:20::629]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 1717E171B for ; Tue, 4 Jul 2023 07:12:21 -0700 (PDT) Received: by mail-pl1-x629.google.com with SMTP id d9443c01a7336-1b867f9198dso5151025ad.0 for ; Tue, 04 Jul 2023 07:12:21 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=rivosinc-com.20221208.gappssmtp.com; s=20221208; t=1688479940; x=1691071940; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:from:to:cc:subject:date :message-id:reply-to; bh=BAz1Akm/B9B/9fUXobAvdXu5vhNqRRhd34VReZOiu/0=; b=4kOCvuxSYIC+H/aG5g5WnJKyEXArtCDX3iPuHYPTABc2rObOGhpnKGYn8nwgQv5AMG tZJgiSUnNHIVW+QWOfEKQJZiG/ERa3ui57/aBwIs2gzEYHMxbZDKnU6oiutSqknnp8em b2CiSYBDHag4AUqF0eAZFKbyUEMnUIXcd26P7GDot5CYybEK+zKbxYLrBE+uXxqBBdyt c3SHqMAkkIYMkAGkC/cEKHPPR0/fpAoTNPc7n/PxknO1y/dnT80pWP5gTVLnx1UhS0eG QEhRXGd6/89Hi0SjyI23vM+wgSpl3FyLaehNEH0jiRw9KHEKqFNrkf/ST02yc9JQUNy7 djdg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20221208; t=1688479940; x=1691071940; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc :subject:date:message-id:reply-to; bh=BAz1Akm/B9B/9fUXobAvdXu5vhNqRRhd34VReZOiu/0=; b=le+ETORYWvB2K/2iQjszgt2gUnaiE47yVCQwggxnvOeuN/Xn+CuU45FbhEVhBL5tdT WxOpN2v6PvwZArdDc3JrIQnomPohbQZ8aQmW2AD3m0Plcnmeuh3SovL782yQMbJ3P0Vv vd4Tfrg3kcZtfefKW8FplByRpWtKAVOlIGlq68yVXvtD4EEdG5nMTWJTxNWl64xaJjiN +F33fGfGkcG61bxxK7eAZprpKZnjtlnN+CB7NgtXMmeyJ+3xtLlg9/jQxcauqVMfzsDY I6BcVEqu42BgWtDjVLUWayvSJiA0Ys1jcu/X6Qe/BfqCXyc0yj24YWrCydUv52cFp6U8 mQvw== X-Gm-Message-State: ABy/qLa2ETFxCuZ6htAMfUtjzmiUtyhhGDmO0c3OsznQBud+aVraG9OU IPhROBwR3GU8Ro3HzI+Kd7wbSg== X-Google-Smtp-Source: APBJJlHyBd5vK43azE7vHDn25p6SsQ0owCfsKSBoiqP1RbBD7jN5YfJE4aMAEKzgN6x1ApTeqn6yig== X-Received: by 2002:a17:903:3051:b0:1b8:1591:9f81 with SMTP id u17-20020a170903305100b001b815919f81mr13234157pla.4.1688479940252; 
Tue, 04 Jul 2023 07:12:20 -0700 (PDT) Received: from carbon-x1.home ([2a01:cb15:81c2:f100:ef7b:e0f7:d376:e859]) by smtp.gmail.com with ESMTPSA id o1-20020a170902bcc100b001b042c0939fsm17183735pls.99.2023.07.04.07.12.04 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Tue, 04 Jul 2023 07:12:19 -0700 (PDT) From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= To: Paul Walmsley , Palmer Dabbelt , Albert Ou Cc: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= , Stafford Horne , Brian Cain , Kefeng Wang , "Russell King (Oracle)" , Michael Ellerman , Sunil V L , Anup Patel , Atish Patra , Andrew Jones , Conor Dooley , Heiko Stuebner , Guo Ren , Alexandre Ghiti , Masahiro Yamada , Xianting Tian , Sia Jee Heng , Li Zhengyu , Jisheng Zhang , "Gautham R. Shenoy" , Mark Rutland , Peter Zijlstra , Marc Zyngier , =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= , Krzysztof Kozlowski , Evan Green , linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [RFC V2 PATCH 9/9] riscv: add floating point insn support to misaligned access emulation Date: Tue, 4 Jul 2023 16:09:24 +0200 Message-Id: <20230704140924.315594-10-cleger@rivosinc.com> X-Mailer: git-send-email 2.40.1 In-Reply-To: <20230704140924.315594-1-cleger@rivosinc.com> References: <20230704140924.315594-1-cleger@rivosinc.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org This support is partially based of openSBI misaligned emulation floating point instruction support. It provides support for the existing floating point instructions (both for 32/64 bits as well as compressed ones). Since floating point registers are not part of the pt_regs struct, we need to modify them directly using some assembly. We also dirty the pt_regs status in case we modify them to be sure context switch will save FP state. With this support, Linux is on par with openSBI support. 
Signed-off-by: Cl=C3=A9ment L=C3=A9ger --- arch/riscv/kernel/fpu.S | 117 +++++++++++++++++++++ arch/riscv/kernel/traps_misaligned.c | 152 ++++++++++++++++++++++++++- 2 files changed, 265 insertions(+), 4 deletions(-) diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index dd2205473de7..2785badb247c 100644 --- a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -104,3 +104,120 @@ ENTRY(__fstate_restore) csrc CSR_STATUS, t1 ret ENDPROC(__fstate_restore) + +#define get_f32(which) fmv.x.s a0, which; j 2f +#define put_f32(which) fmv.s.x which, a1; j 2f +#if __riscv_xlen =3D=3D 64 +# define get_f64(which) fmv.x.d a0, which; j 2f +# define put_f64(which) fmv.d.x which, a1; j 2f +#else +# define get_f64(which) fsd which, 0(a1); j 2f +# define put_f64(which) fld which, 0(a1); j 2f +#endif + +.macro fp_access_prologue + /* + * Compute jump offset to store the correct FP register since we don't + * have indirect FP register access + */ + sll t0, a0, 3 + la t2, 1f + add t0, t0, t2 + li t1, SR_FS + csrs CSR_STATUS, t1 + jr t0 +1: +.endm + +.macro fp_access_epilogue +2: + csrc CSR_STATUS, t1 + ret +.endm + +#define fp_access_body(__access_func) \ + __access_func(f0); \ + __access_func(f1); \ + __access_func(f2); \ + __access_func(f3); \ + __access_func(f4); \ + __access_func(f5); \ + __access_func(f6); \ + __access_func(f7); \ + __access_func(f8); \ + __access_func(f9); \ + __access_func(f10); \ + __access_func(f11); \ + __access_func(f12); \ + __access_func(f13); \ + __access_func(f14); \ + __access_func(f15); \ + __access_func(f16); \ + __access_func(f17); \ + __access_func(f18); \ + __access_func(f19); \ + __access_func(f20); \ + __access_func(f21); \ + __access_func(f22); \ + __access_func(f23); \ + __access_func(f24); \ + __access_func(f25); \ + __access_func(f26); \ + __access_func(f27); \ + __access_func(f28); \ + __access_func(f29); \ + __access_func(f30); \ + __access_func(f31) + + +/* + * Disable compressed instructions set to keep a constant offset 
between FP + * load/store/move instructions + */ +.option norvc +/* + * put_f32_reg - Set a FP register from a register containing the value + * a0 =3D FP register index to be set + * a1 =3D value to be loaded in the FP register + */ +SYM_FUNC_START(put_f32_reg) + fp_access_prologue + fp_access_body(put_f32) + fp_access_epilogue +SYM_FUNC_END(put_f32_reg) + +/* + * get_f32_reg - Get a FP register value and return it + * a0 =3D FP register index to be retrieved + */ +SYM_FUNC_START(get_f32_reg) + fp_access_prologue + fp_access_body(get_f32) + fp_access_epilogue +SYM_FUNC_END(get_f32_reg) + +/* + * put_f64_reg - Set a 64 bits FP register from a value or a pointer. + * a0 =3D FP register index to be set + * a1 =3D value/pointer to be loaded in the FP register (when xlen =3D=3D = 32 bits, we + * load the value to a pointer). + */ +SYM_FUNC_START(put_f64_reg) + fp_access_prologue + fp_access_body(put_f64) + fp_access_epilogue +SYM_FUNC_END(put_f64_reg) + +/* + * get_f64_reg - Get a 64 bits FP register value and returned it or store = it to + * a pointer. + * a0 =3D FP register index to be retrieved + * a1 =3D If xlen =3D=3D 32, pointer which should be loaded with the FP re= gister value + * or unused if xlen =3D=3D 64. 
In which case the FP register value is ret= urned + * through a0 + */ +SYM_FUNC_START(get_f64_reg) + fp_access_prologue + fp_access_body(get_f64) + fp_access_epilogue +SYM_FUNC_END(get_f64_reg) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps= _misaligned.c index 5fb6758b0bf9..c4c4672a4554 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -156,6 +156,115 @@ #define PRECISION_S 0 #define PRECISION_D 1 =20 +#ifdef CONFIG_FPU + +#define FP_GET_RD(insn) (insn >> 7 & 0x1F) + +extern void put_f32_reg(unsigned long fp_reg, unsigned long value); + +static int set_f32_rd(unsigned long insn, struct pt_regs *regs, + unsigned long val) +{ + unsigned long fp_reg =3D FP_GET_RD(insn); + + put_f32_reg(fp_reg, val); + regs->status |=3D SR_FS_DIRTY; + + return 0; +} + +extern void put_f64_reg(unsigned long fp_reg, unsigned long value); + +static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) +{ + unsigned long fp_reg =3D FP_GET_RD(insn); + unsigned long value; + +#if __riscv_xlen =3D=3D 32 + value =3D (unsigned long) &val; +#else + value =3D val; +#endif + put_f64_reg(fp_reg, value); + regs->status |=3D SR_FS_DIRTY; + + return 0; +} + +#if __riscv_xlen =3D=3D 32 +extern void get_f64_reg(unsigned long fp_reg, u64 *value); + +static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + unsigned long fp_reg =3D (insn >> fp_reg_offset) & 0x1F; + u64 val; + + get_f64_reg(fp_reg, &val); + regs->status |=3D SR_FS_DIRTY; + + return val; +} +#else + +extern unsigned long get_f64_reg(unsigned long fp_reg); + +static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + unsigned long fp_reg =3D (insn >> fp_reg_offset) & 0x1F; + unsigned long val; + + val =3D get_f64_reg(fp_reg); + regs->status |=3D SR_FS_DIRTY; + + return val; +} + +#endif + +extern unsigned long get_f32_reg(unsigned long fp_reg); + +static unsigned long 
get_f32_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + unsigned long fp_reg =3D (insn >> fp_reg_offset) & 0x1F; + unsigned long val; + + val =3D get_f32_reg(fp_reg); + regs->status |=3D SR_FS_DIRTY; + + return val; +} + +#else /* CONFIG_FPU */ +static void set_f32_rd(unsigned long insn, struct pt_regs *regs, + unsigned long val) {} + +static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) = {} + +static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + return 0; +} + +static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + return 0; +} + +#endif + +#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs)) +#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs)) +#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs)) + +#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs)) +#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs)) +#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs)) + #ifdef CONFIG_RISCV_M_MODE static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) { @@ -374,15 +483,21 @@ int handle_misaligned_load(struct pt_regs *regs) return -1; } =20 + if (!IS_ENABLED(CONFIG_FPU) && fp) + return -EOPNOTSUPP; + val.data_u64 =3D 0; for (i =3D 0; i < len; i++) { if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i])) return -1; } =20 - if (fp) - return -1; - SET_RD(insn, regs, val.data_ulong << shift >> shift); + if (!fp) + SET_RD(insn, regs, val.data_ulong << shift >> shift); + else if (len =3D=3D 8) + set_f64_rd(insn, regs, val.data_u64); + else + set_f32_rd(insn, regs, val.data_ulong); =20 regs->epc =3D epc + INSN_LEN(insn); =20 @@ -395,7 +510,7 @@ int handle_misaligned_store(struct pt_regs *regs) unsigned long epc =3D regs->epc; unsigned long insn; unsigned long addr =3D regs->badaddr; - int i, len =3D 0; + int i, len =3D 0, fp =3D 0; =20 
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); =20 @@ -418,6 +533,14 @@ int handle_misaligned_store(struct pt_regs *regs) } else if ((insn & INSN_MASK_SD) =3D=3D INSN_MATCH_SD) { len =3D 8; #endif + } else if ((insn & INSN_MASK_FSD) =3D=3D INSN_MATCH_FSD) { + fp =3D 1; + len =3D 8; + val.data_u64 =3D GET_F64_RS2(insn, regs); + } else if ((insn & INSN_MASK_FSW) =3D=3D INSN_MATCH_FSW) { + fp =3D 1; + len =3D 4; + val.data_ulong =3D GET_F32_RS2(insn, regs); } else if ((insn & INSN_MASK_SH) =3D=3D INSN_MATCH_SH) { len =3D 2; #if defined(CONFIG_64BIT) @@ -436,11 +559,32 @@ int handle_misaligned_store(struct pt_regs *regs) ((insn >> SH_RD) & 0x1f)) { len =3D 4; val.data_ulong =3D GET_RS2C(insn, regs); + } else if ((insn & INSN_MASK_C_FSD) =3D=3D INSN_MATCH_C_FSD) { + fp =3D 1; + len =3D 8; + val.data_u64 =3D GET_F64_RS2S(insn, regs); + } else if ((insn & INSN_MASK_C_FSDSP) =3D=3D INSN_MATCH_C_FSDSP) { + fp =3D 1; + len =3D 8; + val.data_u64 =3D GET_F64_RS2C(insn, regs); +#if !defined(CONFIG_64BIT) + } else if ((insn & INSN_MASK_C_FSW) =3D=3D INSN_MATCH_C_FSW) { + fp =3D 1; + len =3D 4; + val.data_ulong =3D GET_F32_RS2S(insn, regs); + } else if ((insn & INSN_MASK_C_FSWSP) =3D=3D INSN_MATCH_C_FSWSP) { + fp =3D 1; + len =3D 4; + val.data_ulong =3D GET_F32_RS2C(insn, regs); +#endif } else { regs->epc =3D epc; return -1; } =20 + if (!IS_ENABLED(CONFIG_FPU) && fp) + return -EOPNOTSUPP; + for (i =3D 0; i < len; i++) { if (store_u8(regs, (void *)(addr + i), val.data_bytes[i])) return -1; --=20 2.40.1