From: Jisheng Zhang <jszhang@kernel.org>
To: Paul Walmsley, Palmer Dabbelt, Albert Ou
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org
Subject: [PATCH] riscv: Use asm-generic for {read,write}{bwlq} and their relaxed variant
Date: Thu, 23 Nov 2023 22:20:03 +0800
Message-Id: <20231123142003.1759-1-jszhang@kernel.org>
X-Mailer: git-send-email 2.40.0

The asm-generic implementation is functionally identical to the riscv
version.

Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
---
 arch/riscv/include/asm/mmio.h | 62 +----------------------------------
 1 file changed, 1 insertion(+), 61 deletions(-)
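Note for reviewers: the sketch below paraphrases the generic accessors in
include/asm-generic/io.h that take over once the riscv definitions are
gone. It is trimmed and the log_*_mmio() tracing hooks are left out, so
it is not a verbatim quote of that header. The generic readl()/writel()
are built on the same __io_br()/__io_ar()/__io_bw()/__io_aw() hooks this
header keeps defining, and the relaxed variants reduce to the bare
endianness conversion, just like the removed macros whose
__io_rbr()/__io_rar() hooks were already empty -- which is why the
behaviour is expected to be identical. readl(), writel() and
readl_relaxed() stand in for the whole {b,w,l,q} family:

/*
 * Rough, trimmed paraphrase of include/asm-generic/io.h -- not a
 * verbatim quote (the log_*_mmio() tracing hooks are omitted).
 */
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	__io_br();				/* riscv: no-op */
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);				/* riscv: fence i,ir */
	return val;
}

static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();				/* riscv: fence w,o */
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();				/* riscv: mmiowb_set_pending() */
}

/*
 * The relaxed variants skip the barrier hooks entirely, matching the
 * removed readl_relaxed() whose __io_rbr()/__io_rar() were empty.
 */
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	return __le32_to_cpu((__le32 __force)__raw_readl(addr));
}

Since arch/riscv/include/asm/io.h pulls in asm-generic/io.h and the
generic definitions are guarded by #ifndef readl etc., they are picked
up automatically once the macros below are removed.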
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 4c58ee7f95ec..a491590593ca 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -80,54 +80,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * Unordered I/O memory access primitives. These are even more relaxed than
- * the relaxed versions, as they don't even order accesses between successive
- * operations to the I/O regions.
- */
-#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
-#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-
-#define writeb_cpu(v, c)	((void)__raw_writeb((v), (c)))
-#define writew_cpu(v, c)	((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
-#define writel_cpu(v, c)	((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
-
-#ifdef CONFIG_64BIT
-#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
-#define writeq_cpu(v, c)	((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
-#endif
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses to the same peripheral. Since the
- * platform specification defines that all I/O regions are strongly ordered on
- * channel 0, no explicit fences are required to enforce this ordering.
- */
-/* FIXME: These are now the same as asm-generic */
-#define __io_rbr()		do {} while (0)
-#define __io_rar()		do {} while (0)
-#define __io_rbw()		do {} while (0)
-#define __io_raw()		do {} while (0)
-
-#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
-#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
-#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
-
-#define writeb_relaxed(v, c)	({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
-#define writew_relaxed(v, c)	({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
-#define writel_relaxed(v, c)	({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
-
-#ifdef CONFIG_64BIT
-#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
-#define writeq_relaxed(v, c)	({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
-#endif
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any following
- * Normal memory read and delay() loop. Writes are ordered relative to any
- * prior Normal memory write. The memory barriers here are necessary as RISC-V
+ * I/O barriers. The memory barriers here are necessary as RISC-V
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
@@ -135,17 +88,4 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
 #define __io_aw()	mmiowb_set_pending()
 
-#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
-#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
-#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
-
-#define writeb(v, c)	({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
-#define writew(v, c)	({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
-#define writel(v, c)	({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
-
-#ifdef CONFIG_64BIT
-#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
-#define writeq(v, c)	({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
-#endif
-
 #endif /* _ASM_RISCV_MMIO_H */
-- 
2.42.0