Based on their definitions on other architectures, the functions
{read,write}{b,w,l,q}_cpu() are not required to be memory-ordered
atomic operations in Xen. They can therefore be used instead of
{read,write}{b,w,l,q}(), leaving it to the caller to decide whether
additional fences are needed before or after {read,write}_atomic().
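A caller that does need ordering can pair the relaxed access with an
explicit barrier itself. As a minimal, purely illustrative sketch (the
flag/data names and consume() are made up; which fence, if any, is
needed depends on the call site):

    /* Writer: publish the data before setting the flag. */
    data = value;
    smp_wmb();
    write_atomic(&flag, 1);

    /* Reader: only consume the data after observing the flag. */
    if ( read_atomic(&flag) )
    {
        smp_rmb();
        consume(data);
    }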
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V6:
- revert the changes to the _write_atomic() prototype and to the definition of write_atomic().
- update the commit message.
---
Changes in v5:
- new patch.
---
xen/arch/riscv/include/asm/atomic.h | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
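For context when reading the hunks below: the two accessor flavours
differ only in the implied fences. Roughly (loosely following the Linux
RISC-V pattern the Xen header derives from, not the verbatim Xen
definitions):

    /* Relaxed: a plain access with no implied ordering. */
    #define readl_cpu(c)  le32_to_cpu(__raw_readl(c))

    /* Ordered: the relaxed access bracketed by I/O fences. */
    #define readl(c) ({ uint32_t v; __io_br(); v = readl_cpu(c); __io_ar(v); v; })

Switching read_atomic_size() and _write_atomic() to the _cpu variants
therefore drops the implied fences and leaves ordering entirely to the
caller.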
diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
index 31b91a79c8..3c6bd86406 100644
--- a/xen/arch/riscv/include/asm/atomic.h
+++ b/xen/arch/riscv/include/asm/atomic.h
@@ -31,21 +31,17 @@
void __bad_atomic_size(void);
-/*
- * Legacy from Linux kernel. For some reason they wanted to have ordered
- * read/write access. Thereby read* is used instead of read*_cpu()
- */
static always_inline void read_atomic_size(const volatile void *p,
void *res,
unsigned int size)
{
switch ( size )
{
- case 1: *(uint8_t *)res = readb(p); break;
- case 2: *(uint16_t *)res = readw(p); break;
- case 4: *(uint32_t *)res = readl(p); break;
+ case 1: *(uint8_t *)res = readb_cpu(p); break;
+ case 2: *(uint16_t *)res = readw_cpu(p); break;
+ case 4: *(uint32_t *)res = readl_cpu(p); break;
#ifndef CONFIG_RISCV_32
- case 8: *(uint32_t *)res = readq(p); break;
+ case 8: *(uint32_t *)res = readq_cpu(p); break;
#endif
default: __bad_atomic_size(); break;
}
@@ -58,15 +54,16 @@ static always_inline void read_atomic_size(const volatile void *p,
})
static always_inline void _write_atomic(volatile void *p,
- unsigned long x, unsigned int size)
+ unsigned long x,
+ unsigned int size)
{
switch ( size )
{
- case 1: writeb(x, p); break;
- case 2: writew(x, p); break;
- case 4: writel(x, p); break;
+ case 1: writeb_cpu(x, p); break;
+ case 2: writew_cpu(x, p); break;
+ case 4: writel_cpu(x, p); break;
#ifndef CONFIG_RISCV_32
- case 8: writeq(x, p); break;
+ case 8: writeq_cpu(x, p); break;
#endif
default: __bad_atomic_size(); break;
}
--
2.46.0
On 02/09/2024 6:01 pm, Oleksii Kurochko wrote:
> diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
> index 31b91a79c8..3c6bd86406 100644
> --- a/xen/arch/riscv/include/asm/atomic.h
> +++ b/xen/arch/riscv/include/asm/atomic.h
> @@ -31,21 +31,17 @@
>
> void __bad_atomic_size(void);
>
> -/*
> - * Legacy from Linux kernel. For some reason they wanted to have ordered
> - * read/write access. Thereby read* is used instead of read*_cpu()
> - */
> static always_inline void read_atomic_size(const volatile void *p,
> void *res,
> unsigned int size)
> {
> switch ( size )
> {
> - case 1: *(uint8_t *)res = readb(p); break;
> - case 2: *(uint16_t *)res = readw(p); break;
> - case 4: *(uint32_t *)res = readl(p); break;
> + case 1: *(uint8_t *)res = readb_cpu(p); break;
> + case 2: *(uint16_t *)res = readw_cpu(p); break;
> + case 4: *(uint32_t *)res = readl_cpu(p); break;
> #ifndef CONFIG_RISCV_32
> - case 8: *(uint32_t *)res = readq(p); break;
> + case 8: *(uint32_t *)res = readq_cpu(p); break;
This cast looks suspiciously like it's wrong already in staging...
~Andrew
On Tue, 2024-09-03 at 15:21 +0100, Andrew Cooper wrote:
> On 02/09/2024 6:01 pm, Oleksii Kurochko wrote:
> > diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
> > index 31b91a79c8..3c6bd86406 100644
> > --- a/xen/arch/riscv/include/asm/atomic.h
> > +++ b/xen/arch/riscv/include/asm/atomic.h
> > @@ -31,21 +31,17 @@
> >
> > void __bad_atomic_size(void);
> >
> > -/*
> > - * Legacy from Linux kernel. For some reason they wanted to have ordered
> > - * read/write access. Thereby read* is used instead of read*_cpu()
> > - */
> > static always_inline void read_atomic_size(const volatile void *p,
> > void *res,
> > unsigned int size)
> > {
> > switch ( size )
> > {
> > - case 1: *(uint8_t *)res = readb(p); break;
> > - case 2: *(uint16_t *)res = readw(p); break;
> > - case 4: *(uint32_t *)res = readl(p); break;
> > + case 1: *(uint8_t *)res = readb_cpu(p); break;
> > + case 2: *(uint16_t *)res = readw_cpu(p); break;
> > + case 4: *(uint32_t *)res = readl_cpu(p); break;
> > #ifndef CONFIG_RISCV_32
> > - case 8: *(uint32_t *)res = readq(p); break;
> > + case 8: *(uint32_t *)res = readq_cpu(p); break;
>
> This cast looks suspiciously like it's wrong already in staging...
Thanks for noticing that; it really should be uint64_t. I'll update
that in the next patch version.
~ Oleksii
On 04/09/2024 11:27 am, oleksii.kurochko@gmail.com wrote:
> On Tue, 2024-09-03 at 15:21 +0100, Andrew Cooper wrote:
>> On 02/09/2024 6:01 pm, Oleksii Kurochko wrote:
>>> diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
>>> index 31b91a79c8..3c6bd86406 100644
>>> --- a/xen/arch/riscv/include/asm/atomic.h
>>> +++ b/xen/arch/riscv/include/asm/atomic.h
>>> @@ -31,21 +31,17 @@
>>>
>>> void __bad_atomic_size(void);
>>>
>>> -/*
>>> - * Legacy from Linux kernel. For some reason they wanted to have ordered
>>> - * read/write access. Thereby read* is used instead of read*_cpu()
>>> - */
>>> static always_inline void read_atomic_size(const volatile void *p,
>>> void *res,
>>> unsigned int size)
>>> {
>>> switch ( size )
>>> {
>>> - case 1: *(uint8_t *)res = readb(p); break;
>>> - case 2: *(uint16_t *)res = readw(p); break;
>>> - case 4: *(uint32_t *)res = readl(p); break;
>>> + case 1: *(uint8_t *)res = readb_cpu(p); break;
>>> + case 2: *(uint16_t *)res = readw_cpu(p); break;
>>> + case 4: *(uint32_t *)res = readl_cpu(p); break;
>>> #ifndef CONFIG_RISCV_32
>>> - case 8: *(uint32_t *)res = readq(p); break;
>>> + case 8: *(uint32_t *)res = readq_cpu(p); break;
>> This cast looks suspiciously like it's wrong already in staging...
> Thanks for noticing that; it really should be uint64_t. I'll update
> that in the next patch version.
This bug is in 4.19.
I know RISC-V is experimental, but this is the kind of thing that Jan
might consider for backporting.
Whether it gets backported or not, it wants to be in a standalone
bugfix, not as a part of "rewrite the accessors used".
~Andrew
On Wed, 2024-09-04 at 11:31 +0100, Andrew Cooper wrote:
> On 04/09/2024 11:27 am, oleksii.kurochko@gmail.com wrote:
> > On Tue, 2024-09-03 at 15:21 +0100, Andrew Cooper wrote:
> > > On 02/09/2024 6:01 pm, Oleksii Kurochko wrote:
> > > > diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
> > > > index 31b91a79c8..3c6bd86406 100644
> > > > --- a/xen/arch/riscv/include/asm/atomic.h
> > > > +++ b/xen/arch/riscv/include/asm/atomic.h
> > > > @@ -31,21 +31,17 @@
> > > >
> > > > void __bad_atomic_size(void);
> > > >
> > > > -/*
> > > > - * Legacy from Linux kernel. For some reason they wanted to have ordered
> > > > - * read/write access. Thereby read* is used instead of read*_cpu()
> > > > - */
> > > > static always_inline void read_atomic_size(const volatile void *p,
> > > > void *res,
> > > > unsigned int size)
> > > > {
> > > > switch ( size )
> > > > {
> > > > - case 1: *(uint8_t *)res = readb(p); break;
> > > > - case 2: *(uint16_t *)res = readw(p); break;
> > > > - case 4: *(uint32_t *)res = readl(p); break;
> > > > + case 1: *(uint8_t *)res = readb_cpu(p); break;
> > > > + case 2: *(uint16_t *)res = readw_cpu(p); break;
> > > > + case 4: *(uint32_t *)res = readl_cpu(p); break;
> > > > #ifndef CONFIG_RISCV_32
> > > > - case 8: *(uint32_t *)res = readq(p); break;
> > > > + case 8: *(uint32_t *)res = readq_cpu(p); break;
> > > This cast looks suspiciously like it's wrong already in staging...
> > Thanks for noticing that; it really should be uint64_t. I'll update
> > that in the next patch version.
>
> This bug is in 4.19.
>
> I know RISC-V is experimental, but this is the kind of thing that Jan
> might consider for backporting.
>
> Whether it gets backported or not, it wants to be in a standalone
> bugfix, not as a part of "rewrite the accessors used".
It makes sense. I will send a separate patch tomorrow.
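Presumably that separate fix is just the cast width in the 8-byte case,
keeping readq() since switching the accessors is this patch's business,
i.e. something like (sketch only):

    case 8: *(uint64_t *)res = readq(p); break;

so that the full 64-bit value is stored rather than truncated to 32
bits.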
~ Oleksii