All the LD/ST[B,W,L,Q] variants use the same template, only
modifying the access size used. Unify them as a single pair of
LD/ST methods taking a MemOp argument, using the 'm' suffix
for MemOp.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
system/memory_ldst.c.inc | 289 ++++++++-------------------------------
1 file changed, 58 insertions(+), 231 deletions(-)
diff --git a/system/memory_ldst.c.inc b/system/memory_ldst.c.inc
index 823fc3a7561..e0c0c3f5dca 100644
--- a/system/memory_ldst.c.inc
+++ b/system/memory_ldst.c.inc
@@ -20,39 +20,43 @@
*/
/* warning: addr must be aligned */
-static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
- hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
- enum device_endian endian)
+static inline
+uint64_t glue(address_space_ldm_internal, SUFFIX)(ARG1_DECL, MemOp mop,
+ hwaddr addr,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
{
+ const unsigned size = memop_size(mop);
uint8_t *ptr;
uint64_t val;
MemoryRegion *mr;
- hwaddr l = 4;
+ hwaddr l = size;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
RCU_READ_LOCK();
mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (l < 4 || !memory_access_is_direct(mr, false, attrs)) {
+ if (l < size || !memory_access_is_direct(mr, false, attrs)) {
release_lock |= prepare_mmio_access(mr);
/* I/O case */
r = memory_region_dispatch_read(mr, addr1, &val,
- MO_32 | devend_memop(endian), attrs);
+ mop | devend_memop(endian), attrs);
} else {
/* RAM case */
- fuzz_dma_read_cb(addr, 4, mr);
+ fuzz_dma_read_cb(addr, size, mr);
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
- val = ldl_le_p(ptr);
+ val = ldn_le_p(ptr, size);
break;
case DEVICE_BIG_ENDIAN:
- val = ldl_be_p(ptr);
+ val = ldn_be_p(ptr, size);
break;
default:
- val = ldl_p(ptr);
+ val = ldn_p(ptr, size);
break;
}
r = MEMTX_OK;
@@ -67,87 +71,30 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
return val;
}
+/* warning: addr must be aligned */
+static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
+ enum device_endian endian)
+{
+ return glue(address_space_ldm_internal, SUFFIX)(ARG1, MO_32, addr,
+ attrs, result, endian);
+}
+
/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
enum device_endian endian)
{
- uint8_t *ptr;
- uint64_t val;
- MemoryRegion *mr;
- hwaddr l = 8;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (l < 8 || !memory_access_is_direct(mr, false, attrs)) {
- release_lock |= prepare_mmio_access(mr);
-
- /* I/O case */
- r = memory_region_dispatch_read(mr, addr1, &val,
- MO_64 | devend_memop(endian), attrs);
- } else {
- /* RAM case */
- fuzz_dma_read_cb(addr, 8, mr);
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- val = ldq_le_p(ptr);
- break;
- case DEVICE_BIG_ENDIAN:
- val = ldq_be_p(ptr);
- break;
- default:
- val = ldq_p(ptr);
- break;
- }
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- bql_unlock();
- }
- RCU_READ_UNLOCK();
- return val;
+ return glue(address_space_ldm_internal, SUFFIX)(ARG1, MO_64, addr,
+ attrs, result, endian);
}
uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
- uint8_t *ptr;
- uint64_t val;
- MemoryRegion *mr;
- hwaddr l = 1;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (!memory_access_is_direct(mr, false, attrs)) {
- release_lock |= prepare_mmio_access(mr);
-
- /* I/O case */
- r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
- } else {
- /* RAM case */
- fuzz_dma_read_cb(addr, 1, mr);
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- val = ldub_p(ptr);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- bql_unlock();
- }
- RCU_READ_UNLOCK();
- return val;
+ return glue(address_space_ldm_internal, SUFFIX)(ARG1, MO_8, addr,
+ attrs, result,
+ DEVICE_NATIVE_ENDIAN);
}
/* warning: addr must be aligned */
@@ -155,37 +102,46 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
enum device_endian endian)
{
+ return glue(address_space_ldm_internal, SUFFIX)(ARG1, MO_16, addr,
+ attrs, result, endian);
+}
+
+static inline
+void glue(address_space_stm_internal, SUFFIX)(ARG1_DECL, MemOp mop,
+ hwaddr addr, uint64_t val,
+ MemTxAttrs attrs,
+ MemTxResult *result,
+ enum device_endian endian)
+{
+ const unsigned size = memop_size(mop);
uint8_t *ptr;
- uint64_t val;
MemoryRegion *mr;
- hwaddr l = 2;
+ hwaddr l = size;
hwaddr addr1;
MemTxResult r;
bool release_lock = false;
RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, false, attrs);
- if (l < 2 || !memory_access_is_direct(mr, false, attrs)) {
+ mr = TRANSLATE(addr, &addr1, &l, true, attrs);
+ if (l < size || !memory_access_is_direct(mr, true, attrs)) {
release_lock |= prepare_mmio_access(mr);
-
- /* I/O case */
- r = memory_region_dispatch_read(mr, addr1, &val,
- MO_16 | devend_memop(endian), attrs);
+ r = memory_region_dispatch_write(mr, addr1, val,
+ mop | devend_memop(endian), attrs);
} else {
/* RAM case */
- fuzz_dma_read_cb(addr, 2, mr);
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
- val = lduw_le_p(ptr);
+ stn_le_p(ptr, size, val);
break;
case DEVICE_BIG_ENDIAN:
- val = lduw_be_p(ptr);
+ stn_be_p(ptr, size, val);
break;
default:
- val = lduw_p(ptr);
+ stn_p(ptr, size, val);
break;
}
+ invalidate_and_set_dirty(mr, addr1, size);
r = MEMTX_OK;
}
if (result) {
@@ -195,7 +151,6 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
bql_unlock();
}
RCU_READ_UNLOCK();
- return val;
}
/* warning: addr must be aligned */
@@ -203,74 +158,16 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
hwaddr addr, uint32_t val, MemTxAttrs attrs,
MemTxResult *result, enum device_endian endian)
{
- uint8_t *ptr;
- MemoryRegion *mr;
- hwaddr l = 4;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 4 || !memory_access_is_direct(mr, true, attrs)) {
- release_lock |= prepare_mmio_access(mr);
- r = memory_region_dispatch_write(mr, addr1, val,
- MO_32 | devend_memop(endian), attrs);
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- stl_le_p(ptr, val);
- break;
- case DEVICE_BIG_ENDIAN:
- stl_be_p(ptr, val);
- break;
- default:
- stl_p(ptr, val);
- break;
- }
- invalidate_and_set_dirty(mr, addr1, 4);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- bql_unlock();
- }
- RCU_READ_UNLOCK();
+ glue(address_space_stm_internal, SUFFIX)(ARG1, MO_32, addr, val,
+ attrs, result, endian);
}
void glue(address_space_stb, SUFFIX)(ARG1_DECL,
hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
- uint8_t *ptr;
- MemoryRegion *mr;
- hwaddr l = 1;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (!memory_access_is_direct(mr, true, attrs)) {
- release_lock |= prepare_mmio_access(mr);
- r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- stb_p(ptr, val);
- invalidate_and_set_dirty(mr, addr1, 1);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- bql_unlock();
- }
- RCU_READ_UNLOCK();
+ glue(address_space_stm_internal, SUFFIX)(ARG1, MO_8, addr, val,
+ attrs, result,
+ DEVICE_NATIVE_ENDIAN);
}
/* warning: addr must be aligned */
@@ -278,86 +175,16 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
hwaddr addr, uint16_t val, MemTxAttrs attrs,
MemTxResult *result, enum device_endian endian)
{
- uint8_t *ptr;
- MemoryRegion *mr;
- hwaddr l = 2;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 2 || !memory_access_is_direct(mr, true, attrs)) {
- release_lock |= prepare_mmio_access(mr);
- r = memory_region_dispatch_write(mr, addr1, val,
- MO_16 | devend_memop(endian), attrs);
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- stw_le_p(ptr, val);
- break;
- case DEVICE_BIG_ENDIAN:
- stw_be_p(ptr, val);
- break;
- default:
- stw_p(ptr, val);
- break;
- }
- invalidate_and_set_dirty(mr, addr1, 2);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- bql_unlock();
- }
- RCU_READ_UNLOCK();
+ glue(address_space_stm_internal, SUFFIX)(ARG1, MO_16, addr, val,
+ attrs, result, endian);
}
static inline void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
hwaddr addr, uint64_t val, MemTxAttrs attrs,
MemTxResult *result, enum device_endian endian)
{
- uint8_t *ptr;
- MemoryRegion *mr;
- hwaddr l = 8;
- hwaddr addr1;
- MemTxResult r;
- bool release_lock = false;
-
- RCU_READ_LOCK();
- mr = TRANSLATE(addr, &addr1, &l, true, attrs);
- if (l < 8 || !memory_access_is_direct(mr, true, attrs)) {
- release_lock |= prepare_mmio_access(mr);
- r = memory_region_dispatch_write(mr, addr1, val,
- MO_64 | devend_memop(endian), attrs);
- } else {
- /* RAM case */
- ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
- switch (endian) {
- case DEVICE_LITTLE_ENDIAN:
- stq_le_p(ptr, val);
- break;
- case DEVICE_BIG_ENDIAN:
- stq_be_p(ptr, val);
- break;
- default:
- stq_p(ptr, val);
- break;
- }
- invalidate_and_set_dirty(mr, addr1, 8);
- r = MEMTX_OK;
- }
- if (result) {
- *result = r;
- }
- if (release_lock) {
- bql_unlock();
- }
- RCU_READ_UNLOCK();
+ glue(address_space_stm_internal, SUFFIX)(ARG1, MO_64, addr, val,
+ attrs, result, endian);
}
#define ENDIANNESS
--
2.52.0
On 12/18/25 01:31, Philippe Mathieu-Daudé wrote:
> All the LD/ST[W,L,Q] variants use the same template, only
> modifying the access size used. Unify as a single pair of
> LD/ST methods taking a MemOp argument. Thus use the 'm'
> suffix for MemOp.
>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
> system/memory_ldst.c.inc | 289 ++++++++-------------------------------
> 1 file changed, 58 insertions(+), 231 deletions(-)
>
> diff --git a/system/memory_ldst.c.inc b/system/memory_ldst.c.inc
> index 823fc3a7561..e0c0c3f5dca 100644
> --- a/system/memory_ldst.c.inc
> +++ b/system/memory_ldst.c.inc
> @@ -20,39 +20,43 @@
> */
>
> /* warning: addr must be aligned */
> -static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
> - hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
> - enum device_endian endian)
> +static inline
> +uint64_t glue(address_space_ldm_internal, SUFFIX)(ARG1_DECL, MemOp mop,
> + hwaddr addr,
> + MemTxAttrs attrs,
> + MemTxResult *result,
> + enum device_endian endian)
> {
> + const unsigned size = memop_size(mop);
> uint8_t *ptr;
> uint64_t val;
> MemoryRegion *mr;
> - hwaddr l = 4;
> + hwaddr l = size;
> hwaddr addr1;
> MemTxResult r;
> bool release_lock = false;
>
> RCU_READ_LOCK();
> mr = TRANSLATE(addr, &addr1, &l, false, attrs);
> - if (l < 4 || !memory_access_is_direct(mr, false, attrs)) {
> + if (l < size || !memory_access_is_direct(mr, false, attrs)) {
> release_lock |= prepare_mmio_access(mr);
>
> /* I/O case */
> r = memory_region_dispatch_read(mr, addr1, &val,
> - MO_32 | devend_memop(endian), attrs);
> + mop | devend_memop(endian), attrs);
> } else {
> /* RAM case */
> - fuzz_dma_read_cb(addr, 4, mr);
> + fuzz_dma_read_cb(addr, size, mr);
> ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
> switch (endian) {
> case DEVICE_LITTLE_ENDIAN:
> - val = ldl_le_p(ptr);
> + val = ldn_le_p(ptr, size);
> break;
> case DEVICE_BIG_ENDIAN:
> - val = ldl_be_p(ptr);
> + val = ldn_be_p(ptr, size);
> break;
> default:
> - val = ldl_p(ptr);
> + val = ldn_p(ptr, size);
> break;
> }
> r = MEMTX_OK;
> @@ -67,87 +71,30 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
> return val;
> }
>
> +/* warning: addr must be aligned */
> +static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
> + hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
> + enum device_endian endian)
> +{
> + return glue(address_space_ldm_internal, SUFFIX)(ARG1, MO_32, addr,
> + attrs, result, endian);
> +}
> +
> /* warning: addr must be aligned */
Do we know why this warning is here?
Do we know why we aren't asserting alignment?
It makes me wonder if the ldn_*_p above shouldn't be qatomic_ld.
And more so for the stores.
But that's an existing problem, not new with the refactor, so
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
r~
© 2016 - 2026 Red Hat, Inc.