Split out int_st_mmio_leN, to be used by both do_st_mmio_leN
and do_st16_mmio_leN. Move the locks down into the two
functions, since each one now covers all accesses to one page.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
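The shape of the change, modeled below as a self-contained toy: an
inner worker that assumes the lock is already held, plus wrappers
that each take the one big lock exactly once per page.  A pthread
mutex stands in for the iothread lock; none of these names are QEMU
APIs.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t mmio_space[32];           /* stand-in device region */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Inner worker: caller must hold big_lock.  Store @size bytes of
     * @val_le at @off, least significant byte first, and return the
     * unstored high bytes -- the same contract as int_st_mmio_leN.
     */
    static uint64_t int_store_leN(unsigned off, uint64_t val_le, int size)
    {
        for (int i = 0; i < size; i++) {
            mmio_space[off + i] = val_le & 0xff;
            val_le >>= 8;
        }
        return val_le;
    }

    /* Store of <= 8 bytes: one lock acquisition around the worker. */
    static uint64_t store_leN(unsigned off, uint64_t val_le, int size)
    {
        uint64_t ret;

        pthread_mutex_lock(&big_lock);
        ret = int_store_leN(off, val_le, size);
        pthread_mutex_unlock(&big_lock);
        return ret;
    }

    /* 16-byte store: both halves under a single lock acquisition. */
    static uint64_t store16_leN(unsigned off, uint64_t lo, uint64_t hi,
                                int size)
    {
        uint64_t ret;

        pthread_mutex_lock(&big_lock);
        int_store_leN(off, lo, 8);
        ret = int_store_leN(off + 8, hi, size - 8);
        pthread_mutex_unlock(&big_lock);
        return ret;
    }

    int main(void)
    {
        store16_leN(0, 0x0807060504030201ull, 0x100f0e0d0c0b0a09ull, 16);
        for (int i = 0; i < 16; i++) {
            printf("%02x ", mmio_space[i]);  /* prints 01 02 ... 10 */
        }
        printf("\n");
        return 0;
    }
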
accel/tcg/cputlb.c | 88 ++++++++++++++++++++++++++++++----------------
1 file changed, 58 insertions(+), 30 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 8a7e386415..ff253e78d2 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2678,21 +2678,11 @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
* The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored.
*/
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t val_le, vaddr addr, int size,
- int mmu_idx, uintptr_t ra)
+static uint64_t int_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxAttrs attrs;
-
- tcg_debug_assert(size > 0 && size <= 8);
-
- attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
- mr = section->mr;
-
do {
MemOp this_mop;
unsigned this_size;
@@ -2704,7 +2694,7 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
this_mop |= MO_LE;

r = memory_region_dispatch_write(mr, mr_offset, val_le,
- this_mop, attrs);
+ this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
io_failed(env, full, addr, this_size, MMU_DATA_STORE,
mmu_idx, r, ra);
@@ -2722,6 +2712,56 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
return val_le;
}

+static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ ret = int_st_mmio_leN(env, full, val_le, addr, size, mmu_idx,
+ ra, mr, mr_offset);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ Int128 val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
+ mmu_idx, ra, mr, mr_offset);
+ ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
+ size - 8, mmu_idx, ra, mr, mr_offset + 8);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
/*
* Wrapper for the above.
*/
@@ -2733,7 +2773,6 @@ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
return do_st_mmio_leN(env, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
@@ -2788,11 +2827,8 @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
MemOp atom;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, int128_getlo(val_le),
- p->addr, 8, mmu_idx, ra);
- return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
- p->addr + 8, size - 8, mmu_idx, ra);
+ return do_st16_mmio_leN(env, p->full, val_le, p->addr,
+ size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8);
}
@@ -2836,7 +2872,6 @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -2852,7 +2887,6 @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap16(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -2872,7 +2906,6 @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -2892,7 +2925,6 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -3020,11 +3052,7 @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap128(val);
}
- a = int128_getlo(val);
- b = int128_gethi(val);
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
- do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
+ do_st16_mmio_leN(env, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
} else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
--
2.34.1