The following changes since commit a6ae23831b05a11880b40f7d58e332c45a6b04f7:
Merge remote-tracking branch 'remotes/ehabkost/tags/python-next-pull-request' into staging (2019-05-03 15:26:09 +0100)
are available in the Git repository at:
https://github.com/stsquad/qemu.git tags/pull-demacro-softmmu-100519-1
for you to fetch changes up to 4601f8d10d7628bcaf2a8179af36e04b42879e91:
cputlb: Do unaligned store recursion to outermost function (2019-05-10 20:23:21 +0100)
----------------------------------------------------------------
Demacrofy the SoftMMU
- the demacro itself
- refactor TLB_RECHECK and fix bug
- move unaligned handler out
----------------------------------------------------------------
Alex Bennée (1):
accel/tcg: demacro cputlb
Richard Henderson (4):
cputlb: Move TLB_RECHECK handling into load/store_helper
cputlb: Drop attribute flatten
cputlb: Do unaligned load recursion to outermost function
cputlb: Do unaligned store recursion to outermost function
accel/tcg/cputlb.c | 626 +++++++++++++++++++++++++++++++++++++------
accel/tcg/softmmu_template.h | 454 -------------------------------
2 files changed, 546 insertions(+), 534 deletions(-)
delete mode 100644 accel/tcg/softmmu_template.h
--
2.20.1
On Fri, 10 May 2019 at 21:01, Alex Bennée <alex.bennee@linaro.org> wrote:
>
> The following changes since commit a6ae23831b05a11880b40f7d58e332c45a6b04f7:
>
>   Merge remote-tracking branch 'remotes/ehabkost/tags/python-next-pull-request' into staging (2019-05-03 15:26:09 +0100)
>
> are available in the Git repository at:
>
>   https://github.com/stsquad/qemu.git tags/pull-demacro-softmmu-100519-1
>
> for you to fetch changes up to 4601f8d10d7628bcaf2a8179af36e04b42879e91:
>
>   cputlb: Do unaligned store recursion to outermost function (2019-05-10 20:23:21 +0100)
>
> ----------------------------------------------------------------
> Demacrofy the SoftMMU
>
>   - the demacro itself
>   - refactor TLB_RECHECK and fix bug
>   - move unaligned handler out
>

Applied, thanks. Please update the changelog at https://wiki.qemu.org/ChangeLog/4.1
for any user-visible changes.

-- PMM
From: Richard Henderson <richard.henderson@linaro.org>
Subject: cputlb: Move TLB_RECHECK handling into load/store_helper
Having this in io_readx/io_writex meant that we forgot to
re-compute index after tlb_fill. It also means we can use
the normal aligned memory load path. It also fixes a bug
where we had cached the value of index across a tlb_fill.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
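For readers skimming the patch, here is a condensed sketch of the post-patch load path (identifiers taken from the diff below; an illustrative fragment, not a standalone compilation unit). The key point is that the TLB_RECHECK case now re-runs tlb_fill() inside load_helper and re-computes index and entry afterwards, instead of reusing values cached before the fill:

    /* Illustrative fragment, condensed from the diff below. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }
        if (tlb_addr & TLB_RECHECK) {
            /* MMU protection covers less than a page: repeat the check. */
            tlb_fill(ENV_GET_CPU(env), addr, size, access_type, mmu_idx, retaddr);
            /* tlb_fill() may have refilled or resized the TLB, so the old
             * index/entry must not be reused: recompute both. */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
            tlb_addr = (code_read ? entry->addr_code : entry->addr_read)
                       & ~TLB_RECHECK;
            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
                goto do_aligned_access;   /* plain RAM access */
            }
        }
        res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
                       retaddr, access_type, size);
        return handle_bswap(res, size, big_endian);
    }

The store side follows the same pattern, using MMU_DATA_STORE and tlb_addr_write().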
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 12f21865ee7..9c04eb1687c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -856,9 +856,8 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
- int mmu_idx,
- target_ulong addr, uintptr_t retaddr,
- bool recheck, MMUAccessType access_type, int size)
+ int mmu_idx, target_ulong addr, uintptr_t retaddr,
+ MMUAccessType access_type, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr mr_offset;
@@ -868,30 +867,6 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
- if (recheck) {
- /*
- * This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page, and we must
- * repeat the MMU check here. This tlb_fill() call might
- * longjump out if this access should cause a guest exception.
- */
- CPUTLBEntry *entry;
- target_ulong tlb_addr;
-
- tlb_fill(cpu, addr, size, access_type, mmu_idx, retaddr);
-
- entry = tlb_entry(env, mmu_idx, addr);
- tlb_addr = (access_type == MMU_DATA_LOAD ?
- entry->addr_read : entry->addr_code);
- if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
- /* RAM access */
- uintptr_t haddr = addr + entry->addend;
-
- return ldn_p((void *)haddr, size);
- }
- /* Fall through for handling IO accesses */
- }
-
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -925,9 +900,8 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
- int mmu_idx,
- uint64_t val, target_ulong addr,
- uintptr_t retaddr, bool recheck, int size)
+ int mmu_idx, uint64_t val, target_ulong addr,
+ uintptr_t retaddr, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr mr_offset;
@@ -936,30 +910,6 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
bool locked = false;
MemTxResult r;
- if (recheck) {
- /*
- * This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page, and we must
- * repeat the MMU check here. This tlb_fill() call might
- * longjump out if this access should cause a guest exception.
- */
- CPUTLBEntry *entry;
- target_ulong tlb_addr;
-
- tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
-
- entry = tlb_entry(env, mmu_idx, addr);
- tlb_addr = tlb_addr_write(entry);
- if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
- /* RAM access */
- uintptr_t haddr = addr + entry->addend;
-
- stn_p((void *)haddr, size, val);
- return;
- }
- /* Fall through for handling IO accesses */
- }
-
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
@@ -1218,14 +1168,15 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
const size_t tlb_off = code_read ?
offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+ const MMUAccessType access_type =
+ code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
unsigned a_bits = get_alignment_bits(get_memop(oi));
void *haddr;
uint64_t res;
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(ENV_GET_CPU(env), addr,
- code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
+ cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
mmu_idx, retaddr);
}
@@ -1234,8 +1185,7 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(ENV_GET_CPU(env), addr, size,
- code_read ? MMU_INST_FETCH : MMU_DATA_LOAD,
- mmu_idx, retaddr);
+ access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
@@ -1244,17 +1194,33 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
- uint64_t tmp;
-
if ((addr & (size - 1)) != 0) {
goto do_unaligned_access;
}
- tmp = io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
- tlb_addr & TLB_RECHECK,
- code_read ? MMU_INST_FETCH : MMU_DATA_LOAD, size);
- return handle_bswap(tmp, size, big_endian);
+ if (tlb_addr & TLB_RECHECK) {
+ /*
+ * This is a TLB_RECHECK access, where the MMU protection
+ * covers a smaller range than a target page, and we must
+ * repeat the MMU check here. This tlb_fill() call might
+ * longjump out if this access should cause a guest exception.
+ */
+ tlb_fill(ENV_GET_CPU(env), addr, size,
+ access_type, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+
+ tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+ tlb_addr &= ~TLB_RECHECK;
+ if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+ /* RAM access */
+ goto do_aligned_access;
+ }
+ }
+
+ res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
+ retaddr, access_type, size);
+ return handle_bswap(res, size, big_endian);
}
/* Handle slow unaligned access (it spans two pages or IO). */
@@ -1281,8 +1247,8 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
return res & MAKE_64BIT_MASK(0, size * 8);
}
+ do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
-
switch (size) {
case 1:
res = ldub_p(haddr);
@@ -1446,15 +1412,33 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
-
if ((addr & (size - 1)) != 0) {
goto do_unaligned_access;
}
- io_writex(env, iotlbentry, mmu_idx,
+ if (tlb_addr & TLB_RECHECK) {
+ /*
+ * This is a TLB_RECHECK access, where the MMU protection
+ * covers a smaller range than a target page, and we must
+ * repeat the MMU check here. This tlb_fill() call might
+ * longjump out if this access should cause a guest exception.
+ */
+ tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+
+ tlb_addr = tlb_addr_write(entry);
+ tlb_addr &= ~TLB_RECHECK;
+ if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
+ /* RAM access */
+ goto do_aligned_access;
+ }
+ }
+
+ io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
handle_bswap(val, size, big_endian),
- addr, retaddr, tlb_addr & TLB_RECHECK, size);
+ addr, retaddr, size);
return;
}
@@ -1502,8 +1486,8 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
return;
}
+ do_aligned_access:
haddr = (void *)((uintptr_t)addr + entry->addend);
-
switch (size) {
case 1:
stb_p(haddr, val);
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>
Subject: cputlb: Drop attribute flatten
We are going to approach this problem via __attribute__((always_inline))
instead, but the full conversion will take several steps.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
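For background, a minimal generic illustration of the two attributes involved (hypothetical function names, not QEMU code): flatten asks the compiler to inline a function's callees into it, while always_inline forces the annotated function itself into each caller, so constant arguments such as size and endianness can fold away per call site, which is what the following patches rely on.

    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical example: each wrapper gets its own copy of
     * load_common, specialized on the constant size/big_endian
     * arguments it passes. */
    static inline uint64_t __attribute__((always_inline))
    load_common(const uint8_t *p, int size, bool big_endian)
    {
        uint64_t val = 0;
        for (int i = 0; i < size; i++) {
            val |= (uint64_t)p[i] << 8 * (big_endian ? size - 1 - i : i);
        }
        return val;
    }

    uint16_t load_u16_le(const uint8_t *p)
    {
        return load_common(p, 2, false);  /* folds to two byte loads */
    }

    uint32_t load_u32_be(const uint8_t *p)
    {
        return load_common(p, 4, true);
    }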
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 9c04eb1687c..ccbb47d8d1c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1291,51 +1291,44 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
* We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
*/
-tcg_target_ulong __attribute__((flatten))
-helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 1, false, false);
}
-tcg_target_ulong __attribute__((flatten))
-helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, false, false);
}
-tcg_target_ulong __attribute__((flatten))
-helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, true, false);
}
-tcg_target_ulong __attribute__((flatten))
-helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, false, false);
}
-tcg_target_ulong __attribute__((flatten))
-helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, true, false);
}
-uint64_t __attribute__((flatten))
-helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, false, false);
}
-uint64_t __attribute__((flatten))
-helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, true, false);
}
@@ -1519,51 +1512,44 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
}
}
-void __attribute__((flatten))
-helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 1, false);
}
-void __attribute__((flatten))
-helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 2, false);
}
-void __attribute__((flatten))
-helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 2, true);
}
-void __attribute__((flatten))
-helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 4, false);
}
-void __attribute__((flatten))
-helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 4, true);
}
-void __attribute__((flatten))
-helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 8, false);
}
-void __attribute__((flatten))
-helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, 8, true);
}
@@ -1627,51 +1613,44 @@ helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
/* Code access functions. */
-uint8_t __attribute__((flatten))
-helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 1, false, true);
}
-uint16_t __attribute__((flatten))
-helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, false, true);
}
-uint16_t __attribute__((flatten))
-helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 2, true, true);
}
-uint32_t __attribute__((flatten))
-helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, false, true);
}
-uint32_t __attribute__((flatten))
-helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 4, true, true);
}
-uint64_t __attribute__((flatten))
-helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, false, true);
}
-uint64_t __attribute__((flatten))
-helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
- uintptr_t retaddr)
+uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, 8, true, true);
}
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>
Subject: cputlb: Do unaligned load recursion to outermost function
If we attempt to recurse from load_helper back to load_helper,
even via an intermediary, we do not get all of the constants
expanded away as desired.
But if we recurse back to the original helper (or a shim that
has a consistent function signature), the operands are folded
away as desired.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
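Condensed from the diff below (illustrative fragment, identifiers as in the patch, not standalone code), this is the shape each load ends up with: the unaligned slow path inside load_helper recurses through the full_load function pointer, and each full_* function re-enters load_helper with compile-time-constant size/endianness arguments, so every instantiation stays fully specialized:

    typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

    static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
    {
        /* Constant 2/false/false arguments: the always_inline
         * load_helper is specialized here, and its unaligned path
         * calls back to this same function via full_load. */
        return load_helper(env, addr, oi, retaddr, 2, false, false,
                           full_le_lduw_mmu);
    }

    tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                        TCGMemOpIdx oi, uintptr_t retaddr)
    {
        return full_le_lduw_mmu(env, addr, oi, retaddr);
    }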
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index ccbb47d8d1c..e4d0c943011 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1157,10 +1157,13 @@ static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
* is disassembled. It shouldn't be called directly by guest code.
*/
-static uint64_t load_helper(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr,
- size_t size, bool big_endian,
- bool code_read)
+typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr);
+
+static inline uint64_t __attribute__((always_inline))
+load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+ uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
+ FullLoadHelper *full_load)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1233,8 +1236,8 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
do_unaligned_access:
addr1 = addr & ~(size - 1);
addr2 = addr1 + size;
- r1 = load_helper(env, addr1, oi, retaddr, size, big_endian, code_read);
- r2 = load_helper(env, addr2, oi, retaddr, size, big_endian, code_read);
+ r1 = full_load(env, addr1, oi, retaddr);
+ r2 = full_load(env, addr2, oi, retaddr);
shift = (addr & (size - 1)) * 8;
if (big_endian) {
@@ -1291,46 +1294,83 @@ static uint64_t load_helper(CPUArchState *env, target_ulong addr,
* We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
*/
+static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 1, false, false,
+ full_ldub_mmu);
+}
+
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 1, false, false);
+ return full_ldub_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, false, false,
+ full_le_lduw_mmu);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 2, false, false);
+ return full_le_lduw_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, true, false,
+ full_be_lduw_mmu);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 2, true, false);
+ return full_be_lduw_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, false, false,
+ full_le_ldul_mmu);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 4, false, false);
+ return full_le_ldul_mmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, true, false,
+ full_be_ldul_mmu);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 4, true, false);
+ return full_be_ldul_mmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 8, false, false);
+ return load_helper(env, addr, oi, retaddr, 8, false, false,
+ helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 8, true, false);
+ return load_helper(env, addr, oi, retaddr, 8, true, false,
+ helper_be_ldq_mmu);
}
/*
@@ -1613,44 +1653,81 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
/* Code access functions. */
+static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 1, false, true,
+ full_ldub_cmmu);
+}
+
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 1, false, true);
+ return full_ldub_cmmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, false, true,
+ full_le_lduw_cmmu);
}
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 2, false, true);
+ return full_le_lduw_cmmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 2, true, true,
+ full_be_lduw_cmmu);
}
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 2, true, true);
+ return full_be_lduw_cmmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, false, true,
+ full_le_ldul_cmmu);
}
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 4, false, true);
+ return full_le_ldul_cmmu(env, addr, oi, retaddr);
+}
+
+static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
+{
+ return load_helper(env, addr, oi, retaddr, 4, true, true,
+ full_be_ldul_cmmu);
}
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 4, true, true);
+ return full_be_ldul_cmmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 8, false, true);
+ return load_helper(env, addr, oi, retaddr, 8, false, true,
+ helper_le_ldq_cmmu);
}
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, 8, true, true);
+ return load_helper(env, addr, oi, retaddr, 8, true, true,
+ helper_be_ldq_cmmu);
}
--
2.20.1
From: Richard Henderson <richard.henderson@linaro.org>
Subject: cputlb: Do unaligned store recursion to outermost function
This is less tricky than for loads, because we always fall
back to single-byte stores to implement unaligned stores.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
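Condensed from the diff below (illustrative fragment, identifiers as in the patch): the unaligned slow path in store_helper now pushes each byte through the outermost byte-store helper instead of recursing into the inlined store_helper itself:

    for (i = 0; i < size; ++i) {
        uint8_t val8;
        if (big_endian) {
            /* Big-endian extract: most significant byte first. */
            val8 = val >> ((size - 1 - i) * 8);
        } else {
            /* Little-endian extract: least significant byte first. */
            val8 = val >> (i * 8);
        }
        /* Recurse via the outermost helper, not store_helper itself. */
        helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
    }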
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e4d0c943011..a0833247684 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1413,9 +1413,9 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
* Store Helpers
*/
-static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr, size_t size,
- bool big_endian)
+static inline void __attribute__((always_inline))
+store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
+ TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -1514,7 +1514,7 @@ static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Little-endian extract. */
val8 = val >> (i * 8);
}
- store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian);
+ helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
}
return;
}
--
2.20.1