This is v4 of my notdirty + rom patch set with two suggested name
changes (qemu_build_not_reached, TLB_DISCARD_WRITE) from David and Alex.

r~

The following changes since commit 240ab11fb72049d6373cbbec8d788f8e411a00bc:

  Merge remote-tracking branch 'remotes/aperard/tags/pull-xen-20190924' into staging (2019-09-24 15:36:31 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20190925

for you to fetch changes up to ae57db63acf5a0399232f852acc5c1d83ef63400:

  cputlb: Pass retaddr to tb_check_watchpoint (2019-09-25 10:56:28 -0700)

----------------------------------------------------------------
Fixes for TLB_BSWAP
Conversion of NOTDIRTY and ROM handling to cputlb
Followup cleanups to cputlb

----------------------------------------------------------------
Richard Henderson (16):
      exec: Use TARGET_PAGE_BITS_MIN for TLB flags
      cputlb: Disable __always_inline__ without optimization
      qemu/compiler.h: Add qemu_build_not_reached
      cputlb: Use qemu_build_not_reached in load/store_helpers
      cputlb: Split out load/store_memop
      cputlb: Introduce TLB_BSWAP
      exec: Adjust notdirty tracing
      cputlb: Move ROM handling from I/O path to TLB path
      cputlb: Move NOTDIRTY handling from I/O path to TLB path
      cputlb: Partially inline memory_region_section_get_iotlb
      cputlb: Merge and move memory_notdirty_write_{prepare,complete}
      cputlb: Handle TLB_NOTDIRTY in probe_access
      cputlb: Remove cpu->mem_io_vaddr
      cputlb: Remove tb_invalidate_phys_page_range is_cpu_write_access
      cputlb: Pass retaddr to tb_invalidate_phys_page_fast
      cputlb: Pass retaddr to tb_check_watchpoint

 accel/tcg/translate-all.h      |   8 +-
 include/exec/cpu-all.h         |  23 ++-
 include/exec/cpu-common.h      |   3 -
 include/exec/exec-all.h        |   6 +-
 include/exec/memory-internal.h |  65 --------
 include/hw/core/cpu.h          |   2 -
 include/qemu/compiler.h        |  26 +++
 accel/tcg/cputlb.c             | 348 +++++++++++++++++++++++++----------------
 accel/tcg/translate-all.c      |  51 +++---
 exec.c                         | 158 +------------------
 hw/core/cpu.c                  |   1 -
 memory.c                       |  20 ---
 trace-events                   |   4 +-
 13 files changed, 288 insertions(+), 427 deletions(-)


The following changes since commit ee26ce674a93c824713542cec3b6a9ca85459165:

  Merge remote-tracking branch 'remotes/jsnow/tags/python-pull-request' into staging (2021-10-12 16:08:33 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211013

for you to fetch changes up to 76e366e728549b3324cc2dee6745d6a4f1af18e6:

  tcg: Canonicalize alignment flags in MemOp (2021-10-13 09:14:35 -0700)

----------------------------------------------------------------
Use MO_128 for 16-byte atomic memory operations.
Add cpu_ld/st_mmu memory primitives.
Move helper_ld/st memory helpers out of tcg.h.
Canonicalize alignment flags in MemOp.

----------------------------------------------------------------
BALATON Zoltan (1):
      memory: Log access direction for invalid accesses

Richard Henderson (14):
      target/arm: Use MO_128 for 16 byte atomics
      target/i386: Use MO_128 for 16 byte atomics
      target/ppc: Use MO_128 for 16 byte atomics
      target/s390x: Use MO_128 for 16 byte atomics
      target/hexagon: Implement cpu_mmu_index
      accel/tcg: Add cpu_{ld,st}*_mmu interfaces
      accel/tcg: Move cpu_atomic decls to exec/cpu_ldst.h
      target/mips: Use cpu_*_data_ra for msa load/store
      target/mips: Use 8-byte memory ops for msa load/store
      target/s390x: Use cpu_*_mmu instead of helper_*_mmu
      target/sparc: Use cpu_*_mmu instead of helper_*_mmu
      target/arm: Use cpu_*_mmu instead of helper_*_mmu
      tcg: Move helper_*_mmu decls to tcg/tcg-ldst.h
      tcg: Canonicalize alignment flags in MemOp

 docs/devel/loads-stores.rst   |  52 +++++-
 include/exec/cpu_ldst.h       | 332 ++++++++++++++++++-----------------
 include/tcg/tcg-ldst.h        |  74 ++++++++
 include/tcg/tcg.h             | 158 -----------------
 target/hexagon/cpu.h          |   9 +
 accel/tcg/cputlb.c            | 393 ++++++++++++++----------------------------
 accel/tcg/user-exec.c         | 385 +++++++++++++++++------------------------
 softmmu/memory.c              |  20 +--
 target/arm/helper-a64.c       |  61 ++-----
 target/arm/m_helper.c         |   6 +-
 target/i386/tcg/mem_helper.c  |   2 +-
 target/m68k/op_helper.c       |   1 -
 target/mips/tcg/msa_helper.c  | 389 ++++++++++-------------------------------
 target/ppc/mem_helper.c       |   1 -
 target/ppc/translate.c        |  12 +-
 target/s390x/tcg/mem_helper.c |  13 +-
 target/sparc/ldst_helper.c    |  14 +-
 tcg/tcg-op.c                  |   7 +-
 tcg/tcg.c                     |   1 +
 tcg/tci.c                     |   1 +
 accel/tcg/ldst_common.c.inc   | 307 +++++++++++++++++++++++++++++++++
 21 files changed, 1032 insertions(+), 1206 deletions(-)
 create mode 100644 include/tcg/tcg-ldst.h
 create mode 100644 accel/tcg/ldst_common.c.inc
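
[Editorial illustration, not part of either series: the "cpu_ld/st_mmu
memory primitives" named above take a MemOpIdx, so the caller supplies
size, endianness and alignment in a single argument. A minimal sketch of
how a target helper might use them -- the helper name is invented here:]

    uint64_t helper_demo_ldq(CPUArchState *env, target_ulong addr)
    {
        int mmu_idx = cpu_mmu_index(env, false);
        /* MO_BEQ is a big-endian 64-bit access; MO_ALIGN asks the core
         * to enforce natural alignment for the access. */
        MemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN, mmu_idx);

        return cpu_ldq_be_mmu(env, addr, oi, GETPC());
    }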
1
We can use notdirty_write for the write and return a valid host
1
From: BALATON Zoltan <balaton@eik.bme.hu>
2
pointer for this case.
2
3
In memory_region_access_valid() invalid accesses are logged to help
4
debugging but the log message does not say if it was a read or write.
5
Log that too to better identify the access causing the problem.
3
6
4
Reviewed-by: David Hildenbrand <david@redhat.com>
7
Reviewed-by: David Hildenbrand <david@redhat.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
9
Message-Id: <20211011173616.F1DE0756022@zero.eik.bme.hu>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
11
---
8
accel/tcg/cputlb.c | 26 +++++++++++++++++---------
12
softmmu/memory.c | 20 ++++++++++----------
9
1 file changed, 17 insertions(+), 9 deletions(-)
13
1 file changed, 10 insertions(+), 10 deletions(-)
10
14
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
15
diff --git a/softmmu/memory.c b/softmmu/memory.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
17
--- a/softmmu/memory.c
14
+++ b/accel/tcg/cputlb.c
18
+++ b/softmmu/memory.c
15
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
19
@@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr,
16
return NULL;
20
{
21
if (mr->ops->valid.accepts
22
&& !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
23
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
24
- "0x%" HWADDR_PRIX ", size %u, "
25
- "region '%s', reason: rejected\n",
26
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
27
+ ", size %u, region '%s', reason: rejected\n",
28
+ is_write ? "write" : "read",
29
addr, size, memory_region_name(mr));
30
return false;
17
}
31
}
18
32
19
- /* Handle watchpoints. */
33
if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
20
- if (tlb_addr & TLB_WATCHPOINT) {
34
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
21
- cpu_check_watchpoint(env_cpu(env), addr, size,
35
- "0x%" HWADDR_PRIX ", size %u, "
22
- env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
36
- "region '%s', reason: unaligned\n",
23
- wp_access, retaddr);
37
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
24
- }
38
+ ", size %u, region '%s', reason: unaligned\n",
25
+ if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
39
+ is_write ? "write" : "read",
26
+ CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
40
addr, size, memory_region_name(mr));
27
41
return false;
28
- /* Reject I/O access, or other required slow-path. */
29
- if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
30
- return NULL;
31
+ /* Reject I/O access, or other required slow-path. */
32
+ if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
33
+ return NULL;
34
+ }
35
+
36
+ /* Handle watchpoints. */
37
+ if (tlb_addr & TLB_WATCHPOINT) {
38
+ cpu_check_watchpoint(env_cpu(env), addr, size,
39
+ iotlbentry->attrs, wp_access, retaddr);
40
+ }
41
+
42
+ /* Handle clean RAM pages. */
43
+ if (tlb_addr & TLB_NOTDIRTY) {
44
+ notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
45
+ }
46
}
42
}
47
43
@@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr,
48
return (void *)((uintptr_t)addr + entry->addend);
44
45
if (size > mr->ops->valid.max_access_size
46
|| size < mr->ops->valid.min_access_size) {
47
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
48
- "0x%" HWADDR_PRIX ", size %u, "
49
- "region '%s', reason: invalid size "
50
- "(min:%u max:%u)\n",
51
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
52
+ ", size %u, region '%s', reason: invalid size "
53
+ "(min:%u max:%u)\n",
54
+ is_write ? "write" : "read",
55
addr, size, memory_region_name(mr),
56
mr->ops->valid.min_access_size,
57
mr->ops->valid.max_access_size);
49
--
58
--
50
2.17.1
59
2.25.1
51
60
52
61
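
[Editorial illustration for the probe_access change above, not part of
the patch: with TLB_NOTDIRTY now handled via notdirty_write(), a helper
that probes for a writable page gets a usable host pointer back even
when the page is merely clean, instead of NULL. The surrounding helper
is hypothetical:]

    /* Inside a hypothetical helper that wants to zero 'size' bytes: */
    void *host = probe_access(env, addr, size, MMU_DATA_STORE,
                              cpu_mmu_index(env, false), GETPC());
    if (host) {
        /* Plain (or merely clean) RAM: watchpoints and dirty tracking
         * were already handled by probe_access, so direct host access
         * is safe. */
        memset(host, 0, size);
    } else {
        /* MMIO or another case that still needs the slow path. */
    }
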
1
Fixes the previous TLB_WATCHPOINT patches because we are currently
1
Cc: qemu-arm@nongnu.org
2
failing to set cpu->mem_io_pc with the call to cpu_check_watchpoint.
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Pass down the retaddr directly because it's readily available.
4
5
Fixes: 50b107c5d61
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: David Hildenbrand <david@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
4
---
10
accel/tcg/translate-all.h | 2 +-
5
target/arm/helper-a64.c | 8 ++++----
11
accel/tcg/translate-all.c | 6 +++---
6
1 file changed, 4 insertions(+), 4 deletions(-)
12
exec.c | 2 +-
13
3 files changed, 5 insertions(+), 5 deletions(-)
14
7
15
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
8
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
16
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/translate-all.h
10
--- a/target/arm/helper-a64.c
18
+++ b/accel/tcg/translate-all.h
11
+++ b/target/arm/helper-a64.c
19
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
12
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
20
tb_page_addr_t start, int len,
13
assert(HAVE_CMPXCHG128);
21
uintptr_t retaddr);
14
22
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
15
mem_idx = cpu_mmu_index(env, false);
23
-void tb_check_watchpoint(CPUState *cpu);
16
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
24
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
17
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
25
18
26
#ifdef CONFIG_USER_ONLY
19
cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
27
int page_unprotect(target_ulong address, uintptr_t pc);
20
newv = int128_make128(new_lo, new_hi);
28
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
21
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
29
index XXXXXXX..XXXXXXX 100644
22
assert(HAVE_CMPXCHG128);
30
--- a/accel/tcg/translate-all.c
23
31
+++ b/accel/tcg/translate-all.c
24
mem_idx = cpu_mmu_index(env, false);
32
@@ -XXX,XX +XXX,XX @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
25
- oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
33
#endif
26
+ oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);
34
27
35
/* user-mode: call with mmap_lock held */
28
/*
36
-void tb_check_watchpoint(CPUState *cpu)
29
* High and low need to be switched here because this is not actually a
37
+void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
30
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
38
{
31
assert(HAVE_CMPXCHG128);
39
TranslationBlock *tb;
32
40
33
mem_idx = cpu_mmu_index(env, false);
41
assert_memory_lock();
34
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
42
35
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
43
- tb = tcg_tb_lookup(cpu->mem_io_pc);
36
44
+ tb = tcg_tb_lookup(retaddr);
37
cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
45
if (tb) {
38
newv = int128_make128(new_lo, new_hi);
46
/* We can use retranslation to find the PC. */
39
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
47
- cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
40
assert(HAVE_CMPXCHG128);
48
+ cpu_restore_state_from_tb(cpu, tb, retaddr, true);
41
49
tb_phys_invalidate(tb, -1);
42
mem_idx = cpu_mmu_index(env, false);
50
} else {
43
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
51
/* The exception probably happened in a helper. The CPU state should
44
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
52
diff --git a/exec.c b/exec.c
45
53
index XXXXXXX..XXXXXXX 100644
46
cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
54
--- a/exec.c
47
newv = int128_make128(new_lo, new_hi);
55
+++ b/exec.c
56
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
57
cpu->watchpoint_hit = wp;
58
59
mmap_lock();
60
- tb_check_watchpoint(cpu);
61
+ tb_check_watchpoint(cpu, ra);
62
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
63
cpu->exception_index = EXCP_DEBUG;
64
mmap_unlock();
65
--
48
--
66
2.17.1
49
2.25.1
67
50
68
51
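
[Editorial note: the same mechanical substitution recurs in the
target/i386, target/ppc and target/s390x patches that follow -- a
16-byte atomic access is no longer described as a 64-bit op with a
16-byte alignment override, but as a real 128-bit MemOp:]

    /* before: 64-bit op plus a 16-byte alignment override */
    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
    /* after: a true 128-bit MemOp with natural alignment */
    oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
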
1
Rather than rely on cpu->mem_io_pc, pass retaddr down directly.
1
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
3
Within tb_invalidate_phys_page_range__locked, the is_cpu_write_access
4
parameter is non-zero exactly when retaddr would be non-zero, so that
5
is a simple replacement.
6
7
Recognize that current_tb_not_found is true only when mem_io_pc
8
(and now retaddr) are also non-zero, so remove a redundant test.
9
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: David Hildenbrand <david@redhat.com>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
3
---
14
accel/tcg/translate-all.h | 3 ++-
4
target/i386/tcg/mem_helper.c | 2 +-
15
accel/tcg/cputlb.c | 6 +-----
5
1 file changed, 1 insertion(+), 1 deletion(-)
16
accel/tcg/translate-all.c | 39 +++++++++++++++++++--------------------
17
3 files changed, 22 insertions(+), 26 deletions(-)
18
6
19
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
7
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
20
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
21
--- a/accel/tcg/translate-all.h
9
--- a/target/i386/tcg/mem_helper.c
22
+++ b/accel/tcg/translate-all.h
10
+++ b/target/i386/tcg/mem_helper.c
23
@@ -XXX,XX +XXX,XX @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
11
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
24
tb_page_addr_t end);
12
Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
25
void page_collection_unlock(struct page_collection *set);
13
26
void tb_invalidate_phys_page_fast(struct page_collection *pages,
14
int mem_idx = cpu_mmu_index(env, false);
27
- tb_page_addr_t start, int len);
15
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
28
+ tb_page_addr_t start, int len,
16
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
29
+ uintptr_t retaddr);
17
Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
30
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
18
31
void tb_check_watchpoint(CPUState *cpu);
19
if (int128_eq(oldv, cmpv)) {
32
33
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/accel/tcg/cputlb.c
36
+++ b/accel/tcg/cputlb.c
37
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
38
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
39
struct page_collection *pages
40
= page_collection_lock(ram_addr, ram_addr + size);
41
-
42
- /* We require mem_io_pc in tb_invalidate_phys_page_range. */
43
- cpu->mem_io_pc = retaddr;
44
-
45
- tb_invalidate_phys_page_fast(pages, ram_addr, size);
46
+ tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
47
page_collection_unlock(pages);
48
}
49
50
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/accel/tcg/translate-all.c
53
+++ b/accel/tcg/translate-all.c
54
@@ -XXX,XX +XXX,XX @@ static void
55
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
56
PageDesc *p, tb_page_addr_t start,
57
tb_page_addr_t end,
58
- int is_cpu_write_access)
59
+ uintptr_t retaddr)
60
{
61
TranslationBlock *tb;
62
tb_page_addr_t tb_start, tb_end;
63
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
64
#ifdef TARGET_HAS_PRECISE_SMC
65
CPUState *cpu = current_cpu;
66
CPUArchState *env = NULL;
67
- int current_tb_not_found = is_cpu_write_access;
68
+ bool current_tb_not_found = retaddr != 0;
69
+ bool current_tb_modified = false;
70
TranslationBlock *current_tb = NULL;
71
- int current_tb_modified = 0;
72
target_ulong current_pc = 0;
73
target_ulong current_cs_base = 0;
74
uint32_t current_flags = 0;
75
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
76
if (!(tb_end <= start || tb_start >= end)) {
77
#ifdef TARGET_HAS_PRECISE_SMC
78
if (current_tb_not_found) {
79
- current_tb_not_found = 0;
80
- current_tb = NULL;
81
- if (cpu->mem_io_pc) {
82
- /* now we have a real cpu fault */
83
- current_tb = tcg_tb_lookup(cpu->mem_io_pc);
84
- }
85
+ current_tb_not_found = false;
86
+ /* now we have a real cpu fault */
87
+ current_tb = tcg_tb_lookup(retaddr);
88
}
89
if (current_tb == tb &&
90
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
91
- /* If we are modifying the current TB, we must stop
92
- its execution. We could be more precise by checking
93
- that the modification is after the current PC, but it
94
- would require a specialized function to partially
95
- restore the CPU state */
96
-
97
- current_tb_modified = 1;
98
- cpu_restore_state_from_tb(cpu, current_tb,
99
- cpu->mem_io_pc, true);
100
+ /*
101
+ * If we are modifying the current TB, we must stop
102
+ * its execution. We could be more precise by checking
103
+ * that the modification is after the current PC, but it
104
+ * would require a specialized function to partially
105
+ * restore the CPU state.
106
+ */
107
+ current_tb_modified = true;
108
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
109
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
110
&current_flags);
111
}
112
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
113
* Call with all @pages in the range [@start, @start + len[ locked.
114
*/
115
void tb_invalidate_phys_page_fast(struct page_collection *pages,
116
- tb_page_addr_t start, int len)
117
+ tb_page_addr_t start, int len,
118
+ uintptr_t retaddr)
119
{
120
PageDesc *p;
121
122
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
123
}
124
} else {
125
do_invalidate:
126
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
127
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
128
+ retaddr);
129
}
130
}
131
#else
132
--
20
--
133
2.17.1
21
2.25.1
134
22
135
23
1
With the merge of notdirty handling into store_helper,
1
Cc: qemu-ppc@nongnu.org
2
the last user of cpu->mem_io_vaddr was removed.
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
4
---
8
include/hw/core/cpu.h | 2 --
5
target/ppc/translate.c | 12 +++++++-----
9
accel/tcg/cputlb.c | 2 --
6
1 file changed, 7 insertions(+), 5 deletions(-)
10
hw/core/cpu.c | 1 -
11
3 files changed, 5 deletions(-)
12
7
13
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
8
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
14
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
15
--- a/include/hw/core/cpu.h
10
--- a/target/ppc/translate.c
16
+++ b/include/hw/core/cpu.h
11
+++ b/target/ppc/translate.c
17
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
12
@@ -XXX,XX +XXX,XX @@ static void gen_std(DisasContext *ctx)
18
* @next_cpu: Next CPU sharing TB cache.
13
if (HAVE_ATOMIC128) {
19
* @opaque: User data.
14
TCGv_i32 oi = tcg_temp_new_i32();
20
* @mem_io_pc: Host Program Counter at which the memory was accessed.
15
if (ctx->le_mode) {
21
- * @mem_io_vaddr: Target virtual address at which the memory was accessed.
16
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
22
* @kvm_fd: vCPU file descriptor for KVM.
17
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128,
23
* @work_mutex: Lock to prevent multiple access to queued_work_*.
18
+ ctx->mem_idx));
24
* @queued_work_first: First asynchronous work pending.
19
gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
25
@@ -XXX,XX +XXX,XX @@ struct CPUState {
20
} else {
26
* we store some rarely used information in the CPU context.
21
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
27
*/
22
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128,
28
uintptr_t mem_io_pc;
23
+ ctx->mem_idx));
29
- vaddr mem_io_vaddr;
24
gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
30
/*
25
}
31
* This is only needed for the legacy cpu_unassigned_access() hook;
26
tcg_temp_free_i32(oi);
32
* when all targets using it have been converted to use
27
@@ -XXX,XX +XXX,XX @@ static void gen_lqarx(DisasContext *ctx)
33
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
28
if (HAVE_ATOMIC128) {
34
index XXXXXXX..XXXXXXX 100644
29
TCGv_i32 oi = tcg_temp_new_i32();
35
--- a/accel/tcg/cputlb.c
30
if (ctx->le_mode) {
36
+++ b/accel/tcg/cputlb.c
31
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
37
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
32
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
38
cpu_io_recompile(cpu, retaddr);
33
ctx->mem_idx));
39
}
34
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
40
35
} else {
41
- cpu->mem_io_vaddr = addr;
36
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
42
cpu->mem_io_access_type = access_type;
37
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
43
38
ctx->mem_idx));
44
if (mr->global_locking && !qemu_mutex_iothread_locked()) {
39
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
45
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
40
}
46
if (!cpu->can_do_io) {
41
@@ -XXX,XX +XXX,XX @@ static void gen_stqcx_(DisasContext *ctx)
47
cpu_io_recompile(cpu, retaddr);
42
48
}
43
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
49
- cpu->mem_io_vaddr = addr;
44
if (HAVE_CMPXCHG128) {
50
cpu->mem_io_pc = retaddr;
45
- TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
51
46
+ TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
52
if (mr->global_locking && !qemu_mutex_iothread_locked()) {
47
if (ctx->le_mode) {
53
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
48
gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
54
index XXXXXXX..XXXXXXX 100644
49
EA, lo, hi, oi);
55
--- a/hw/core/cpu.c
56
+++ b/hw/core/cpu.c
57
@@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(CPUState *cpu)
58
cpu->interrupt_request = 0;
59
cpu->halted = 0;
60
cpu->mem_io_pc = 0;
61
- cpu->mem_io_vaddr = 0;
62
cpu->icount_extra = 0;
63
atomic_set(&cpu->icount_decr_ptr->u32, 0);
64
cpu->can_do_io = 1;
65
--
50
--
66
2.17.1
51
2.25.1
67
52
68
53
1
There is only one caller, tlb_set_page_with_attrs. We cannot
1
Reviewed-by: David Hildenbrand <david@redhat.com>
2
inline the entire function because the AddressSpaceDispatch
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
structure is private to exec.c, and cannot easily be moved to
4
include/exec/memory-internal.h.
5
6
Compute is_ram and is_romd once within tlb_set_page_with_attrs.
7
Fold the number of tests against these predicates. Compute
8
cpu_physical_memory_is_clean outside of the tlb lock region.
9
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
4
---
13
include/exec/exec-all.h | 6 +---
5
target/s390x/tcg/mem_helper.c | 4 ++--
14
accel/tcg/cputlb.c | 68 ++++++++++++++++++++++++++---------------
6
1 file changed, 2 insertions(+), 2 deletions(-)
15
exec.c | 22 ++-----------
16
3 files changed, 47 insertions(+), 49 deletions(-)
17
7
18
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
8
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
19
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
20
--- a/include/exec/exec-all.h
10
--- a/target/s390x/tcg/mem_helper.c
21
+++ b/include/exec/exec-all.h
11
+++ b/target/s390x/tcg/mem_helper.c
22
@@ -XXX,XX +XXX,XX @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
12
@@ -XXX,XX +XXX,XX @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
23
hwaddr *xlat, hwaddr *plen,
13
assert(HAVE_CMPXCHG128);
24
MemTxAttrs attrs, int *prot);
14
25
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
15
mem_idx = cpu_mmu_index(env, false);
26
- MemoryRegionSection *section,
16
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
27
- target_ulong vaddr,
17
+ oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
28
- hwaddr paddr, hwaddr xlat,
18
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
29
- int prot,
19
fail = !int128_eq(oldv, cmpv);
30
- target_ulong *address);
20
31
+ MemoryRegionSection *section);
21
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
32
#endif
22
cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
33
23
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
34
/* vl.c */
24
} else if (HAVE_CMPXCHG128) {
35
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
25
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
36
index XXXXXXX..XXXXXXX 100644
26
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
37
--- a/accel/tcg/cputlb.c
27
ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
38
+++ b/accel/tcg/cputlb.c
28
cc = !int128_eq(ov, cv);
39
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
29
} else {
40
MemoryRegionSection *section;
41
unsigned int index;
42
target_ulong address;
43
- target_ulong code_address;
44
+ target_ulong write_address;
45
uintptr_t addend;
46
CPUTLBEntry *te, tn;
47
hwaddr iotlb, xlat, sz, paddr_page;
48
target_ulong vaddr_page;
49
int asidx = cpu_asidx_from_attrs(cpu, attrs);
50
int wp_flags;
51
+ bool is_ram, is_romd;
52
53
assert_cpu_is_self(cpu);
54
55
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
56
if (attrs.byte_swap) {
57
address |= TLB_BSWAP;
58
}
59
- if (!memory_region_is_ram(section->mr) &&
60
- !memory_region_is_romd(section->mr)) {
61
- /* IO memory case */
62
- address |= TLB_MMIO;
63
- addend = 0;
64
- } else {
65
+
66
+ is_ram = memory_region_is_ram(section->mr);
67
+ is_romd = memory_region_is_romd(section->mr);
68
+
69
+ if (is_ram || is_romd) {
70
+ /* RAM and ROMD both have associated host memory. */
71
addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
72
+ } else {
73
+ /* I/O does not; force the host address to NULL. */
74
+ addend = 0;
75
+ }
76
+
77
+ write_address = address;
78
+ if (is_ram) {
79
+ iotlb = memory_region_get_ram_addr(section->mr) + xlat;
80
+ /*
81
+ * Computing is_clean is expensive; avoid all that unless
82
+ * the page is actually writable.
83
+ */
84
+ if (prot & PAGE_WRITE) {
85
+ if (section->readonly) {
86
+ write_address |= TLB_DISCARD_WRITE;
87
+ } else if (cpu_physical_memory_is_clean(iotlb)) {
88
+ write_address |= TLB_NOTDIRTY;
89
+ }
90
+ }
91
+ } else {
92
+ /* I/O or ROMD */
93
+ iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
94
+ /*
95
+ * Writes to romd devices must go through MMIO to enable write.
96
+ * Reads to romd devices go through the ram_ptr found above,
97
+ * but of course reads to I/O must go through MMIO.
98
+ */
99
+ write_address |= TLB_MMIO;
100
+ if (!is_romd) {
101
+ address = write_address;
102
+ }
103
}
104
105
- code_address = address;
106
- iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
107
- paddr_page, xlat, prot, &address);
108
wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
109
TARGET_PAGE_SIZE);
110
111
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
112
/*
113
* At this point iotlb contains a physical section number in the lower
114
* TARGET_PAGE_BITS, and either
115
- * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
116
- * + the offset within section->mr of the page base (otherwise)
117
+ * + the ram_addr_t of the page base of the target RAM (RAM)
118
+ * + the offset within section->mr of the page base (I/O, ROMD)
119
* We subtract the vaddr_page (which is page aligned and thus won't
120
* disturb the low bits) to give an offset which can be added to the
121
* (non-page-aligned) vaddr of the eventual memory access to get
122
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
123
}
124
125
if (prot & PAGE_EXEC) {
126
- tn.addr_code = code_address;
127
+ tn.addr_code = address;
128
} else {
129
tn.addr_code = -1;
130
}
131
132
tn.addr_write = -1;
133
if (prot & PAGE_WRITE) {
134
- tn.addr_write = address;
135
- if (memory_region_is_romd(section->mr)) {
136
- /* Use the MMIO path so that the device can switch states. */
137
- tn.addr_write |= TLB_MMIO;
138
- } else if (memory_region_is_ram(section->mr)) {
139
- if (section->readonly) {
140
- tn.addr_write |= TLB_DISCARD_WRITE;
141
- } else if (cpu_physical_memory_is_clean(
142
- memory_region_get_ram_addr(section->mr) + xlat)) {
143
- tn.addr_write |= TLB_NOTDIRTY;
144
- }
145
- }
146
+ tn.addr_write = write_address;
147
if (prot & PAGE_WRITE_INV) {
148
tn.addr_write |= TLB_INVALID_MASK;
149
}
150
diff --git a/exec.c b/exec.c
151
index XXXXXXX..XXXXXXX 100644
152
--- a/exec.c
153
+++ b/exec.c
154
@@ -XXX,XX +XXX,XX @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
155
156
/* Called from RCU critical section */
157
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
158
- MemoryRegionSection *section,
159
- target_ulong vaddr,
160
- hwaddr paddr, hwaddr xlat,
161
- int prot,
162
- target_ulong *address)
163
+ MemoryRegionSection *section)
164
{
165
- hwaddr iotlb;
166
-
167
- if (memory_region_is_ram(section->mr)) {
168
- /* Normal RAM. */
169
- iotlb = memory_region_get_ram_addr(section->mr) + xlat;
170
- } else {
171
- AddressSpaceDispatch *d;
172
-
173
- d = flatview_to_dispatch(section->fv);
174
- iotlb = section - d->map.sections;
175
- iotlb += xlat;
176
- }
177
-
178
- return iotlb;
179
+ AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
180
+ return section - d->map.sections;
181
}
182
#endif /* defined(CONFIG_USER_ONLY) */
183
184
--
30
--
185
2.17.1
31
2.25.1
186
32
187
33
1
All callers pass false to this argument. Remove it and pass the
1
The function is trivial for user-only, but still must be present.
2
constant on to tb_invalidate_phys_page_range__locked.
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
5
Reviewed-by: David Hildenbrand <david@redhat.com>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
accel/tcg/translate-all.h | 3 +--
7
target/hexagon/cpu.h | 9 +++++++++
9
accel/tcg/translate-all.c | 6 ++----
8
1 file changed, 9 insertions(+)
10
exec.c | 4 ++--
11
3 files changed, 5 insertions(+), 8 deletions(-)
12
9
13
diff --git a/accel/tcg/translate-all.h b/accel/tcg/translate-all.h
10
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/accel/tcg/translate-all.h
12
--- a/target/hexagon/cpu.h
16
+++ b/accel/tcg/translate-all.h
13
+++ b/target/hexagon/cpu.h
17
@@ -XXX,XX +XXX,XX @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
14
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
18
void page_collection_unlock(struct page_collection *set);
15
#endif
19
void tb_invalidate_phys_page_fast(struct page_collection *pages,
20
tb_page_addr_t start, int len);
21
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
22
- int is_cpu_write_access);
23
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
24
void tb_check_watchpoint(CPUState *cpu);
25
26
#ifdef CONFIG_USER_ONLY
27
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/accel/tcg/translate-all.c
30
+++ b/accel/tcg/translate-all.c
31
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
32
*
33
* Called with mmap_lock held for user-mode emulation
34
*/
35
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
36
- int is_cpu_write_access)
37
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
38
{
39
struct page_collection *pages;
40
PageDesc *p;
41
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
42
return;
43
}
44
pages = page_collection_lock(start, end);
45
- tb_invalidate_phys_page_range__locked(pages, p, start, end,
46
- is_cpu_write_access);
47
+ tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
48
page_collection_unlock(pages);
49
}
16
}
50
17
51
diff --git a/exec.c b/exec.c
18
+static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
52
index XXXXXXX..XXXXXXX 100644
19
+{
53
--- a/exec.c
20
+#ifdef CONFIG_USER_ONLY
54
+++ b/exec.c
21
+ return MMU_USER_IDX;
55
@@ -XXX,XX +XXX,XX @@ const char *parse_cpu_option(const char *cpu_option)
22
+#else
56
void tb_invalidate_phys_addr(target_ulong addr)
23
+#error System mode not supported on Hexagon yet
57
{
24
+#endif
58
mmap_lock();
25
+}
59
- tb_invalidate_phys_page_range(addr, addr + 1, 0);
26
+
60
+ tb_invalidate_phys_page_range(addr, addr + 1);
27
typedef struct CPUHexagonState CPUArchState;
61
mmap_unlock();
28
typedef HexagonCPU ArchCPU;
62
}
63
64
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
65
return;
66
}
67
ram_addr = memory_region_get_ram_addr(mr) + addr;
68
- tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
69
+ tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
70
rcu_read_unlock();
71
}
72
29
73
--
30
--
74
2.17.1
31
2.25.1
75
32
76
33
1
This forced inlining can result in missing symbols,
1
These functions are much closer to the softmmu helper
2
which makes a debugging build harder to follow.
2
functions, in that they take the complete MemOpIdx,
3
and from that they may enforce required alignment.
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
The previous cpu_ldst.h functions did not have alignment info,
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
and so did not enforce it. Retain this by adding MO_UNALN to
6
Reviewed-by: David Hildenbrand <david@redhat.com>
7
the MemOp that we create in calling the new functions.
7
Reported-by: Peter Maydell <peter.maydell@linaro.org>
8
9
Note that we are not yet enforcing alignment for user-only,
10
but we now have the information with which to do so.
11
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
14
---
10
include/qemu/compiler.h | 11 +++++++++++
15
docs/devel/loads-stores.rst | 52 ++++-
11
accel/tcg/cputlb.c | 4 ++--
16
include/exec/cpu_ldst.h | 245 ++++++++--------------
12
2 files changed, 13 insertions(+), 2 deletions(-)
17
accel/tcg/cputlb.c | 392 ++++++++++++------------------------
18
accel/tcg/user-exec.c | 385 +++++++++++++++--------------------
19
accel/tcg/ldst_common.c.inc | 307 ++++++++++++++++++++++++++++
20
5 files changed, 717 insertions(+), 664 deletions(-)
21
create mode 100644 accel/tcg/ldst_common.c.inc
13
22
14
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
23
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
15
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
16
--- a/include/qemu/compiler.h
25
--- a/docs/devel/loads-stores.rst
17
+++ b/include/qemu/compiler.h
26
+++ b/docs/devel/loads-stores.rst
27
@@ -XXX,XX +XXX,XX @@ Regexes for git grep
28
- ``\<ldn_\([hbl]e\)?_p\>``
29
- ``\<stn_\([hbl]e\)?_p\>``
30
31
-``cpu_{ld,st}*_mmuidx_ra``
32
-~~~~~~~~~~~~~~~~~~~~~~~~~~
33
+``cpu_{ld,st}*_mmu``
34
+~~~~~~~~~~~~~~~~~~~~
35
36
-These functions operate on a guest virtual address plus a context,
37
-known as a "mmu index" or ``mmuidx``, which controls how that virtual
38
-address is translated. The meaning of the indexes are target specific,
39
-but specifying a particular index might be necessary if, for instance,
40
-the helper requires an "always as non-privileged" access rather that
41
-the default access for the current state of the guest CPU.
42
+These functions operate on a guest virtual address, plus a context
43
+known as a "mmu index" which controls how that virtual address is
44
+translated, plus a ``MemOp`` which contains alignment requirements
45
+among other things. The ``MemOp`` and mmu index are combined into
46
+a single argument of type ``MemOpIdx``.
47
+
48
+The meaning of the indexes are target specific, but specifying a
49
+particular index might be necessary if, for instance, the helper
50
+requires a "always as non-privileged" access rather than the
51
+default access for the current state of the guest CPU.
52
53
These functions may cause a guest CPU exception to be taken
54
(e.g. for an alignment fault or MMU fault) which will result in
55
@@ -XXX,XX +XXX,XX @@ function, which is a return address into the generated code [#gpc]_.
56
57
Function names follow the pattern:
58
59
+load: ``cpu_ld{size}{end}_mmu(env, ptr, oi, retaddr)``
60
+
61
+store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)``
62
+
63
+``size``
64
+ - ``b`` : 8 bits
65
+ - ``w`` : 16 bits
66
+ - ``l`` : 32 bits
67
+ - ``q`` : 64 bits
68
+
69
+``end``
70
+ - (empty) : for target endian, or 8 bit sizes
71
+ - ``_be`` : big endian
72
+ - ``_le`` : little endian
73
+
74
+Regexes for git grep:
75
+ - ``\<cpu_ld[bwlq](_[bl]e)\?_mmu\>``
76
+ - ``\<cpu_st[bwlq](_[bl]e)\?_mmu\>``
77
+
78
+
79
+``cpu_{ld,st}*_mmuidx_ra``
80
+~~~~~~~~~~~~~~~~~~~~~~~~~~
81
+
82
+These functions work like the ``cpu_{ld,st}_mmu`` functions except
83
+that the ``mmuidx`` parameter is not combined with a ``MemOp``,
84
+and therefore there is no required alignment supplied or enforced.
85
+
86
+Function names follow the pattern:
87
+
88
load: ``cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmuidx, retaddr)``
89
90
store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
91
@@ -XXX,XX +XXX,XX @@ of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.
92
93
These are generally the preferred way to do accesses by guest
94
virtual address from helper functions, unless the access should
95
-be performed with a context other than the default.
96
+be performed with a context other than the default, or alignment
97
+should be enforced for the access.
98
99
Function names follow the pattern:
100
101
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
102
index XXXXXXX..XXXXXXX 100644
103
--- a/include/exec/cpu_ldst.h
104
+++ b/include/exec/cpu_ldst.h
18
@@ -XXX,XX +XXX,XX @@
105
@@ -XXX,XX +XXX,XX @@
19
# define QEMU_NONSTRING
106
* load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
107
* cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
108
* cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
109
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
110
*
111
* store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
112
* cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
113
* cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
114
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
115
*
116
* sign is:
117
* (empty): for 32 and 64 bit sizes
118
@@ -XXX,XX +XXX,XX @@
119
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
120
* the index to use; the "data" and "code" suffixes take the index from
121
* cpu_mmu_index().
122
+ *
123
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
124
+ * MemOp including alignment requirements. The alignment will be enforced.
125
*/
126
#ifndef CPU_LDST_H
127
#define CPU_LDST_H
128
129
+#include "exec/memopidx.h"
130
+
131
#if defined(CONFIG_USER_ONLY)
132
/* sparc32plus has 64bit long but 32bit space address
133
* this can make bad result with g2h() and h2g()
134
@@ -XXX,XX +XXX,XX @@ typedef target_ulong abi_ptr;
135
136
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
137
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
138
-
139
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
140
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
141
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
142
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
143
-
144
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
145
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
146
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
147
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
148
149
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
150
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
151
-
152
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
153
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
154
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
155
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
156
-
157
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
158
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
159
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
160
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
161
162
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
163
-
164
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
165
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
166
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
167
-
168
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
169
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
170
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
171
172
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
173
uint32_t val, uintptr_t ra);
174
-
175
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
176
uint32_t val, uintptr_t ra);
177
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
178
uint32_t val, uintptr_t ra);
179
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
180
uint64_t val, uintptr_t ra);
181
-
182
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
183
uint32_t val, uintptr_t ra);
184
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
185
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
186
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
187
uint64_t val, uintptr_t ra);
188
189
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
190
+ int mmu_idx, uintptr_t ra);
191
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
192
+ int mmu_idx, uintptr_t ra);
193
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
194
+ int mmu_idx, uintptr_t ra);
195
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
196
+ int mmu_idx, uintptr_t ra);
197
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
198
+ int mmu_idx, uintptr_t ra);
199
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
200
+ int mmu_idx, uintptr_t ra);
201
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
202
+ int mmu_idx, uintptr_t ra);
203
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
204
+ int mmu_idx, uintptr_t ra);
205
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
206
+ int mmu_idx, uintptr_t ra);
207
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
208
+ int mmu_idx, uintptr_t ra);
209
+
210
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
211
+ int mmu_idx, uintptr_t ra);
212
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
213
+ int mmu_idx, uintptr_t ra);
214
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
215
+ int mmu_idx, uintptr_t ra);
216
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
217
+ int mmu_idx, uintptr_t ra);
218
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
219
+ int mmu_idx, uintptr_t ra);
220
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
221
+ int mmu_idx, uintptr_t ra);
222
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
223
+ int mmu_idx, uintptr_t ra);
224
+
225
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
226
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
227
+ MemOpIdx oi, uintptr_t ra);
228
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
229
+ MemOpIdx oi, uintptr_t ra);
230
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
231
+ MemOpIdx oi, uintptr_t ra);
232
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
233
+ MemOpIdx oi, uintptr_t ra);
234
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
235
+ MemOpIdx oi, uintptr_t ra);
236
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
237
+ MemOpIdx oi, uintptr_t ra);
238
+
239
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
240
+ MemOpIdx oi, uintptr_t ra);
241
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
242
+ MemOpIdx oi, uintptr_t ra);
243
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
244
+ MemOpIdx oi, uintptr_t ra);
245
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
246
+ MemOpIdx oi, uintptr_t ra);
247
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
248
+ MemOpIdx oi, uintptr_t ra);
249
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
250
+ MemOpIdx oi, uintptr_t ra);
251
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
252
+ MemOpIdx oi, uintptr_t ra);
253
+
254
#if defined(CONFIG_USER_ONLY)
255
256
extern __thread uintptr_t helper_retaddr;
257
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
258
helper_retaddr = 0;
259
}
260
261
-/*
262
- * Provide the same *_mmuidx_ra interface as for softmmu.
263
- * The mmu_idx argument is ignored.
264
- */
265
-
266
-static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
267
- int mmu_idx, uintptr_t ra)
268
-{
269
- return cpu_ldub_data_ra(env, addr, ra);
270
-}
271
-
272
-static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
273
- int mmu_idx, uintptr_t ra)
274
-{
275
- return cpu_ldsb_data_ra(env, addr, ra);
276
-}
277
-
278
-static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
279
- int mmu_idx, uintptr_t ra)
280
-{
281
- return cpu_lduw_be_data_ra(env, addr, ra);
282
-}
283
-
284
-static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
285
- int mmu_idx, uintptr_t ra)
286
-{
287
- return cpu_ldsw_be_data_ra(env, addr, ra);
288
-}
289
-
290
-static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
291
- int mmu_idx, uintptr_t ra)
292
-{
293
- return cpu_ldl_be_data_ra(env, addr, ra);
294
-}
295
-
296
-static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
297
- int mmu_idx, uintptr_t ra)
298
-{
299
- return cpu_ldq_be_data_ra(env, addr, ra);
300
-}
301
-
302
-static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
303
- int mmu_idx, uintptr_t ra)
304
-{
305
- return cpu_lduw_le_data_ra(env, addr, ra);
306
-}
307
-
308
-static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
309
- int mmu_idx, uintptr_t ra)
310
-{
311
- return cpu_ldsw_le_data_ra(env, addr, ra);
312
-}
313
-
314
-static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
315
- int mmu_idx, uintptr_t ra)
316
-{
317
- return cpu_ldl_le_data_ra(env, addr, ra);
318
-}
319
-
320
-static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
321
- int mmu_idx, uintptr_t ra)
322
-{
323
- return cpu_ldq_le_data_ra(env, addr, ra);
324
-}
325
-
326
-static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
327
- uint32_t val, int mmu_idx, uintptr_t ra)
328
-{
329
- cpu_stb_data_ra(env, addr, val, ra);
330
-}
331
-
332
-static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
333
- uint32_t val, int mmu_idx,
334
- uintptr_t ra)
335
-{
336
- cpu_stw_be_data_ra(env, addr, val, ra);
337
-}
338
-
339
-static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
340
- uint32_t val, int mmu_idx,
341
- uintptr_t ra)
342
-{
343
- cpu_stl_be_data_ra(env, addr, val, ra);
344
-}
345
-
346
-static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
347
- uint64_t val, int mmu_idx,
348
- uintptr_t ra)
349
-{
350
- cpu_stq_be_data_ra(env, addr, val, ra);
351
-}
352
-
353
-static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
354
- uint32_t val, int mmu_idx,
355
- uintptr_t ra)
356
-{
357
- cpu_stw_le_data_ra(env, addr, val, ra);
358
-}
359
-
360
-static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
361
- uint32_t val, int mmu_idx,
362
- uintptr_t ra)
363
-{
364
- cpu_stl_le_data_ra(env, addr, val, ra);
365
-}
366
-
367
-static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
368
- uint64_t val, int mmu_idx,
369
- uintptr_t ra)
370
-{
371
- cpu_stq_le_data_ra(env, addr, val, ra);
372
-}
373
-
374
#else
375
376
/* Needed for TCG_OVERSIZED_GUEST */
377
@@ -XXX,XX +XXX,XX @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
378
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
379
}
380
381
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
382
- int mmu_idx, uintptr_t ra);
383
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
384
- int mmu_idx, uintptr_t ra);
385
-
386
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
387
- int mmu_idx, uintptr_t ra);
388
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
389
- int mmu_idx, uintptr_t ra);
390
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
391
- int mmu_idx, uintptr_t ra);
392
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
393
- int mmu_idx, uintptr_t ra);
394
-
395
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
396
- int mmu_idx, uintptr_t ra);
397
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
398
- int mmu_idx, uintptr_t ra);
399
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
400
- int mmu_idx, uintptr_t ra);
401
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
402
- int mmu_idx, uintptr_t ra);
403
-
404
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
405
- int mmu_idx, uintptr_t retaddr);
406
-
407
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
408
- int mmu_idx, uintptr_t retaddr);
409
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
410
- int mmu_idx, uintptr_t retaddr);
411
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
412
- int mmu_idx, uintptr_t retaddr);
413
-
414
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
415
- int mmu_idx, uintptr_t retaddr);
416
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
417
- int mmu_idx, uintptr_t retaddr);
418
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
419
- int mmu_idx, uintptr_t retaddr);
420
-
421
#endif /* defined(CONFIG_USER_ONLY) */
422
423
#ifdef TARGET_WORDS_BIGENDIAN
424
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
425
# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
426
# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
427
# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
428
+# define cpu_ldw_mmu cpu_ldw_be_mmu
429
+# define cpu_ldl_mmu cpu_ldl_be_mmu
430
+# define cpu_ldq_mmu cpu_ldq_be_mmu
431
# define cpu_stw_data cpu_stw_be_data
432
# define cpu_stl_data cpu_stl_be_data
433
# define cpu_stq_data cpu_stq_be_data
434
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
435
# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
436
# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
437
# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
438
+# define cpu_stw_mmu cpu_stw_be_mmu
439
+# define cpu_stl_mmu cpu_stl_be_mmu
440
+# define cpu_stq_mmu cpu_stq_be_mmu
441
#else
442
# define cpu_lduw_data cpu_lduw_le_data
443
# define cpu_ldsw_data cpu_ldsw_le_data
444
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
445
# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
446
# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
447
# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
448
+# define cpu_ldw_mmu cpu_ldw_le_mmu
449
+# define cpu_ldl_mmu cpu_ldl_le_mmu
450
+# define cpu_ldq_mmu cpu_ldq_le_mmu
451
# define cpu_stw_data cpu_stw_le_data
452
# define cpu_stl_data cpu_stl_le_data
453
# define cpu_stq_data cpu_stq_le_data
454
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
455
# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
456
# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
457
# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
458
+# define cpu_stw_mmu cpu_stw_le_mmu
459
+# define cpu_stl_mmu cpu_stl_le_mmu
460
+# define cpu_stq_mmu cpu_stq_le_mmu
20
#endif
461
#endif
21
462
22
+/*
463
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
23
+ * Forced inlining may be desired to encourage constant propagation
24
+ * of function parameters. However, it can also make debugging harder,
25
+ * so disable it for a non-optimizing build.
26
+ */
27
+#if defined(__OPTIMIZE__)
28
+#define QEMU_ALWAYS_INLINE __attribute__((always_inline))
29
+#else
30
+#define QEMU_ALWAYS_INLINE
31
+#endif
32
+
33
/* Implement C11 _Generic via GCC builtins. Example:
34
*
35
* QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
36
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
464
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
37
index XXXXXXX..XXXXXXX 100644
465
index XXXXXXX..XXXXXXX 100644
38
--- a/accel/tcg/cputlb.c
466
--- a/accel/tcg/cputlb.c
39
+++ b/accel/tcg/cputlb.c
467
+++ b/accel/tcg/cputlb.c
40
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
468
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
41
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
469
cpu_loop_exit_atomic(env_cpu(env), retaddr);
42
TCGMemOpIdx oi, uintptr_t retaddr);
470
}
43
471
44
-static inline uint64_t __attribute__((always_inline))
472
+/*
45
+static inline uint64_t QEMU_ALWAYS_INLINE
473
+ * Verify that we have passed the correct MemOp to the correct function.
46
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
474
+ *
47
uintptr_t retaddr, MemOp op, bool code_read,
475
+ * In the case of the helper_*_mmu functions, we will have done this by
48
FullLoadHelper *full_load)
476
+ * using the MemOp to look up the helper during code generation.
477
+ *
478
+ * In the case of the cpu_*_mmu functions, this is up to the caller.
479
+ * We could present one function to target code, and dispatch based on
480
+ * the MemOp, but so far we have worked hard to avoid an indirect function
481
+ * call along the memory path.
482
+ */
483
+static void validate_memop(MemOpIdx oi, MemOp expected)
484
+{
485
+#ifdef CONFIG_DEBUG_TCG
486
+ MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
487
+ assert(have == expected);
488
+#endif
489
+}
490
+
491
/*
492
* Load Helpers
493
*
494
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
495
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
496
MemOpIdx oi, uintptr_t retaddr)
497
{
498
+ validate_memop(oi, MO_UB);
499
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
500
}
501
502
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
503
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
504
MemOpIdx oi, uintptr_t retaddr)
505
{
506
+ validate_memop(oi, MO_LEUW);
507
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
508
full_le_lduw_mmu);
509
}
510
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
511
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
512
MemOpIdx oi, uintptr_t retaddr)
513
{
514
+ validate_memop(oi, MO_BEUW);
515
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
516
full_be_lduw_mmu);
517
}
518
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
519
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
520
MemOpIdx oi, uintptr_t retaddr)
521
{
522
+ validate_memop(oi, MO_LEUL);
523
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
524
full_le_ldul_mmu);
525
}
526
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
527
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
528
MemOpIdx oi, uintptr_t retaddr)
529
{
530
+ validate_memop(oi, MO_BEUL);
531
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
532
full_be_ldul_mmu);
533
}
534
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
535
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
536
MemOpIdx oi, uintptr_t retaddr)
537
{
538
+ validate_memop(oi, MO_LEQ);
539
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
540
helper_le_ldq_mmu);
541
}
542
@@ -XXX,XX +XXX,XX @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
543
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
544
MemOpIdx oi, uintptr_t retaddr)
545
{
546
+ validate_memop(oi, MO_BEQ);
547
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
548
helper_be_ldq_mmu);
549
}
49
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
550
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
50
* Store Helpers
51
*/
551
*/
52
552
53
-static inline void __attribute__((always_inline))
553
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
54
+static inline void QEMU_ALWAYS_INLINE
554
- int mmu_idx, uintptr_t retaddr,
55
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
555
- MemOp op, FullLoadHelper *full_load)
56
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
556
+ MemOpIdx oi, uintptr_t retaddr,
57
{
557
+ FullLoadHelper *full_load)
558
{
559
- MemOpIdx oi = make_memop_idx(op, mmu_idx);
560
uint64_t ret;
561
562
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
563
-
564
ret = full_load(env, addr, oi, retaddr);
565
-
566
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
567
-
568
return ret;
569
}
570
571
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
572
- int mmu_idx, uintptr_t ra)
573
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
574
{
575
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
576
+ return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
577
}
578
579
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
580
- int mmu_idx, uintptr_t ra)
581
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
582
+ MemOpIdx oi, uintptr_t ra)
583
{
584
- return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
585
+ return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
586
}
587
588
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
589
- int mmu_idx, uintptr_t ra)
590
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
591
+ MemOpIdx oi, uintptr_t ra)
592
{
593
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
594
+ return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
595
}
596
597
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
598
- int mmu_idx, uintptr_t ra)
599
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
600
+ MemOpIdx oi, uintptr_t ra)
601
{
602
- return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
603
+ return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu);
604
}
605
606
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
607
- int mmu_idx, uintptr_t ra)
608
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
609
+ MemOpIdx oi, uintptr_t ra)
610
{
611
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
612
+ return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
613
}
614
615
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
616
- int mmu_idx, uintptr_t ra)
617
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
618
+ MemOpIdx oi, uintptr_t ra)
619
{
620
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
621
+ return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
622
}
623
624
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
625
- int mmu_idx, uintptr_t ra)
626
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
627
+ MemOpIdx oi, uintptr_t ra)
628
{
629
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
630
-}
631
-
632
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
633
- int mmu_idx, uintptr_t ra)
634
-{
635
- return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
636
-}
637
-
638
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
639
- int mmu_idx, uintptr_t ra)
640
-{
641
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
642
-}
643
-
644
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
645
- int mmu_idx, uintptr_t ra)
646
-{
647
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
648
-}
649
-
650
-uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
651
- uintptr_t retaddr)
652
-{
653
- return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
654
-}
655
-
656
-int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
657
-{
658
- return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
659
-}
660
-
661
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
662
- uintptr_t retaddr)
663
-{
664
- return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
665
-}
666
-
667
-int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
668
-{
669
- return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
670
-}
671
-
672
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
673
- uintptr_t retaddr)
674
-{
675
- return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
676
-}
677
-
678
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
679
- uintptr_t retaddr)
680
-{
681
- return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
682
-}
683
-
684
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
685
- uintptr_t retaddr)
686
-{
687
- return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
688
-}
689
-
690
-int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
691
-{
692
- return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
693
-}
694
-
695
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
696
- uintptr_t retaddr)
697
-{
698
- return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
699
-}
700
-
701
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
702
- uintptr_t retaddr)
703
-{
704
- return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
705
-}
706
-
707
-uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
708
-{
709
- return cpu_ldub_data_ra(env, ptr, 0);
710
-}
711
-
712
-int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
713
-{
714
- return cpu_ldsb_data_ra(env, ptr, 0);
715
-}
716
-
717
-uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
718
-{
719
- return cpu_lduw_be_data_ra(env, ptr, 0);
720
-}
721
-
722
-int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
723
-{
724
- return cpu_ldsw_be_data_ra(env, ptr, 0);
725
-}
726
-
727
-uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
728
-{
729
- return cpu_ldl_be_data_ra(env, ptr, 0);
730
-}
731
-
732
-uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
733
-{
734
- return cpu_ldq_be_data_ra(env, ptr, 0);
735
-}
736
-
737
-uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
738
-{
739
- return cpu_lduw_le_data_ra(env, ptr, 0);
740
-}
741
-
742
-int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
743
-{
744
- return cpu_ldsw_le_data_ra(env, ptr, 0);
745
-}
746
-
747
-uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
748
-{
749
- return cpu_ldl_le_data_ra(env, ptr, 0);
750
-}
751
-
752
-uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
753
-{
754
- return cpu_ldq_le_data_ra(env, ptr, 0);
755
+ return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
756
}
757
758
/*
759
@@ -XXX,XX +XXX,XX @@ store_memop(void *haddr, uint64_t val, MemOp op)
760
}
761
}
762
763
+static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
764
+ MemOpIdx oi, uintptr_t retaddr);
765
+
766
static void __attribute__((noinline))
767
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
768
uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
769
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
770
for (i = 0; i < size; ++i) {
771
/* Big-endian extract. */
772
uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
773
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
774
+ full_stb_mmu(env, addr + i, val8, oi, retaddr);
775
}
776
} else {
777
for (i = 0; i < size; ++i) {
778
/* Little-endian extract. */
779
uint8_t val8 = val >> (i * 8);
780
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
781
+ full_stb_mmu(env, addr + i, val8, oi, retaddr);
782
}
783
}
784
}
785
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
786
store_memop(haddr, val, op);
787
}
788
789
-void __attribute__((noinline))
790
-helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
791
- MemOpIdx oi, uintptr_t retaddr)
792
+static void __attribute__((noinline))
793
+full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
794
+ MemOpIdx oi, uintptr_t retaddr)
795
{
796
+ validate_memop(oi, MO_UB);
797
store_helper(env, addr, val, oi, retaddr, MO_UB);
798
}
799
800
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
801
+ MemOpIdx oi, uintptr_t retaddr)
802
+{
803
+ full_stb_mmu(env, addr, val, oi, retaddr);
804
+}
805
+
806
+static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
807
+ MemOpIdx oi, uintptr_t retaddr)
808
+{
809
+ validate_memop(oi, MO_LEUW);
810
+ store_helper(env, addr, val, oi, retaddr, MO_LEUW);
811
+}
812
+
813
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
814
MemOpIdx oi, uintptr_t retaddr)
815
{
816
- store_helper(env, addr, val, oi, retaddr, MO_LEUW);
817
+ full_le_stw_mmu(env, addr, val, oi, retaddr);
818
+}
819
+
820
+static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
821
+ MemOpIdx oi, uintptr_t retaddr)
822
+{
823
+ validate_memop(oi, MO_BEUW);
824
+ store_helper(env, addr, val, oi, retaddr, MO_BEUW);
825
}
826
827
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
828
MemOpIdx oi, uintptr_t retaddr)
829
{
830
- store_helper(env, addr, val, oi, retaddr, MO_BEUW);
831
+ full_be_stw_mmu(env, addr, val, oi, retaddr);
832
+}
833
+
834
+static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
835
+ MemOpIdx oi, uintptr_t retaddr)
836
+{
837
+ validate_memop(oi, MO_LEUL);
838
+ store_helper(env, addr, val, oi, retaddr, MO_LEUL);
839
}
840
841
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
842
MemOpIdx oi, uintptr_t retaddr)
843
{
844
- store_helper(env, addr, val, oi, retaddr, MO_LEUL);
845
+ full_le_stl_mmu(env, addr, val, oi, retaddr);
846
+}
847
+
848
+static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
849
+ MemOpIdx oi, uintptr_t retaddr)
850
+{
851
+ validate_memop(oi, MO_BEUL);
852
+ store_helper(env, addr, val, oi, retaddr, MO_BEUL);
853
}
854
855
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
856
MemOpIdx oi, uintptr_t retaddr)
857
{
858
- store_helper(env, addr, val, oi, retaddr, MO_BEUL);
859
+ full_be_stl_mmu(env, addr, val, oi, retaddr);
860
}
861
862
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
863
MemOpIdx oi, uintptr_t retaddr)
864
{
865
+ validate_memop(oi, MO_LEQ);
866
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
867
}
868
869
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
870
MemOpIdx oi, uintptr_t retaddr)
871
{
872
+ validate_memop(oi, MO_BEQ);
873
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
874
}
875
876
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
877
* Store Helpers for cpu_ldst.h
878
*/
879
880
-static inline void QEMU_ALWAYS_INLINE
881
-cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
882
- int mmu_idx, uintptr_t retaddr, MemOp op)
883
+typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
884
+ uint64_t val, MemOpIdx oi, uintptr_t retaddr);
885
+
886
+static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
887
+ uint64_t val, MemOpIdx oi, uintptr_t ra,
888
+ FullStoreHelper *full_store)
889
{
890
- MemOpIdx oi = make_memop_idx(op, mmu_idx);
891
-
892
trace_guest_st_before_exec(env_cpu(env), addr, oi);
893
-
894
- store_helper(env, addr, val, oi, retaddr, op);
895
-
896
+ full_store(env, addr, val, oi, ra);
897
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
898
}
899
900
-void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
901
- int mmu_idx, uintptr_t retaddr)
902
+void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
903
+ MemOpIdx oi, uintptr_t retaddr)
904
{
905
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
906
+ cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
907
}
908
909
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
910
- int mmu_idx, uintptr_t retaddr)
911
+void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
912
+ MemOpIdx oi, uintptr_t retaddr)
913
{
914
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
915
+ cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
916
}
917
918
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
919
- int mmu_idx, uintptr_t retaddr)
920
+void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
921
+ MemOpIdx oi, uintptr_t retaddr)
922
{
923
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
924
+ cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
925
}
926
927
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
928
- int mmu_idx, uintptr_t retaddr)
929
+void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
930
+ MemOpIdx oi, uintptr_t retaddr)
931
{
932
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
933
+ cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
934
}
935
936
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
937
- int mmu_idx, uintptr_t retaddr)
938
+void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
939
+ MemOpIdx oi, uintptr_t retaddr)
940
{
941
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
942
+ cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
943
}
944
945
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
946
- int mmu_idx, uintptr_t retaddr)
947
+void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
948
+ MemOpIdx oi, uintptr_t retaddr)
949
{
950
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
951
+ cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
952
}
953
954
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
955
- int mmu_idx, uintptr_t retaddr)
956
+void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
957
+ MemOpIdx oi, uintptr_t retaddr)
958
{
959
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
960
+ cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
961
}
962
963
-void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
964
- uint32_t val, uintptr_t retaddr)
965
-{
966
- cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
967
-}
968
-
969
-void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
970
- uint32_t val, uintptr_t retaddr)
971
-{
972
- cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
973
-}
974
-
975
-void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
976
- uint32_t val, uintptr_t retaddr)
977
-{
978
- cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
979
-}
980
-
981
-void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
982
- uint64_t val, uintptr_t retaddr)
983
-{
984
- cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
985
-}
986
-
987
-void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
988
- uint32_t val, uintptr_t retaddr)
989
-{
990
- cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
991
-}
992
-
993
-void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
994
- uint32_t val, uintptr_t retaddr)
995
-{
996
- cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
997
-}
998
-
999
-void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
1000
- uint64_t val, uintptr_t retaddr)
1001
-{
1002
- cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
1003
-}
1004
-
1005
-void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1006
-{
1007
- cpu_stb_data_ra(env, ptr, val, 0);
1008
-}
1009
-
1010
-void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1011
-{
1012
- cpu_stw_be_data_ra(env, ptr, val, 0);
1013
-}
1014
-
1015
-void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1016
-{
1017
- cpu_stl_be_data_ra(env, ptr, val, 0);
1018
-}
1019
-
1020
-void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
1021
-{
1022
- cpu_stq_be_data_ra(env, ptr, val, 0);
1023
-}
1024
-
1025
-void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1026
-{
1027
- cpu_stw_le_data_ra(env, ptr, val, 0);
1028
-}
1029
-
1030
-void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1031
-{
1032
- cpu_stl_le_data_ra(env, ptr, val, 0);
1033
-}
1034
-
1035
-void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
1036
-{
1037
- cpu_stq_le_data_ra(env, ptr, val, 0);
1038
-}
1039
+#include "ldst_common.c.inc"
1040
1041
/*
1042
* First set of functions passes in OI and RETADDR.
1043
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
1044
index XXXXXXX..XXXXXXX 100644
1045
--- a/accel/tcg/user-exec.c
1046
+++ b/accel/tcg/user-exec.c
1047
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
1048
1049
/* The softmmu versions of these helpers are in cputlb.c. */
1050
1051
-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
1052
+/*
1053
+ * Verify that we have passed the correct MemOp to the correct function.
1054
+ *
1055
+ * We could present one function to target code, and dispatch based on
1056
+ * the MemOp, but so far we have worked hard to avoid an indirect function
1057
+ * call along the memory path.
1058
+ */
1059
+static void validate_memop(MemOpIdx oi, MemOp expected)
1060
{
1061
- MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
1062
- uint32_t ret;
1063
+#ifdef CONFIG_DEBUG_TCG
1064
+ MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1065
+ assert(have == expected);
1066
+#endif
1067
+}
1068
1069
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1070
- ret = ldub_p(g2h(env_cpu(env), ptr));
1071
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1072
+static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
1073
+ MemOpIdx oi, uintptr_t ra, MMUAccessType type)
1074
+{
1075
+ void *ret;
1076
+
1077
+ /* TODO: Enforce guest required alignment. */
1078
+
1079
+ ret = g2h(env_cpu(env), addr);
1080
+ set_helper_retaddr(ra);
1081
return ret;
1082
}
1083
1084
-int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
1085
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
1086
+ MemOpIdx oi, uintptr_t ra)
1087
{
1088
- return (int8_t)cpu_ldub_data(env, ptr);
1089
-}
1090
+ void *haddr;
1091
+ uint8_t ret;
1092
1093
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
1094
-{
1095
- MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
1096
- uint32_t ret;
1097
-
1098
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1099
- ret = lduw_be_p(g2h(env_cpu(env), ptr));
1100
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1101
+ validate_memop(oi, MO_UB);
1102
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1103
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1104
+ ret = ldub_p(haddr);
1105
+ clear_helper_retaddr();
1106
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1107
return ret;
1108
}
1109
1110
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
1111
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
1112
+ MemOpIdx oi, uintptr_t ra)
1113
{
1114
- return (int16_t)cpu_lduw_be_data(env, ptr);
1115
-}
1116
+ void *haddr;
1117
+ uint16_t ret;
1118
1119
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
1120
-{
1121
- MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
1122
- uint32_t ret;
1123
-
1124
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1125
- ret = ldl_be_p(g2h(env_cpu(env), ptr));
1126
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1127
+ validate_memop(oi, MO_BEUW);
1128
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1129
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1130
+ ret = lduw_be_p(haddr);
1131
+ clear_helper_retaddr();
1132
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1133
return ret;
1134
}
1135
1136
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
1137
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
1138
+ MemOpIdx oi, uintptr_t ra)
1139
{
1140
- MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
1141
+ void *haddr;
1142
+ uint32_t ret;
1143
+
1144
+ validate_memop(oi, MO_BEUL);
1145
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1146
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1147
+ ret = ldl_be_p(haddr);
1148
+ clear_helper_retaddr();
1149
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1150
+ return ret;
1151
+}
1152
+
1153
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
1154
+ MemOpIdx oi, uintptr_t ra)
1155
+{
1156
+ void *haddr;
1157
uint64_t ret;
1158
1159
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1160
- ret = ldq_be_p(g2h(env_cpu(env), ptr));
1161
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1162
+ validate_memop(oi, MO_BEQ);
1163
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1164
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1165
+ ret = ldq_be_p(haddr);
1166
+ clear_helper_retaddr();
1167
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1168
return ret;
1169
}
1170
1171
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
1172
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
1173
+ MemOpIdx oi, uintptr_t ra)
1174
{
1175
- MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
1176
+ void *haddr;
1177
+ uint16_t ret;
1178
+
1179
+ validate_memop(oi, MO_LEUW);
1180
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1181
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1182
+ ret = lduw_le_p(haddr);
1183
+ clear_helper_retaddr();
1184
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1185
+ return ret;
1186
+}
1187
+
1188
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
1189
+ MemOpIdx oi, uintptr_t ra)
1190
+{
1191
+ void *haddr;
1192
uint32_t ret;
1193
1194
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1195
- ret = lduw_le_p(g2h(env_cpu(env), ptr));
1196
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1197
+ validate_memop(oi, MO_LEUL);
1198
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1199
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1200
+ ret = ldl_le_p(haddr);
1201
+ clear_helper_retaddr();
1202
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1203
return ret;
1204
}
1205
1206
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
1207
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
1208
+ MemOpIdx oi, uintptr_t ra)
1209
{
1210
- return (int16_t)cpu_lduw_le_data(env, ptr);
1211
-}
1212
-
1213
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
1214
-{
1215
- MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
1216
- uint32_t ret;
1217
-
1218
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1219
- ret = ldl_le_p(g2h(env_cpu(env), ptr));
1220
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1221
- return ret;
1222
-}
1223
-
1224
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
1225
-{
1226
- MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
1227
+ void *haddr;
1228
uint64_t ret;
1229
1230
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1231
- ret = ldq_le_p(g2h(env_cpu(env), ptr));
1232
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1233
+ validate_memop(oi, MO_LEQ);
1234
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1235
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1236
+ ret = ldq_le_p(haddr);
1237
+ clear_helper_retaddr();
1238
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1239
return ret;
1240
}
1241
1242
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1243
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
1244
+ MemOpIdx oi, uintptr_t ra)
1245
{
1246
- uint32_t ret;
1247
+ void *haddr;
1248
1249
- set_helper_retaddr(retaddr);
1250
- ret = cpu_ldub_data(env, ptr);
1251
+ validate_memop(oi, MO_UB);
1252
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1253
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1254
+ stb_p(haddr, val);
1255
clear_helper_retaddr();
1256
- return ret;
1257
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1258
}
1259
1260
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1261
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1262
+ MemOpIdx oi, uintptr_t ra)
1263
{
1264
- return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
1265
-}
1266
+ void *haddr;
1267
1268
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1269
-{
1270
- uint32_t ret;
1271
-
1272
- set_helper_retaddr(retaddr);
1273
- ret = cpu_lduw_be_data(env, ptr);
1274
+ validate_memop(oi, MO_BEUW);
1275
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1276
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1277
+ stw_be_p(haddr, val);
1278
clear_helper_retaddr();
1279
- return ret;
1280
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1281
}
1282
1283
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1284
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1285
+ MemOpIdx oi, uintptr_t ra)
1286
{
1287
- return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
1288
-}
1289
+ void *haddr;
1290
1291
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1292
-{
1293
- uint32_t ret;
1294
-
1295
- set_helper_retaddr(retaddr);
1296
- ret = cpu_ldl_be_data(env, ptr);
1297
+ validate_memop(oi, MO_BEUL);
1298
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1299
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1300
+ stl_be_p(haddr, val);
1301
clear_helper_retaddr();
1302
- return ret;
1303
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1304
}
1305
1306
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1307
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1308
+ MemOpIdx oi, uintptr_t ra)
1309
{
1310
- uint64_t ret;
1311
+ void *haddr;
1312
1313
- set_helper_retaddr(retaddr);
1314
- ret = cpu_ldq_be_data(env, ptr);
1315
+ validate_memop(oi, MO_BEQ);
1316
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1317
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1318
+ stq_be_p(haddr, val);
1319
clear_helper_retaddr();
1320
- return ret;
1321
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1322
}
1323
1324
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1325
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1326
+ MemOpIdx oi, uintptr_t ra)
1327
{
1328
- uint32_t ret;
1329
+ void *haddr;
1330
1331
- set_helper_retaddr(retaddr);
1332
- ret = cpu_lduw_le_data(env, ptr);
1333
+ validate_memop(oi, MO_LEUW);
1334
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1335
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1336
+ stw_le_p(haddr, val);
1337
clear_helper_retaddr();
1338
- return ret;
1339
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1340
}
1341
1342
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1343
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1344
+ MemOpIdx oi, uintptr_t ra)
1345
{
1346
- return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
1347
-}
1348
+ void *haddr;
1349
1350
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1351
-{
1352
- uint32_t ret;
1353
-
1354
- set_helper_retaddr(retaddr);
1355
- ret = cpu_ldl_le_data(env, ptr);
1356
+ validate_memop(oi, MO_LEUL);
1357
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1358
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1359
+ stl_le_p(haddr, val);
1360
clear_helper_retaddr();
1361
- return ret;
1362
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1363
}
1364
1365
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1366
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1367
+ MemOpIdx oi, uintptr_t ra)
1368
{
1369
- uint64_t ret;
1370
+ void *haddr;
1371
1372
- set_helper_retaddr(retaddr);
1373
- ret = cpu_ldq_le_data(env, ptr);
1374
- clear_helper_retaddr();
1375
- return ret;
1376
-}
1377
-
1378
-void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1379
-{
1380
- MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
1381
-
1382
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1383
- stb_p(g2h(env_cpu(env), ptr), val);
1384
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1385
-}
1386
-
1387
-void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1388
-{
1389
- MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
1390
-
1391
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1392
- stw_be_p(g2h(env_cpu(env), ptr), val);
1393
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1394
-}
1395
-
1396
-void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1397
-{
1398
- MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
1399
-
1400
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1401
- stl_be_p(g2h(env_cpu(env), ptr), val);
1402
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1403
-}
1404
-
1405
-void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1406
-{
1407
- MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
1408
-
1409
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1410
- stq_be_p(g2h(env_cpu(env), ptr), val);
1411
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1412
-}
1413
-
1414
-void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1415
-{
1416
- MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
1417
-
1418
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1419
- stw_le_p(g2h(env_cpu(env), ptr), val);
1420
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1421
-}
1422
-
1423
-void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1424
-{
1425
- MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
1426
-
1427
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1428
- stl_le_p(g2h(env_cpu(env), ptr), val);
1429
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1430
-}
1431
-
1432
-void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1433
-{
1434
- MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
1435
-
1436
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1437
- stq_le_p(g2h(env_cpu(env), ptr), val);
1438
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1439
-}
1440
-
1441
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
1442
- uint32_t val, uintptr_t retaddr)
1443
-{
1444
- set_helper_retaddr(retaddr);
1445
- cpu_stb_data(env, ptr, val);
1446
- clear_helper_retaddr();
1447
-}
1448
-
1449
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
1450
- uint32_t val, uintptr_t retaddr)
1451
-{
1452
- set_helper_retaddr(retaddr);
1453
- cpu_stw_be_data(env, ptr, val);
1454
- clear_helper_retaddr();
1455
-}
1456
-
1457
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
1458
- uint32_t val, uintptr_t retaddr)
1459
-{
1460
- set_helper_retaddr(retaddr);
1461
- cpu_stl_be_data(env, ptr, val);
1462
- clear_helper_retaddr();
1463
-}
1464
-
1465
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
1466
- uint64_t val, uintptr_t retaddr)
1467
-{
1468
- set_helper_retaddr(retaddr);
1469
- cpu_stq_be_data(env, ptr, val);
1470
- clear_helper_retaddr();
1471
-}
1472
-
1473
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
1474
- uint32_t val, uintptr_t retaddr)
1475
-{
1476
- set_helper_retaddr(retaddr);
1477
- cpu_stw_le_data(env, ptr, val);
1478
- clear_helper_retaddr();
1479
-}
1480
-
1481
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
1482
- uint32_t val, uintptr_t retaddr)
1483
-{
1484
- set_helper_retaddr(retaddr);
1485
- cpu_stl_le_data(env, ptr, val);
1486
- clear_helper_retaddr();
1487
-}
1488
-
1489
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
1490
- uint64_t val, uintptr_t retaddr)
1491
-{
1492
- set_helper_retaddr(retaddr);
1493
- cpu_stq_le_data(env, ptr, val);
1494
+ validate_memop(oi, MO_LEQ);
1495
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1496
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1497
+ stq_le_p(haddr, val);
1498
clear_helper_retaddr();
1499
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1500
}
1501
1502
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
1503
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
1504
return ret;
1505
}
1506
1507
+#include "ldst_common.c.inc"
1508
+
1509
/*
1510
* Do not allow unaligned operations to proceed. Return the host address.
1511
*
1512
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
1513
new file mode 100644
1514
index XXXXXXX..XXXXXXX
1515
--- /dev/null
1516
+++ b/accel/tcg/ldst_common.c.inc
1517
@@ -XXX,XX +XXX,XX @@
1518
+/*
1519
+ * Routines common to user and system emulation of load/store.
1520
+ *
1521
+ * Copyright (c) 2003 Fabrice Bellard
1522
+ *
1523
+ * SPDX-License-Identifier: GPL-2.0-or-later
1524
+ *
1525
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
1526
+ * See the COPYING file in the top-level directory.
1527
+ */
1528
+
1529
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1530
+ int mmu_idx, uintptr_t ra)
1531
+{
1532
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
1533
+ return cpu_ldb_mmu(env, addr, oi, ra);
1534
+}
1535
+
1536
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1537
+ int mmu_idx, uintptr_t ra)
1538
+{
1539
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
1540
+}
1541
+
1542
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1543
+ int mmu_idx, uintptr_t ra)
1544
+{
1545
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
1546
+ return cpu_ldw_be_mmu(env, addr, oi, ra);
1547
+}
1548
+
1549
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1550
+ int mmu_idx, uintptr_t ra)
1551
+{
1552
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
1553
+}
1554
+
1555
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1556
+ int mmu_idx, uintptr_t ra)
1557
+{
1558
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
1559
+ return cpu_ldl_be_mmu(env, addr, oi, ra);
1560
+}
1561
+
1562
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1563
+ int mmu_idx, uintptr_t ra)
1564
+{
1565
+ MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
1566
+ return cpu_ldq_be_mmu(env, addr, oi, ra);
1567
+}
1568
+
1569
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1570
+ int mmu_idx, uintptr_t ra)
1571
+{
1572
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
1573
+ return cpu_ldw_le_mmu(env, addr, oi, ra);
1574
+}
1575
+
1576
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1577
+ int mmu_idx, uintptr_t ra)
1578
+{
1579
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
1580
+}
1581
+
1582
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1583
+ int mmu_idx, uintptr_t ra)
1584
+{
1585
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
1586
+ return cpu_ldl_le_mmu(env, addr, oi, ra);
1587
+}
1588
+
1589
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1590
+ int mmu_idx, uintptr_t ra)
1591
+{
1592
+ MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
1593
+ return cpu_ldq_le_mmu(env, addr, oi, ra);
1594
+}
1595
+
1596
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1597
+ int mmu_idx, uintptr_t ra)
1598
+{
1599
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
1600
+ cpu_stb_mmu(env, addr, val, oi, ra);
1601
+}
1602
+
1603
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1604
+ int mmu_idx, uintptr_t ra)
1605
+{
1606
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
1607
+ cpu_stw_be_mmu(env, addr, val, oi, ra);
1608
+}
1609
+
1610
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1611
+ int mmu_idx, uintptr_t ra)
1612
+{
1613
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
1614
+ cpu_stl_be_mmu(env, addr, val, oi, ra);
1615
+}
1616
+
1617
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
1618
+ int mmu_idx, uintptr_t ra)
1619
+{
1620
+ MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
1621
+ cpu_stq_be_mmu(env, addr, val, oi, ra);
1622
+}
1623
+
1624
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1625
+ int mmu_idx, uintptr_t ra)
1626
+{
1627
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
1628
+ cpu_stw_le_mmu(env, addr, val, oi, ra);
1629
+}
1630
+
1631
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1632
+ int mmu_idx, uintptr_t ra)
1633
+{
1634
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
1635
+ cpu_stl_le_mmu(env, addr, val, oi, ra);
1636
+}
1637
+
1638
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
1639
+ int mmu_idx, uintptr_t ra)
1640
+{
1641
+ MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
1642
+ cpu_stq_le_mmu(env, addr, val, oi, ra);
1643
+}
1644
+
1645
+/*--------------------------*/
1646
+
1647
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1648
+{
1649
+ return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1650
+}
1651
+
1652
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1653
+{
1654
+ return (int8_t)cpu_ldub_data_ra(env, addr, ra);
1655
+}
1656
+
1657
+uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1658
+{
1659
+ return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1660
+}
1661
+
1662
+int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1663
+{
1664
+ return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
1665
+}
1666
+
1667
+uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1668
+{
1669
+ return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1670
+}
1671
+
1672
+uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1673
+{
1674
+ return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1675
+}
1676
+
1677
+uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1678
+{
1679
+ return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1680
+}
1681
+
1682
+int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1683
+{
1684
+ return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
1685
+}
1686
+
1687
+uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1688
+{
1689
+ return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1690
+}
1691
+
1692
+uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1693
+{
1694
+ return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1695
+}
1696
+
1697
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
1698
+ uint32_t val, uintptr_t ra)
1699
+{
1700
+ cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1701
+}
1702
+
1703
+void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
1704
+ uint32_t val, uintptr_t ra)
1705
+{
1706
+ cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1707
+}
1708
+
1709
+void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
1710
+ uint32_t val, uintptr_t ra)
1711
+{
1712
+ cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1713
+}
1714
+
1715
+void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
1716
+ uint64_t val, uintptr_t ra)
1717
+{
1718
+ cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1719
+}
1720
+
1721
+void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
1722
+ uint32_t val, uintptr_t ra)
1723
+{
1724
+ cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1725
+}
1726
+
1727
+void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
1728
+ uint32_t val, uintptr_t ra)
1729
+{
1730
+ cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1731
+}
1732
+
1733
+void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
1734
+ uint64_t val, uintptr_t ra)
1735
+{
1736
+ cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1737
+}
1738
+
1739
+/*--------------------------*/
1740
+
1741
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
1742
+{
1743
+ return cpu_ldub_data_ra(env, addr, 0);
1744
+}
1745
+
1746
+int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
1747
+{
1748
+ return (int8_t)cpu_ldub_data(env, addr);
1749
+}
1750
+
1751
+uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
1752
+{
1753
+ return cpu_lduw_be_data_ra(env, addr, 0);
1754
+}
1755
+
1756
+int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
1757
+{
1758
+ return (int16_t)cpu_lduw_be_data(env, addr);
1759
+}
1760
+
1761
+uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
1762
+{
1763
+ return cpu_ldl_be_data_ra(env, addr, 0);
1764
+}
1765
+
1766
+uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
1767
+{
1768
+ return cpu_ldq_be_data_ra(env, addr, 0);
1769
+}
1770
+
1771
+uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
1772
+{
1773
+ return cpu_lduw_le_data_ra(env, addr, 0);
1774
+}
1775
+
1776
+int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
1777
+{
1778
+ return (int16_t)cpu_lduw_le_data(env, addr);
1779
+}
1780
+
1781
+uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
1782
+{
1783
+ return cpu_ldl_le_data_ra(env, addr, 0);
1784
+}
1785
+
1786
+uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
1787
+{
1788
+ return cpu_ldq_le_data_ra(env, addr, 0);
1789
+}
1790
+
1791
+void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1792
+{
1793
+ cpu_stb_data_ra(env, addr, val, 0);
1794
+}
1795
+
1796
+void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1797
+{
1798
+ cpu_stw_be_data_ra(env, addr, val, 0);
1799
+}
1800
+
1801
+void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1802
+{
1803
+ cpu_stl_be_data_ra(env, addr, val, 0);
1804
+}
1805
+
1806
+void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
1807
+{
1808
+ cpu_stq_be_data_ra(env, addr, val, 0);
1809
+}
1810
+
1811
+void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1812
+{
1813
+ cpu_stw_le_data_ra(env, addr, val, 0);
1814
+}
1815
+
1816
+void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1817
+{
1818
+ cpu_stl_le_data_ra(env, addr, val, 0);
1819
+}
1820
+
1821
+void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
1822
+{
1823
+ cpu_stq_le_data_ra(env, addr, val, 0);
1824
+}
--
2.17.1

--
2.25.1
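As a minimal illustration of the cpu_*_mmu interface added above (a sketch only;
do_swap_q_le is a hypothetical helper, not part of the series): a target helper
that already holds a MemOpIdx can pass it straight through instead of rebuilding
it from an mmu_idx.

/* Sketch: callers in-tree would also include qemu/osdep.h first. */
#include "exec/cpu_ldst.h"

static uint64_t do_swap_q_le(CPUArchState *env, target_ulong addr,
                             uint64_t newval, MemOpIdx oi, uintptr_t ra)
{
    /* oi already encodes MO_LEQ plus the mmu_idx chosen at translate time. */
    uint64_t oldval = cpu_ldq_le_mmu(env, addr, oi, ra);

    cpu_stq_le_mmu(env, addr, newval, oi, ra);
    return oldval;
}

Callers that only have an mmu_idx can still build the index with
make_memop_idx(MO_LEQ, mmu_idx), which is exactly what the _mmuidx_ra
wrappers in the new ldst_common.c.inc do.
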
These bits do not need to vary with the actual page size
used by the guest.

The previous placement in tcg/tcg.h was not logical.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
include/exec/cpu-all.h | 16 ++++++++++------
6
include/exec/cpu_ldst.h | 87 +++++++++++++++++++++++++++++++++++
10
1 file changed, 10 insertions(+), 6 deletions(-)
7
include/tcg/tcg.h | 87 -----------------------------------
11
8
target/arm/helper-a64.c | 1 -
12
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
9
target/m68k/op_helper.c | 1 -
13
index XXXXXXX..XXXXXXX 100644
10
target/ppc/mem_helper.c | 1 -
14
--- a/include/exec/cpu-all.h
11
target/s390x/tcg/mem_helper.c | 1 -
15
+++ b/include/exec/cpu-all.h
12
6 files changed, 87 insertions(+), 91 deletions(-)
16
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
13
14
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu_ldst.h
17
+++ b/include/exec/cpu_ldst.h
18
@@ -XXX,XX +XXX,XX @@
19
#define CPU_LDST_H
20
21
#include "exec/memopidx.h"
22
+#include "qemu/int128.h"
23
24
#if defined(CONFIG_USER_ONLY)
25
/* sparc32plus has 64bit long but 32bit space address
26
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
27
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
28
MemOpIdx oi, uintptr_t ra);
29
30
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
31
+ uint32_t cmpv, uint32_t newv,
32
+ MemOpIdx oi, uintptr_t retaddr);
33
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
34
+ uint32_t cmpv, uint32_t newv,
35
+ MemOpIdx oi, uintptr_t retaddr);
36
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
37
+ uint32_t cmpv, uint32_t newv,
38
+ MemOpIdx oi, uintptr_t retaddr);
39
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
40
+ uint64_t cmpv, uint64_t newv,
41
+ MemOpIdx oi, uintptr_t retaddr);
42
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
43
+ uint32_t cmpv, uint32_t newv,
44
+ MemOpIdx oi, uintptr_t retaddr);
45
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
46
+ uint32_t cmpv, uint32_t newv,
47
+ MemOpIdx oi, uintptr_t retaddr);
48
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
49
+ uint64_t cmpv, uint64_t newv,
50
+ MemOpIdx oi, uintptr_t retaddr);
51
+
52
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
53
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
54
+ (CPUArchState *env, target_ulong addr, TYPE val, \
55
+ MemOpIdx oi, uintptr_t retaddr);
56
+
57
+#ifdef CONFIG_ATOMIC64
58
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
59
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
60
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
61
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
62
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
63
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
64
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
65
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
66
+#else
67
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
68
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
69
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
70
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
71
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
72
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
73
+#endif
74
+
75
+GEN_ATOMIC_HELPER_ALL(fetch_add)
76
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
77
+GEN_ATOMIC_HELPER_ALL(fetch_and)
78
+GEN_ATOMIC_HELPER_ALL(fetch_or)
79
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
80
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
81
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
82
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
83
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
84
+
85
+GEN_ATOMIC_HELPER_ALL(add_fetch)
86
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
87
+GEN_ATOMIC_HELPER_ALL(and_fetch)
88
+GEN_ATOMIC_HELPER_ALL(or_fetch)
89
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
90
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
91
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
92
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
93
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
94
+
95
+GEN_ATOMIC_HELPER_ALL(xchg)
96
+
97
+#undef GEN_ATOMIC_HELPER_ALL
98
+#undef GEN_ATOMIC_HELPER
99
+
100
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
101
+ Int128 cmpv, Int128 newv,
102
+ MemOpIdx oi, uintptr_t retaddr);
103
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
104
+ Int128 cmpv, Int128 newv,
105
+ MemOpIdx oi, uintptr_t retaddr);
106
+
107
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
108
+ MemOpIdx oi, uintptr_t retaddr);
109
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
110
+ MemOpIdx oi, uintptr_t retaddr);
111
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
112
+ MemOpIdx oi, uintptr_t retaddr);
113
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
114
+ MemOpIdx oi, uintptr_t retaddr);
115
+
116
#if defined(CONFIG_USER_ONLY)
117
118
extern __thread uintptr_t helper_retaddr;
119
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
120
index XXXXXXX..XXXXXXX 100644
121
--- a/include/tcg/tcg.h
122
+++ b/include/tcg/tcg.h
123
@@ -XXX,XX +XXX,XX @@
124
#include "qemu/queue.h"
125
#include "tcg/tcg-mo.h"
126
#include "tcg-target.h"
127
-#include "qemu/int128.h"
128
#include "tcg/tcg-cond.h"
129
130
/* XXX: make safe guess about sizes */
131
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
132
#endif
133
#endif /* CONFIG_SOFTMMU */
134
135
-uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
136
- uint32_t cmpv, uint32_t newv,
137
- MemOpIdx oi, uintptr_t retaddr);
138
-uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
139
- uint32_t cmpv, uint32_t newv,
140
- MemOpIdx oi, uintptr_t retaddr);
141
-uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
142
- uint32_t cmpv, uint32_t newv,
143
- MemOpIdx oi, uintptr_t retaddr);
144
-uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
145
- uint64_t cmpv, uint64_t newv,
146
- MemOpIdx oi, uintptr_t retaddr);
147
-uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
148
- uint32_t cmpv, uint32_t newv,
149
- MemOpIdx oi, uintptr_t retaddr);
150
-uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
151
- uint32_t cmpv, uint32_t newv,
152
- MemOpIdx oi, uintptr_t retaddr);
153
-uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
154
- uint64_t cmpv, uint64_t newv,
155
- MemOpIdx oi, uintptr_t retaddr);
156
-
157
-#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
158
-TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
159
- (CPUArchState *env, target_ulong addr, TYPE val, \
160
- MemOpIdx oi, uintptr_t retaddr);
161
-
162
-#ifdef CONFIG_ATOMIC64
163
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
164
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
165
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
166
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
167
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
168
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
169
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
170
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
171
-#else
172
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
173
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
174
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
175
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
176
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
177
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
178
-#endif
179
-
180
-GEN_ATOMIC_HELPER_ALL(fetch_add)
181
-GEN_ATOMIC_HELPER_ALL(fetch_sub)
182
-GEN_ATOMIC_HELPER_ALL(fetch_and)
183
-GEN_ATOMIC_HELPER_ALL(fetch_or)
184
-GEN_ATOMIC_HELPER_ALL(fetch_xor)
185
-GEN_ATOMIC_HELPER_ALL(fetch_smin)
186
-GEN_ATOMIC_HELPER_ALL(fetch_umin)
187
-GEN_ATOMIC_HELPER_ALL(fetch_smax)
188
-GEN_ATOMIC_HELPER_ALL(fetch_umax)
189
-
190
-GEN_ATOMIC_HELPER_ALL(add_fetch)
191
-GEN_ATOMIC_HELPER_ALL(sub_fetch)
192
-GEN_ATOMIC_HELPER_ALL(and_fetch)
193
-GEN_ATOMIC_HELPER_ALL(or_fetch)
194
-GEN_ATOMIC_HELPER_ALL(xor_fetch)
195
-GEN_ATOMIC_HELPER_ALL(smin_fetch)
196
-GEN_ATOMIC_HELPER_ALL(umin_fetch)
197
-GEN_ATOMIC_HELPER_ALL(smax_fetch)
198
-GEN_ATOMIC_HELPER_ALL(umax_fetch)
199
-
200
-GEN_ATOMIC_HELPER_ALL(xchg)
201
-
202
-#undef GEN_ATOMIC_HELPER_ALL
203
-#undef GEN_ATOMIC_HELPER
204
-
205
-Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
206
- Int128 cmpv, Int128 newv,
207
- MemOpIdx oi, uintptr_t retaddr);
208
-Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
209
- Int128 cmpv, Int128 newv,
210
- MemOpIdx oi, uintptr_t retaddr);
211
-
212
-Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
213
- MemOpIdx oi, uintptr_t retaddr);
214
-Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
215
- MemOpIdx oi, uintptr_t retaddr);
216
-void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
217
- MemOpIdx oi, uintptr_t retaddr);
218
-void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
219
- MemOpIdx oi, uintptr_t retaddr);
220
-
221
#ifdef CONFIG_DEBUG_TCG
222
void tcg_assert_listed_vecop(TCGOpcode);
223
#else
224
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/target/arm/helper-a64.c
227
+++ b/target/arm/helper-a64.c
228
@@ -XXX,XX +XXX,XX @@
229
#include "exec/cpu_ldst.h"
230
#include "qemu/int128.h"
231
#include "qemu/atomic128.h"
232
-#include "tcg/tcg.h"
233
#include "fpu/softfloat.h"
234
#include <zlib.h> /* For crc32 */
235
236
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
237
index XXXXXXX..XXXXXXX 100644
238
--- a/target/m68k/op_helper.c
239
+++ b/target/m68k/op_helper.c
240
@@ -XXX,XX +XXX,XX @@
241
#include "exec/exec-all.h"
242
#include "exec/cpu_ldst.h"
243
#include "semihosting/semihost.h"
244
-#include "tcg/tcg.h"
17
245
18
#if !defined(CONFIG_USER_ONLY)
246
#if !defined(CONFIG_USER_ONLY)
19
247
20
-/* Flags stored in the low bits of the TLB virtual address. These are
248
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
21
- * defined so that fast path ram access is all zeros.
249
index XXXXXXX..XXXXXXX 100644
22
+/*
250
--- a/target/ppc/mem_helper.c
23
+ * Flags stored in the low bits of the TLB virtual address.
251
+++ b/target/ppc/mem_helper.c
24
+ * These are defined so that fast path ram access is all zeros.
252
@@ -XXX,XX +XXX,XX @@
25
* The flags all must be between TARGET_PAGE_BITS and
253
#include "exec/helper-proto.h"
26
* maximum address alignment bit.
254
#include "helper_regs.h"
27
+ *
255
#include "exec/cpu_ldst.h"
28
+ * Use TARGET_PAGE_BITS_MIN so that these bits are constant
256
-#include "tcg/tcg.h"
29
+ * when TARGET_PAGE_BITS_VARY is in effect.
257
#include "internal.h"
30
*/
258
#include "qemu/atomic128.h"
31
/* Zero if TLB entry is valid. */
259
32
-#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
260
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
33
+#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
261
index XXXXXXX..XXXXXXX 100644
34
/* Set if TLB entry references a clean RAM page. The iotlb entry will
262
--- a/target/s390x/tcg/mem_helper.c
35
contain the page physical address. */
263
+++ b/target/s390x/tcg/mem_helper.c
36
-#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
264
@@ -XXX,XX +XXX,XX @@
37
+#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
265
#include "exec/cpu_ldst.h"
38
/* Set if TLB entry is an IO callback. */
266
#include "qemu/int128.h"
39
-#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
267
#include "qemu/atomic128.h"
40
+#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
268
-#include "tcg/tcg.h"
41
/* Set if TLB entry contains a watchpoint. */
269
#include "trace.h"
42
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS - 4))
270
43
+#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
271
#if !defined(CONFIG_USER_ONLY)
44
45
/* Use this mask to check interception with an alignment mask
46
* in a TCG backend.
47
--
272
--
48
2.17.1
273
2.25.1
49
274
50
275
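As an aside on the TARGET_PAGE_BITS_MIN change above: the point is that the
flag bits stay compile-time constants even when TARGET_PAGE_BITS_VARY is in
effect. A rough sketch of the resulting layout, assuming (for illustration
only) TARGET_PAGE_BITS_MIN == 12; the real value is per-target:

    /* Illustration, not part of the patch:
     *   TLB_INVALID_MASK == 1 << 11
     *   TLB_NOTDIRTY     == 1 << 10
     *   TLB_MMIO         == 1 << 9
     *   TLB_WATCHPOINT   == 1 << 8
     * All flags sit below the smallest page size, so the fast path can
     * reject every slow case with a single mask test:
     */
    if (likely((tlb_addr & ~TARGET_PAGE_MASK) == 0)) {
        /* plain RAM access through the host address */
    }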
Deleted patch
1
Use this as a compile-time assert that a particular
2
code path is not reachable.
3
1
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/qemu/compiler.h | 15 +++++++++++++++
8
1 file changed, 15 insertions(+)
9
10
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/qemu/compiler.h
13
+++ b/include/qemu/compiler.h
14
@@ -XXX,XX +XXX,XX @@
15
#define QEMU_GENERIC9(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC8(x, __VA_ARGS__))
16
#define QEMU_GENERIC10(x, a0, ...) QEMU_GENERIC_IF(x, a0, QEMU_GENERIC9(x, __VA_ARGS__))
17
18
+/**
19
+ * qemu_build_not_reached()
20
+ *
21
+ * The compiler, during optimization, is expected to prove that a call
22
+ * to this function cannot be reached and remove it. If the compiler
23
+ * supports QEMU_ERROR, this will be reported at compile time; otherwise
24
+ * this will be reported at link time due to the missing symbol.
25
+ */
26
+#ifdef __OPTIMIZE__
27
+extern void QEMU_NORETURN QEMU_ERROR("code path is reachable")
28
+ qemu_build_not_reached(void);
29
+#else
30
+#define qemu_build_not_reached() g_assert_not_reached()
31
+#endif
32
+
33
#endif /* COMPILER_H */
34
--
35
2.17.1
36
37
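A minimal usage sketch for qemu_build_not_reached() (the helper below is
hypothetical, not taken from this series): when the switch operand is a
compile-time constant after inlining, the optimizer deletes the unreachable
default case together with the call, so a stray value becomes a compile- or
link-time failure rather than a runtime abort.

    static inline unsigned memop_size_demo(MemOp op)
    {
        switch (op) {
        case MO_8:  return 1;
        case MO_16: return 2;
        case MO_32: return 4;
        case MO_64: return 8;
        default:
            /* prove at build time that nothing else reaches here */
            qemu_build_not_reached();
        }
    }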
1
Handle bswap on RAM directly in load/store_helper. This fixes a
1
We should not have been using the helper_ret_* set of
2
bug in the previous implementation, in that one cannot use the
2
functions, as they are supposed to be private to tcg.
3
I/O path for RAM.
3
Nor should we have been using the plain cpu_*_data set
4
of functions, as they do not handle unwinding properly.
4
5
5
Fixes: a26fc6f5152b47f1
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: David Hildenbrand <david@redhat.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
include/exec/cpu-all.h | 4 ++-
9
target/mips/tcg/msa_helper.c | 420 +++++++++++------------------------
11
accel/tcg/cputlb.c | 72 +++++++++++++++++++++++++-----------------
10
1 file changed, 135 insertions(+), 285 deletions(-)
12
2 files changed, 46 insertions(+), 30 deletions(-)
13
11
14
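The unwinding pattern the msa conversion applies, reduced to a sketch (the
helper name and body here are made up for illustration): GETPC() must be
evaluated in the outermost helper so the unwinder can map the host return
address back to the guest instruction, and the captured value is then passed
as "ra" to every cpu_*_data_ra() access.

    uint32_t helper_demo_lduw(CPUMIPSState *env, target_ulong addr)
    {
        uintptr_t ra = GETPC();              /* capture once, at entry */
        uint32_t lo = cpu_ldub_data_ra(env, addr, ra);
        uint32_t hi = cpu_ldub_data_ra(env, addr + 1, ra);
        return lo | (hi << 8);               /* little-endian halfword */
    }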
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
12
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
15
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-all.h
14
--- a/target/mips/tcg/msa_helper.c
17
+++ b/include/exec/cpu-all.h
15
+++ b/target/mips/tcg/msa_helper.c
18
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
16
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
19
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
17
target_ulong addr)
20
/* Set if TLB entry contains a watchpoint. */
18
{
21
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
19
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
22
+/* Set if TLB entry requires byte swap. */
20
- MEMOP_IDX(DF_BYTE)
23
+#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
21
-#if !defined(CONFIG_USER_ONLY)
24
22
+ uintptr_t ra = GETPC();
25
/* Use this mask to check interception with an alignment mask
23
+
26
* in a TCG backend.
24
#if !defined(HOST_WORDS_BIGENDIAN)
27
*/
25
- pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
28
#define TLB_FLAGS_MASK \
26
- pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
29
- (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT)
27
- pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
30
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
28
- pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
31
29
- pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
32
/**
30
- pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
33
* tlb_hit_page: return true if page aligned @addr is a hit against the
31
- pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
34
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
32
- pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
35
index XXXXXXX..XXXXXXX 100644
33
- pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
36
--- a/accel/tcg/cputlb.c
34
- pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
37
+++ b/accel/tcg/cputlb.c
35
- pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
38
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
36
- pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
39
address |= TLB_INVALID_MASK;
37
- pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
40
}
38
- pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
41
if (attrs.byte_swap) {
39
- pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
42
- /* Force the access through the I/O slow path. */
40
- pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
43
- address |= TLB_MMIO;
41
+ pwd->b[0] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
44
+ address |= TLB_BSWAP;
42
+ pwd->b[1] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
45
}
43
+ pwd->b[2] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
46
if (!memory_region_is_ram(section->mr) &&
44
+ pwd->b[3] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
47
!memory_region_is_romd(section->mr)) {
45
+ pwd->b[4] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
48
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
46
+ pwd->b[5] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
49
bool locked = false;
47
+ pwd->b[6] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
50
MemTxResult r;
48
+ pwd->b[7] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
51
49
+ pwd->b[8] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
52
- if (iotlbentry->attrs.byte_swap) {
50
+ pwd->b[9] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
53
- op ^= MO_BSWAP;
51
+ pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
54
- }
52
+ pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
55
-
53
+ pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
56
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
54
+ pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
57
mr = section->mr;
55
+ pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
58
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
56
+ pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
59
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
57
#else
60
bool locked = false;
58
- pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
61
MemTxResult r;
59
- pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
62
60
- pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
63
- if (iotlbentry->attrs.byte_swap) {
61
- pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
64
- op ^= MO_BSWAP;
62
- pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
65
- }
63
- pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
66
-
64
- pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
67
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
65
- pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
68
mr = section->mr;
66
- pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
69
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
67
- pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
70
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
68
- pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
71
wp_access, retaddr);
69
- pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
72
}
70
- pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
73
71
- pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
74
- if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO)) {
72
- pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
75
- /* I/O access */
73
- pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
76
+ /* Reject I/O access, or other required slow-path. */
74
-#endif
77
+ if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
75
-#else
78
return NULL;
76
-#if !defined(HOST_WORDS_BIGENDIAN)
79
}
77
- pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
80
78
- pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
81
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
79
- pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
82
/* Handle anything that isn't just a straight memory access. */
80
- pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
83
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
81
- pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
84
CPUIOTLBEntry *iotlbentry;
82
- pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
85
+ bool need_swap;
83
- pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
86
84
- pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
87
/* For anything that is unaligned, recurse through full_load. */
85
- pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
88
if ((addr & (size - 1)) != 0) {
86
- pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
89
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
87
- pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
90
/* On watchpoint hit, this will longjmp out. */
88
- pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
91
cpu_check_watchpoint(env_cpu(env), addr, size,
89
- pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
92
iotlbentry->attrs, BP_MEM_READ, retaddr);
90
- pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
93
-
91
- pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
94
- /* The backing page may or may not require I/O. */
92
- pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
95
- tlb_addr &= ~TLB_WATCHPOINT;
93
-#else
96
- if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
94
- pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
97
- goto do_aligned_access;
95
- pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
98
- }
96
- pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
99
}
97
- pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
100
98
- pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
101
+ need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
99
- pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
102
+
100
- pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
103
/* Handle I/O access. */
101
- pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
104
- return io_readx(env, iotlbentry, mmu_idx, addr,
102
- pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
105
- retaddr, access_type, op);
103
- pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
106
+ if (likely(tlb_addr & TLB_MMIO)) {
104
- pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
107
+ return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
105
- pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
108
+ access_type, op ^ (need_swap * MO_BSWAP));
106
- pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
109
+ }
107
- pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
110
+
108
- pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
111
+ haddr = (void *)((uintptr_t)addr + entry->addend);
109
- pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
112
+
110
-#endif
113
+ /*
111
+ pwd->b[0] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
114
+ * Keep these two load_memop separate to ensure that the compiler
112
+ pwd->b[1] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
115
+ * is able to fold the entire function to a single instruction.
113
+ pwd->b[2] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
116
+ * There is a build-time assert inside to remind you of this. ;-)
114
+ pwd->b[3] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
117
+ */
115
+ pwd->b[4] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
118
+ if (unlikely(need_swap)) {
116
+ pwd->b[5] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
119
+ return load_memop(haddr, op ^ MO_BSWAP);
117
+ pwd->b[6] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
120
+ }
118
+ pwd->b[7] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
121
+ return load_memop(haddr, op);
119
+ pwd->b[8] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
122
}
120
+ pwd->b[9] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
123
121
+ pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
124
/* Handle slow unaligned access (it spans two pages or IO). */
122
+ pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
125
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
123
+ pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
126
return res & MAKE_64BIT_MASK(0, size * 8);
124
+ pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
127
}
125
+ pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
128
126
+ pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
129
- do_aligned_access:
127
#endif
130
haddr = (void *)((uintptr_t)addr + entry->addend);
128
}
131
return load_memop(haddr, op);
129
132
}
130
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
133
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
131
target_ulong addr)
134
/* Handle anything that isn't just a straight memory access. */
132
{
135
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
133
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
136
CPUIOTLBEntry *iotlbentry;
134
- MEMOP_IDX(DF_HALF)
137
+ bool need_swap;
135
-#if !defined(CONFIG_USER_ONLY)
138
136
+ uintptr_t ra = GETPC();
139
/* For anything that is unaligned, recurse through byte stores. */
137
+
140
if ((addr & (size - 1)) != 0) {
138
#if !defined(HOST_WORDS_BIGENDIAN)
141
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
139
- pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
142
/* On watchpoint hit, this will longjmp out. */
140
- pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
143
cpu_check_watchpoint(env_cpu(env), addr, size,
141
- pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
144
iotlbentry->attrs, BP_MEM_WRITE, retaddr);
142
- pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
145
-
143
- pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
146
- /* The backing page may or may not require I/O. */
144
- pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
147
- tlb_addr &= ~TLB_WATCHPOINT;
145
- pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
148
- if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
146
- pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
149
- goto do_aligned_access;
147
+ pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
150
- }
148
+ pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
151
}
149
+ pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
152
150
+ pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
153
+ need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
151
+ pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
154
+
152
+ pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
155
/* Handle I/O access. */
153
+ pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
156
- io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
154
+ pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
157
+ if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
155
#else
158
+ io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
156
- pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
159
+ op ^ (need_swap * MO_BSWAP));
157
- pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
160
+ return;
158
- pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
161
+ }
159
- pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
162
+
160
- pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
163
+ haddr = (void *)((uintptr_t)addr + entry->addend);
161
- pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
164
+
162
- pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
165
+ /*
163
- pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
166
+ * Keep these two store_memop separate to ensure that the compiler
164
-#endif
167
+ * is able to fold the entire function to a single instruction.
165
-#else
168
+ * There is a build-time assert inside to remind you of this. ;-)
166
-#if !defined(HOST_WORDS_BIGENDIAN)
169
+ */
167
- pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
170
+ if (unlikely(need_swap)) {
168
- pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
171
+ store_memop(haddr, val, op ^ MO_BSWAP);
169
- pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
172
+ } else {
170
- pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
173
+ store_memop(haddr, val, op);
171
- pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
174
+ }
172
- pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
175
return;
173
- pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
176
}
174
- pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
177
175
-#else
178
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
176
- pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
179
return;
177
- pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
180
}
178
- pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
181
179
- pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
182
- do_aligned_access:
180
- pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
183
haddr = (void *)((uintptr_t)addr + entry->addend);
181
- pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
184
store_memop(haddr, val, op);
182
- pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
183
- pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
184
-#endif
185
+ pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
186
+ pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
187
+ pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
188
+ pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
189
+ pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
190
+ pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
191
+ pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
192
+ pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
193
#endif
194
}
195
196
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
197
target_ulong addr)
198
{
199
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
200
- MEMOP_IDX(DF_WORD)
201
-#if !defined(CONFIG_USER_ONLY)
202
+ uintptr_t ra = GETPC();
203
+
204
#if !defined(HOST_WORDS_BIGENDIAN)
205
- pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
206
- pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
207
- pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
208
- pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
209
+ pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
210
+ pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
211
+ pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
212
+ pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
213
#else
214
- pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
215
- pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
216
- pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
217
- pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
218
-#endif
219
-#else
220
-#if !defined(HOST_WORDS_BIGENDIAN)
221
- pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
222
- pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
223
- pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
224
- pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
225
-#else
226
- pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
227
- pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
228
- pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
229
- pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
230
-#endif
231
+ pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
232
+ pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
233
+ pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
234
+ pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
235
#endif
236
}
237
238
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
239
target_ulong addr)
240
{
241
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
242
- MEMOP_IDX(DF_DOUBLE)
243
-#if !defined(CONFIG_USER_ONLY)
244
- pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
245
- pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
246
-#else
247
- pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
248
- pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
249
-#endif
250
+ uintptr_t ra = GETPC();
251
+
252
+ pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
253
+ pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
254
}
255
256
#define MSA_PAGESPAN(x) \
257
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
258
{
259
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
260
int mmu_idx = cpu_mmu_index(env, false);
261
+ uintptr_t ra = GETPC();
262
+
263
+ ensure_writable_pages(env, addr, mmu_idx, ra);
264
265
- MEMOP_IDX(DF_BYTE)
266
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
267
-#if !defined(CONFIG_USER_ONLY)
268
#if !defined(HOST_WORDS_BIGENDIAN)
269
- helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC());
270
- helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC());
271
- helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC());
272
- helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC());
273
- helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC());
274
- helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC());
275
- helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC());
276
- helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC());
277
- helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC());
278
- helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC());
279
- helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
280
- helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
281
- helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
282
- helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
283
- helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
284
- helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
285
+ cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[0], ra);
286
+ cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[1], ra);
287
+ cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[2], ra);
288
+ cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[3], ra);
289
+ cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[4], ra);
290
+ cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[5], ra);
291
+ cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[6], ra);
292
+ cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[7], ra);
293
+ cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[8], ra);
294
+ cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[9], ra);
295
+ cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
296
+ cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
297
+ cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
298
+ cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
299
+ cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
300
+ cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
301
#else
302
- helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC());
303
- helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC());
304
- helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC());
305
- helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC());
306
- helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC());
307
- helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC());
308
- helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC());
309
- helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC());
310
- helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC());
311
- helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC());
312
- helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
313
- helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
314
- helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
315
- helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
316
- helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC());
317
- helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC());
318
-#endif
319
-#else
320
-#if !defined(HOST_WORDS_BIGENDIAN)
321
- cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]);
322
- cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]);
323
- cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]);
324
- cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]);
325
- cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]);
326
- cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]);
327
- cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]);
328
- cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]);
329
- cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]);
330
- cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]);
331
- cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
332
- cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
333
- cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
334
- cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
335
- cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
336
- cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
337
-#else
338
- cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]);
339
- cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]);
340
- cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]);
341
- cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]);
342
- cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]);
343
- cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]);
344
- cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]);
345
- cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]);
346
- cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
347
- cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
348
- cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
349
- cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
350
- cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
351
- cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
352
- cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]);
353
- cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]);
354
-#endif
355
+ cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[0], ra);
356
+ cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[1], ra);
357
+ cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[2], ra);
358
+ cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[3], ra);
359
+ cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[4], ra);
360
+ cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[5], ra);
361
+ cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[6], ra);
362
+ cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[7], ra);
363
+ cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
364
+ cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
365
+ cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
366
+ cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
367
+ cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
368
+ cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
369
+ cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[14], ra);
370
+ cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[15], ra);
371
#endif
372
}
373
374
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
375
{
376
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
377
int mmu_idx = cpu_mmu_index(env, false);
378
+ uintptr_t ra = GETPC();
379
+
380
+ ensure_writable_pages(env, addr, mmu_idx, ra);
381
382
- MEMOP_IDX(DF_HALF)
383
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
384
-#if !defined(CONFIG_USER_ONLY)
385
#if !defined(HOST_WORDS_BIGENDIAN)
386
- helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
387
- helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
388
- helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
389
- helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
390
- helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
391
- helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
392
- helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
393
- helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
394
+ cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
395
+ cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
396
+ cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
397
+ cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
398
+ cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
399
+ cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
400
+ cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
401
+ cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
402
#else
403
- helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
404
- helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
405
- helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
406
- helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
407
- helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
408
- helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
409
- helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
410
- helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
411
-#endif
412
-#else
413
-#if !defined(HOST_WORDS_BIGENDIAN)
414
- cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
415
- cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
416
- cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
417
- cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
418
- cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
419
- cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
420
- cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
421
- cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
422
-#else
423
- cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
424
- cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
425
- cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
426
- cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
427
- cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
428
- cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
429
- cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
430
- cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
431
-#endif
432
+ cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
433
+ cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
434
+ cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
435
+ cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
436
+ cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
437
+ cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
438
+ cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
439
+ cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
440
#endif
441
}
442
443
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
444
{
445
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
446
int mmu_idx = cpu_mmu_index(env, false);
447
+ uintptr_t ra = GETPC();
448
+
449
+ ensure_writable_pages(env, addr, mmu_idx, ra);
450
451
- MEMOP_IDX(DF_WORD)
452
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
453
-#if !defined(CONFIG_USER_ONLY)
454
#if !defined(HOST_WORDS_BIGENDIAN)
455
- helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
456
- helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
457
- helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
458
- helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
459
+ cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
460
+ cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
461
+ cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
462
+ cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
463
#else
464
- helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
465
- helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
466
- helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
467
- helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
468
-#endif
469
-#else
470
-#if !defined(HOST_WORDS_BIGENDIAN)
471
- cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
472
- cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
473
- cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
474
- cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
475
-#else
476
- cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
477
- cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
478
- cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
479
- cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
480
-#endif
481
+ cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
482
+ cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
483
+ cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
484
+ cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
485
#endif
486
}
487
488
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
489
{
490
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
491
int mmu_idx = cpu_mmu_index(env, false);
492
+ uintptr_t ra = GETPC();
493
494
- MEMOP_IDX(DF_DOUBLE)
495
ensure_writable_pages(env, addr, mmu_idx, GETPC());
496
-#if !defined(CONFIG_USER_ONLY)
497
- helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
498
- helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
499
-#else
500
- cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
501
- cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
502
-#endif
503
+
504
+ cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
505
+ cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
185
}
506
}
186
--
507
--
187
2.17.1
508
2.25.1
188
509
189
510
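One detail of the TLB_BSWAP handling above deserves a note: MO_BSWAP is a
single bit in MemOp and need_swap is 0 or 1, so op ^ (need_swap * MO_BSWAP)
either leaves the operation untouched or flips its endianness without a
branch. Written out long-hand (sketch only, equivalent to the expression
used in the patch):

    static MemOp adjust_memop(MemOp op, bool need_swap)
    {
        if (need_swap) {
            op ^= MO_BSWAP;     /* toggle the byte-swap bit */
        }
        return op;
    }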
1
We will shortly be using these more than once.
1
Rather than use 4-16 separate operations, use 2 operations
2
2
plus some byte reordering as necessary.
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
4
Reviewed-by: David Hildenbrand <david@redhat.com>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
accel/tcg/cputlb.c | 107 +++++++++++++++++++++++----------------------
7
target/mips/tcg/msa_helper.c | 201 +++++++++++++----------------------
8
1 file changed, 55 insertions(+), 52 deletions(-)
8
1 file changed, 71 insertions(+), 130 deletions(-)
9
9
10
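The diff below adds bswap16x4() and bswap32x2() for the big-endian lane
reordering. A worked example of what they compute, with the formulas inlined
and an arbitrary test value (illustration only, not part of the patch):

    static void demo_lane_swaps(void)
    {
        uint64_t x = 0x0011223344556677ull;
        uint64_t m = 0x00ff00ff00ff00ffull;

        uint64_t h = ((x & m) << 8) | ((x >> 8) & m);  /* bswap16x4 */
        uint64_t w = ror64(bswap64(x), 32);            /* bswap32x2 */

        g_assert(h == 0x1100332255447766ull);  /* 16-bit lanes swapped */
        g_assert(w == 0x3322110077665544ull);  /* 32-bit lanes swapped */
    }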
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
10
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/cputlb.c
12
--- a/target/mips/tcg/msa_helper.c
13
+++ b/accel/tcg/cputlb.c
13
+++ b/target/mips/tcg/msa_helper.c
14
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
14
@@ -XXX,XX +XXX,XX @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
15
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
15
#define MEMOP_IDX(DF)
16
TCGMemOpIdx oi, uintptr_t retaddr);
16
#endif
17
17
18
+static inline uint64_t QEMU_ALWAYS_INLINE
18
+#ifdef TARGET_WORDS_BIGENDIAN
19
+load_memop(const void *haddr, MemOp op)
19
+static inline uint64_t bswap16x4(uint64_t x)
20
+{
20
+{
21
+ switch (op) {
21
+ uint64_t m = 0x00ff00ff00ff00ffull;
22
+ case MO_UB:
22
+ return ((x & m) << 8) | ((x >> 8) & m);
23
+ return ldub_p(haddr);
24
+ case MO_BEUW:
25
+ return lduw_be_p(haddr);
26
+ case MO_LEUW:
27
+ return lduw_le_p(haddr);
28
+ case MO_BEUL:
29
+ return (uint32_t)ldl_be_p(haddr);
30
+ case MO_LEUL:
31
+ return (uint32_t)ldl_le_p(haddr);
32
+ case MO_BEQ:
33
+ return ldq_be_p(haddr);
34
+ case MO_LEQ:
35
+ return ldq_le_p(haddr);
36
+ default:
37
+ qemu_build_not_reached();
38
+ }
39
+}
23
+}
40
+
24
+
41
static inline uint64_t QEMU_ALWAYS_INLINE
25
+static inline uint64_t bswap32x2(uint64_t x)
42
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
43
uintptr_t retaddr, MemOp op, bool code_read,
44
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
45
46
do_aligned_access:
47
haddr = (void *)((uintptr_t)addr + entry->addend);
48
- switch (op) {
49
- case MO_UB:
50
- res = ldub_p(haddr);
51
- break;
52
- case MO_BEUW:
53
- res = lduw_be_p(haddr);
54
- break;
55
- case MO_LEUW:
56
- res = lduw_le_p(haddr);
57
- break;
58
- case MO_BEUL:
59
- res = (uint32_t)ldl_be_p(haddr);
60
- break;
61
- case MO_LEUL:
62
- res = (uint32_t)ldl_le_p(haddr);
63
- break;
64
- case MO_BEQ:
65
- res = ldq_be_p(haddr);
66
- break;
67
- case MO_LEQ:
68
- res = ldq_le_p(haddr);
69
- break;
70
- default:
71
- qemu_build_not_reached();
72
- }
73
-
74
- return res;
75
+ return load_memop(haddr, op);
76
}
77
78
/*
79
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
80
* Store Helpers
81
*/
82
83
+static inline void QEMU_ALWAYS_INLINE
84
+store_memop(void *haddr, uint64_t val, MemOp op)
85
+{
26
+{
86
+ switch (op) {
27
+ return ror64(bswap64(x), 32);
87
+ case MO_UB:
88
+ stb_p(haddr, val);
89
+ break;
90
+ case MO_BEUW:
91
+ stw_be_p(haddr, val);
92
+ break;
93
+ case MO_LEUW:
94
+ stw_le_p(haddr, val);
95
+ break;
96
+ case MO_BEUL:
97
+ stl_be_p(haddr, val);
98
+ break;
99
+ case MO_LEUL:
100
+ stl_le_p(haddr, val);
101
+ break;
102
+ case MO_BEQ:
103
+ stq_be_p(haddr, val);
104
+ break;
105
+ case MO_LEQ:
106
+ stq_le_p(haddr, val);
107
+ break;
108
+ default:
109
+ qemu_build_not_reached();
110
+ }
111
+}
28
+}
29
+#endif
112
+
30
+
113
static inline void QEMU_ALWAYS_INLINE
31
void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
114
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
32
target_ulong addr)
115
TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
33
{
116
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
34
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
117
35
uintptr_t ra = GETPC();
118
do_aligned_access:
36
+ uint64_t d0, d1;
119
haddr = (void *)((uintptr_t)addr + entry->addend);
37
120
- switch (op) {
38
-#if !defined(HOST_WORDS_BIGENDIAN)
121
- case MO_UB:
39
- pwd->b[0] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
122
- stb_p(haddr, val);
40
- pwd->b[1] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
123
- break;
41
- pwd->b[2] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
124
- case MO_BEUW:
42
- pwd->b[3] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
125
- stw_be_p(haddr, val);
43
- pwd->b[4] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
126
- break;
44
- pwd->b[5] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
127
- case MO_LEUW:
45
- pwd->b[6] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
128
- stw_le_p(haddr, val);
46
- pwd->b[7] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
129
- break;
47
- pwd->b[8] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
130
- case MO_BEUL:
48
- pwd->b[9] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
131
- stl_be_p(haddr, val);
49
- pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
132
- break;
50
- pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
133
- case MO_LEUL:
51
- pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
134
- stl_le_p(haddr, val);
52
- pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
135
- break;
53
- pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
136
- case MO_BEQ:
54
- pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
137
- stq_be_p(haddr, val);
55
-#else
138
- break;
56
- pwd->b[0] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
139
- case MO_LEQ:
57
- pwd->b[1] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
140
- stq_le_p(haddr, val);
58
- pwd->b[2] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
141
- break;
59
- pwd->b[3] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
142
- default:
60
- pwd->b[4] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
143
- qemu_build_not_reached();
61
- pwd->b[5] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
144
- }
62
- pwd->b[6] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
145
+ store_memop(haddr, val, op);
63
- pwd->b[7] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
146
}
64
- pwd->b[8] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
147
65
- pwd->b[9] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
148
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
66
- pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
67
- pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
68
- pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
69
- pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
70
- pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
71
- pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
72
-#endif
73
+ /* Load 8 bytes at a time. Vector element ordering makes this LE. */
74
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
75
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
76
+ pwd->d[0] = d0;
77
+ pwd->d[1] = d1;
78
}
79
80
void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
81
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
82
{
83
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
84
uintptr_t ra = GETPC();
85
+ uint64_t d0, d1;
86
87
-#if !defined(HOST_WORDS_BIGENDIAN)
88
- pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
89
- pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
90
- pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
91
- pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
92
- pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
93
- pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
94
- pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
95
- pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
96
-#else
97
- pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
98
- pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
99
- pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
100
- pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
101
- pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
102
- pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
103
- pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
104
- pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
105
+ /*
106
+ * Load 8 bytes at a time. Use little-endian load, then for
107
+ * big-endian target, we must then swap the four halfwords.
108
+ */
109
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
110
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
111
+#ifdef TARGET_WORDS_BIGENDIAN
112
+ d0 = bswap16x4(d0);
113
+ d1 = bswap16x4(d1);
114
#endif
115
+ pwd->d[0] = d0;
116
+ pwd->d[1] = d1;
117
}
118
119
void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
120
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
121
{
122
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
123
uintptr_t ra = GETPC();
124
+ uint64_t d0, d1;
125
126
-#if !defined(HOST_WORDS_BIGENDIAN)
127
- pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
128
- pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
129
- pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
130
- pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
131
-#else
132
- pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
133
- pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
134
- pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
135
- pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
136
+ /*
137
+ * Load 8 bytes at a time. Use little-endian load, then for
138
+ * big-endian target, we must then bswap the two words.
139
+ */
140
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
141
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
142
+#ifdef TARGET_WORDS_BIGENDIAN
143
+ d0 = bswap32x2(d0);
144
+ d1 = bswap32x2(d1);
145
#endif
146
+ pwd->d[0] = d0;
147
+ pwd->d[1] = d1;
148
}
149
150
void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
151
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
152
{
153
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
154
uintptr_t ra = GETPC();
155
+ uint64_t d0, d1;
156
157
- pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
158
- pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
159
+ d0 = cpu_ldq_data_ra(env, addr + 0, ra);
160
+ d1 = cpu_ldq_data_ra(env, addr + 8, ra);
161
+ pwd->d[0] = d0;
162
+ pwd->d[1] = d1;
163
}
164
165
#define MSA_PAGESPAN(x) \
166
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
167
168
ensure_writable_pages(env, addr, mmu_idx, ra);
169
170
-#if !defined(HOST_WORDS_BIGENDIAN)
171
- cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[0], ra);
172
- cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[1], ra);
173
- cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[2], ra);
174
- cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[3], ra);
175
- cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[4], ra);
176
- cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[5], ra);
177
- cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[6], ra);
178
- cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[7], ra);
179
- cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[8], ra);
180
- cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[9], ra);
181
- cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
182
- cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
183
- cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
184
- cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
185
- cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
186
- cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
187
-#else
188
- cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[0], ra);
189
- cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[1], ra);
190
- cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[2], ra);
191
- cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[3], ra);
192
- cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[4], ra);
193
- cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[5], ra);
194
- cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[6], ra);
195
- cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[7], ra);
196
- cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
197
- cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
198
- cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
199
- cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
200
- cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
201
- cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
202
- cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[14], ra);
203
- cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[15], ra);
204
-#endif
205
+ /* Store 8 bytes at a time. Vector element ordering makes this LE. */
206
+ cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra);
207
+ cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra);
208
}
209
210
void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
211
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
212
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
213
int mmu_idx = cpu_mmu_index(env, false);
214
uintptr_t ra = GETPC();
215
+ uint64_t d0, d1;
216
217
ensure_writable_pages(env, addr, mmu_idx, ra);
218
219
-#if !defined(HOST_WORDS_BIGENDIAN)
220
- cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
221
- cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
222
- cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
223
- cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
224
- cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
225
- cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
226
- cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
227
- cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
228
-#else
229
- cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
230
- cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
231
- cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
232
- cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
233
- cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
234
- cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
235
- cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
236
- cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
237
+ /* Store 8 bytes at a time. See helper_msa_ld_h. */
238
+ d0 = pwd->d[0];
239
+ d1 = pwd->d[1];
240
+#ifdef TARGET_WORDS_BIGENDIAN
241
+ d0 = bswap16x4(d0);
242
+ d1 = bswap16x4(d1);
243
#endif
244
+ cpu_stq_le_data_ra(env, addr + 0, d0, ra);
245
+ cpu_stq_le_data_ra(env, addr + 8, d1, ra);
246
}
247
248
void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
249
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
250
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
251
int mmu_idx = cpu_mmu_index(env, false);
252
uintptr_t ra = GETPC();
253
+ uint64_t d0, d1;
254
255
ensure_writable_pages(env, addr, mmu_idx, ra);
256
257
-#if !defined(HOST_WORDS_BIGENDIAN)
258
- cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
259
- cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
260
- cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
261
- cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
262
-#else
263
- cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
264
- cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
265
- cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
266
- cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
267
+ /* Store 8 bytes at a time. See helper_msa_ld_w. */
268
+ d0 = pwd->d[0];
269
+ d1 = pwd->d[1];
270
+#ifdef TARGET_WORDS_BIGENDIAN
271
+ d0 = bswap32x2(d0);
272
+ d1 = bswap32x2(d1);
273
#endif
274
+ cpu_stq_le_data_ra(env, addr + 0, d0, ra);
275
+ cpu_stq_le_data_ra(env, addr + 8, d1, ra);
276
}
277
278
void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
279
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
280
281
ensure_writable_pages(env, addr, mmu_idx, GETPC());
282
283
- cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
284
- cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
285
+ cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra);
286
+ cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra);
287
}
149
--
288
--
150
2.17.1
289
2.25.1
151
290
152
291
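For what the load_memop()/store_memop() split above buys: every caller in
cputlb.c passes a compile-time-constant MemOp, so after inlining the switch
collapses to a single host access, and a non-constant caller would trip the
qemu_build_not_reached() check instead. A hedged sketch of such a caller
(hypothetical, assumed to live in cputlb.c where load_memop is visible):

    static uint64_t demo_load_be32(const void *haddr)
    {
        /* folds to a single ldl_be_p(haddr) after constant propagation */
        return load_memop(haddr, MO_BEUL);
    }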
1
A write to ROM does not require going through the whole I/O path
1
The helper_*_mmu functions were the only thing available
2
in order to be discarded.
2
when this code was written. This could have been adjusted
3
when we added cpu_*_mmuidx_ra, but now we can most easily
4
use the newest set of interfaces.
3
5
4
Reviewed-by: David Hildenbrand <david@redhat.com>
6
Reviewed-by: David Hildenbrand <david@redhat.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
9
---
7
include/exec/cpu-all.h | 5 ++++-
10
target/s390x/tcg/mem_helper.c | 8 ++++----
8
include/exec/cpu-common.h | 1 -
11
1 file changed, 4 insertions(+), 4 deletions(-)
9
accel/tcg/cputlb.c | 36 ++++++++++++++++++++--------------
10
exec.c | 41 +--------------------------------------
11
4 files changed, 26 insertions(+), 57 deletions(-)
12
12
13
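For reference, the cpu_*_mmu() variants used below take the full MemOpIdx
plus the unwind return address, so a caller that already carries both, as
the s390x helpers do, hands them straight through. A hedged sketch (the
wrapper and the oi construction are illustrative, not from the patch):

    static void demo_store_byte(CPUS390XState *env, uint64_t addr,
                                uint8_t byte, int mmu_idx, uintptr_t ra)
    {
        MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        /* full softmmu slow path, with proper exception unwinding */
        cpu_stb_mmu(env, addr, byte, oi, ra);
    }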
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
13
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
14
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu-all.h
15
--- a/target/s390x/tcg/mem_helper.c
16
+++ b/include/exec/cpu-all.h
16
+++ b/target/s390x/tcg/mem_helper.c
17
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
17
@@ -XXX,XX +XXX,XX @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
18
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
18
* page. This is especially relevant to speed up TLB_NOTDIRTY.
19
/* Set if TLB entry requires byte swap. */
19
*/
20
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
20
g_assert(size > 0);
21
+/* Set if TLB entry writes ignored. */
21
- helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
22
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
22
+ cpu_stb_mmu(env, vaddr, byte, oi, ra);
23
23
haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
24
/* Use this mask to check interception with an alignment mask
24
if (likely(haddr)) {
25
* in a TCG backend.
25
memset(haddr + 1, byte, size - 1);
26
*/
26
} else {
27
#define TLB_FLAGS_MASK \
27
for (i = 1; i < size; i++) {
28
- (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP)
28
- helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
29
+ (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
29
+ cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
30
+ | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
30
}
31
31
}
32
/**
33
* tlb_hit_page: return true if page aligned @addr is a hit against the
34
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/exec/cpu-common.h
37
+++ b/include/exec/cpu-common.h
38
@@ -XXX,XX +XXX,XX @@ void qemu_flush_coalesced_mmio_buffer(void);
39
40
void cpu_flush_icache_range(hwaddr start, hwaddr len);
41
42
-extern struct MemoryRegion io_mem_rom;
43
extern struct MemoryRegion io_mem_notdirty;
44
45
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
46
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/accel/tcg/cputlb.c
49
+++ b/accel/tcg/cputlb.c
50
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
51
{
52
uintptr_t addr = tlb_entry->addr_write;
53
54
- if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
55
+ if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
56
+ TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
57
addr &= TARGET_PAGE_MASK;
58
addr += tlb_entry->addend;
59
if ((addr - start) < length) {
60
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
61
address |= TLB_MMIO;
62
addend = 0;
63
} else {
64
- /* TLB_MMIO for rom/romd handled below */
65
addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
66
}
32
}
67
33
@@ -XXX,XX +XXX,XX @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
68
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
34
* Do a single access and test if we can then get access to the
69
35
* page. This is especially relevant to speed up TLB_NOTDIRTY.
70
tn.addr_write = -1;
36
*/
71
if (prot & PAGE_WRITE) {
37
- byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
72
- if ((memory_region_is_ram(section->mr) && section->readonly)
38
+ byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
73
- || memory_region_is_romd(section->mr)) {
39
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
74
- /* Write access calls the I/O callback. */
40
return byte;
75
- tn.addr_write = address | TLB_MMIO;
76
- } else if (memory_region_is_ram(section->mr)
77
- && cpu_physical_memory_is_clean(
78
- memory_region_get_ram_addr(section->mr) + xlat)) {
79
- tn.addr_write = address | TLB_NOTDIRTY;
80
- } else {
81
- tn.addr_write = address;
82
+ tn.addr_write = address;
83
+ if (memory_region_is_romd(section->mr)) {
84
+ /* Use the MMIO path so that the device can switch states. */
85
+ tn.addr_write |= TLB_MMIO;
86
+ } else if (memory_region_is_ram(section->mr)) {
87
+ if (section->readonly) {
88
+ tn.addr_write |= TLB_DISCARD_WRITE;
89
+ } else if (cpu_physical_memory_is_clean(
90
+ memory_region_get_ram_addr(section->mr) + xlat)) {
91
+ tn.addr_write |= TLB_NOTDIRTY;
92
+ }
93
}
94
if (prot & PAGE_WRITE_INV) {
95
tn.addr_write |= TLB_INVALID_MASK;
96
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
97
mr = section->mr;
98
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
99
cpu->mem_io_pc = retaddr;
100
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
101
+ if (mr != &io_mem_notdirty && !cpu->can_do_io) {
102
cpu_io_recompile(cpu, retaddr);
103
}
104
105
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
106
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
107
mr = section->mr;
108
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
109
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
110
+ if (mr != &io_mem_notdirty && !cpu->can_do_io) {
111
cpu_io_recompile(cpu, retaddr);
112
}
113
cpu->mem_io_vaddr = addr;
114
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
115
}
116
117
/* Reject I/O access, or other required slow-path. */
118
- if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) {
119
+ if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
120
return NULL;
121
}
122
123
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
124
return;
125
}
126
127
+ /* Ignore writes to ROM. */
128
+ if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
129
+ return;
130
+ }
131
+
132
haddr = (void *)((uintptr_t)addr + entry->addend);
133
134
/*
135
diff --git a/exec.c b/exec.c
136
index XXXXXXX..XXXXXXX 100644
137
--- a/exec.c
138
+++ b/exec.c
139
@@ -XXX,XX +XXX,XX @@ static MemoryRegion *system_io;
140
AddressSpace address_space_io;
141
AddressSpace address_space_memory;
142
143
-MemoryRegion io_mem_rom, io_mem_notdirty;
144
+MemoryRegion io_mem_notdirty;
145
static MemoryRegion io_mem_unassigned;
146
#endif
41
#endif
147
42
@@ -XXX,XX +XXX,XX @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
148
@@ -XXX,XX +XXX,XX @@ typedef struct subpage_t {
43
* Do a single access and test if we can then get access to the
149
44
* page. This is especially relevant to speed up TLB_NOTDIRTY.
150
#define PHYS_SECTION_UNASSIGNED 0
45
*/
151
#define PHYS_SECTION_NOTDIRTY 1
46
- helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
152
-#define PHYS_SECTION_ROM 2
47
+ cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
153
48
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
154
static void io_mem_init(void);
49
#endif
155
static void memory_map_init(void);
156
@@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
157
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
158
if (!section->readonly) {
159
iotlb |= PHYS_SECTION_NOTDIRTY;
160
- } else {
161
- iotlb |= PHYS_SECTION_ROM;
162
}
163
} else {
164
AddressSpaceDispatch *d;
165
@@ -XXX,XX +XXX,XX @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
166
return phys_section_add(map, &section);
167
}
50
}
168
169
-static void readonly_mem_write(void *opaque, hwaddr addr,
170
- uint64_t val, unsigned size)
171
-{
172
- /* Ignore any write to ROM. */
173
-}
174
-
175
-static bool readonly_mem_accepts(void *opaque, hwaddr addr,
176
- unsigned size, bool is_write,
177
- MemTxAttrs attrs)
178
-{
179
- return is_write;
180
-}
181
-
182
-/* This will only be used for writes, because reads are special cased
183
- * to directly access the underlying host ram.
184
- */
185
-static const MemoryRegionOps readonly_mem_ops = {
186
- .write = readonly_mem_write,
187
- .valid.accepts = readonly_mem_accepts,
188
- .endianness = DEVICE_NATIVE_ENDIAN,
189
- .valid = {
190
- .min_access_size = 1,
191
- .max_access_size = 8,
192
- .unaligned = false,
193
- },
194
- .impl = {
195
- .min_access_size = 1,
196
- .max_access_size = 8,
197
- .unaligned = false,
198
- },
199
-};
200
-
201
MemoryRegionSection *iotlb_to_section(CPUState *cpu,
202
hwaddr index, MemTxAttrs attrs)
203
{
204
@@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
205
206
static void io_mem_init(void)
207
{
208
- memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
209
- NULL, NULL, UINT64_MAX);
210
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
211
NULL, UINT64_MAX);
212
213
@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
214
assert(n == PHYS_SECTION_UNASSIGNED);
215
n = dummy_section(&d->map, fv, &io_mem_notdirty);
216
assert(n == PHYS_SECTION_NOTDIRTY);
217
- n = dummy_section(&d->map, fv, &io_mem_rom);
218
- assert(n == PHYS_SECTION_ROM);
219
220
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
221
222
--
2.17.1

--
2.25.1
diff view generated by jsdifflib
1
Pages that we want to track for NOTDIRTY are RAM. We do not
really need to go through the I/O path to handle them.

Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-common.h | 2 --
accel/tcg/cputlb.c | 26 +++++++++++++++++---
exec.c | 50 ---------------------------------------
memory.c | 16 -------------
4 files changed, 23 insertions(+), 71 deletions(-)

The helper_*_mmu functions were the only thing available
when this code was written. This could have been adjusted
when we added cpu_*_mmuidx_ra, but now we can most easily
use the newest set of interfaces.

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/ldst_helper.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
14
12
15
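
As a loose standalone analogy (not QEMU code) for what the NOTDIRTY slow path does once it is handled directly in cputlb: the first write to a "clean" page flushes any cached translations for it and marks it dirty, after which later writes to the same page take the fast path. Everything below is a sketch with invented names; only the control flow mirrors the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES    4
#define PAGE_SIZE 4096

static bool page_dirty[NPAGES];
static uint8_t ram[NPAGES][PAGE_SIZE];

static void invalidate_translations(unsigned page)
{
    /* Stand-in for tb_invalidate_phys_page_fast() and friends. */
    printf("flush translations for page %u\n", page);
}

static void ram_store(unsigned page, unsigned off, uint8_t val)
{
    if (!page_dirty[page]) {          /* slow path, like TLB_NOTDIRTY */
        invalidate_translations(page);
        page_dirty[page] = true;      /* future stores use the fast path */
    }
    ram[page][off] = val;
}

int main(void)
{
    ram_store(1, 0, 0xaa);   /* slow path: flushes, marks dirty */
    ram_store(1, 1, 0xbb);   /* fast path */
    return 0;
}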
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
13
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
16
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
17
--- a/include/exec/cpu-common.h
15
--- a/target/sparc/ldst_helper.c
18
+++ b/include/exec/cpu-common.h
16
+++ b/target/sparc/ldst_helper.c
19
@@ -XXX,XX +XXX,XX @@ void qemu_flush_coalesced_mmio_buffer(void);
17
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
20
18
oi = make_memop_idx(memop, idx);
21
void cpu_flush_icache_range(hwaddr start, hwaddr len);
19
switch (size) {
22
20
case 1:
23
-extern struct MemoryRegion io_mem_notdirty;
21
- ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
24
-
22
+ ret = cpu_ldb_mmu(env, addr, oi, GETPC());
25
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
23
break;
26
24
case 2:
27
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
25
if (asi & 8) {
28
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
26
- ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
29
index XXXXXXX..XXXXXXX 100644
27
+ ret = cpu_ldw_le_mmu(env, addr, oi, GETPC());
30
--- a/accel/tcg/cputlb.c
28
} else {
31
+++ b/accel/tcg/cputlb.c
29
- ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
32
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
30
+ ret = cpu_ldw_be_mmu(env, addr, oi, GETPC());
33
mr = section->mr;
31
}
34
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
32
break;
35
cpu->mem_io_pc = retaddr;
33
case 4:
36
- if (mr != &io_mem_notdirty && !cpu->can_do_io) {
34
if (asi & 8) {
37
+ if (!cpu->can_do_io) {
35
- ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
38
cpu_io_recompile(cpu, retaddr);
36
+ ret = cpu_ldl_le_mmu(env, addr, oi, GETPC());
39
}
37
} else {
40
38
- ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
41
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
39
+ ret = cpu_ldl_be_mmu(env, addr, oi, GETPC());
42
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
40
}
43
mr = section->mr;
41
break;
44
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
42
case 8:
45
- if (mr != &io_mem_notdirty && !cpu->can_do_io) {
43
if (asi & 8) {
46
+ if (!cpu->can_do_io) {
44
- ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
47
cpu_io_recompile(cpu, retaddr);
45
+ ret = cpu_ldq_le_mmu(env, addr, oi, GETPC());
48
}
46
} else {
49
cpu->mem_io_vaddr = addr;
47
- ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
50
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
48
+ ret = cpu_ldq_be_mmu(env, addr, oi, GETPC());
51
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
49
}
52
50
break;
53
/* Handle I/O access. */
51
default:
54
- if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) {
55
+ if (tlb_addr & TLB_MMIO) {
56
io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
57
op ^ (need_swap * MO_BSWAP));
58
return;
59
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
60
61
haddr = (void *)((uintptr_t)addr + entry->addend);
62
63
+ /* Handle clean RAM pages. */
64
+ if (tlb_addr & TLB_NOTDIRTY) {
65
+ NotDirtyInfo ndi;
66
+
67
+ /* We require mem_io_pc in tb_invalidate_phys_page_range. */
68
+ env_cpu(env)->mem_io_pc = retaddr;
69
+
70
+ memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
71
+ addr + iotlbentry->addr, size);
72
+
73
+ if (unlikely(need_swap)) {
74
+ store_memop(haddr, val, op ^ MO_BSWAP);
75
+ } else {
76
+ store_memop(haddr, val, op);
77
+ }
78
+
79
+ memory_notdirty_write_complete(&ndi);
80
+ return;
81
+ }
82
+
83
/*
84
* Keep these two store_memop separate to ensure that the compiler
85
* is able to fold the entire function to a single instruction.
86
diff --git a/exec.c b/exec.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/exec.c
89
+++ b/exec.c
90
@@ -XXX,XX +XXX,XX @@ static MemoryRegion *system_io;
91
AddressSpace address_space_io;
92
AddressSpace address_space_memory;
93
94
-MemoryRegion io_mem_notdirty;
95
static MemoryRegion io_mem_unassigned;
96
#endif
97
98
@@ -XXX,XX +XXX,XX @@ typedef struct subpage_t {
99
} subpage_t;
100
101
#define PHYS_SECTION_UNASSIGNED 0
102
-#define PHYS_SECTION_NOTDIRTY 1
103
104
static void io_mem_init(void);
105
static void memory_map_init(void);
106
@@ -XXX,XX +XXX,XX @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
107
if (memory_region_is_ram(section->mr)) {
108
/* Normal RAM. */
109
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
110
- if (!section->readonly) {
111
- iotlb |= PHYS_SECTION_NOTDIRTY;
112
- }
113
} else {
114
AddressSpaceDispatch *d;
115
116
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
117
}
118
}
119
120
-/* Called within RCU critical section. */
121
-static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
122
- uint64_t val, unsigned size)
123
-{
124
- NotDirtyInfo ndi;
125
-
126
- memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
127
- ram_addr, size);
128
-
129
- stn_p(qemu_map_ram_ptr(NULL, ram_addr), size, val);
130
- memory_notdirty_write_complete(&ndi);
131
-}
132
-
133
-static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
134
- unsigned size, bool is_write,
135
- MemTxAttrs attrs)
136
-{
137
- return is_write;
138
-}
139
-
140
-static const MemoryRegionOps notdirty_mem_ops = {
141
- .write = notdirty_mem_write,
142
- .valid.accepts = notdirty_mem_accepts,
143
- .endianness = DEVICE_NATIVE_ENDIAN,
144
- .valid = {
145
- .min_access_size = 1,
146
- .max_access_size = 8,
147
- .unaligned = false,
148
- },
149
- .impl = {
150
- .min_access_size = 1,
151
- .max_access_size = 8,
152
- .unaligned = false,
153
- },
154
-};
155
-
156
/* Generate a debug exception if a watchpoint has been hit. */
157
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
158
MemTxAttrs attrs, int flags, uintptr_t ra)
159
@@ -XXX,XX +XXX,XX @@ static void io_mem_init(void)
160
{
161
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
162
NULL, UINT64_MAX);
163
-
164
- /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
165
- * which can be called without the iothread mutex.
166
- */
167
- memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
168
- NULL, UINT64_MAX);
169
- memory_region_clear_global_locking(&io_mem_notdirty);
170
}
171
172
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
173
@@ -XXX,XX +XXX,XX @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
174
175
n = dummy_section(&d->map, fv, &io_mem_unassigned);
176
assert(n == PHYS_SECTION_UNASSIGNED);
177
- n = dummy_section(&d->map, fv, &io_mem_notdirty);
178
- assert(n == PHYS_SECTION_NOTDIRTY);
179
180
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
181
182
diff --git a/memory.c b/memory.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/memory.c
185
+++ b/memory.c
186
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
187
tmp = mr->ops->read(mr->opaque, addr, size);
188
if (mr->subpage) {
189
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
190
- } else if (mr == &io_mem_notdirty) {
191
- /* Accesses to code which has previously been translated into a TB show
192
- * up in the MMIO path, as accesses to the io_mem_notdirty
193
- * MemoryRegion. */
194
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
195
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
196
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
197
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
198
r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
199
if (mr->subpage) {
200
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
201
- } else if (mr == &io_mem_notdirty) {
202
- /* Accesses to code which has previously been translated into a TB show
203
- * up in the MMIO path, as accesses to the io_mem_notdirty
204
- * MemoryRegion. */
205
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
206
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
207
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
208
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
209
210
if (mr->subpage) {
211
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
212
- } else if (mr == &io_mem_notdirty) {
213
- /* Accesses to code which has previously been translated into a TB show
214
- * up in the MMIO path, as accesses to the io_mem_notdirty
215
- * MemoryRegion. */
216
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
217
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
218
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
219
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
220
221
if (mr->subpage) {
222
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
223
- } else if (mr == &io_mem_notdirty) {
224
- /* Accesses to code which has previously been translated into a TB show
225
- * up in the MMIO path, as accesses to the io_mem_notdirty
226
- * MemoryRegion. */
227
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
228
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
229
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
230
--
2.17.1

--
2.25.1
diff view generated by jsdifflib
1
The memory_region_tb_read tracepoint is unreachable, since notdirty
is supposed to apply only to writes. The memory_region_tb_write
tracepoint is mis-named, because notdirty is not only used for TB
invalidation. It is also used for e.g. VGA RAM updates and migration.

Replace memory_region_tb_write with memory_notdirty_write_access,
and place it in memory_notdirty_write_prepare where it can catch
all of the instances. Add memory_notdirty_set_dirty to log when
we no longer intercept writes to a page.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
exec.c | 3 +++
memory.c | 4 ----
trace-events | 4 ++--
3 files changed, 5 insertions(+), 6 deletions(-)

The helper_*_mmu functions were the only thing available
when this code was written. This could have been adjusted
when we added cpu_*_mmuidx_ra, but now we can most easily
use the newest set of interfaces.

Cc: qemu-arm@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-a64.c | 52 +++++++----------------------------
target/arm/m_helper.c | 6 ++---
2 files changed, 11 insertions(+), 47 deletions(-)
20
13
21
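
The arm conversion touches helper_paired_cmpxchg64_*, whose core is a 128-bit compare-and-swap assembled from two adjacent 64-bit memory words. Ignoring the QEMU memory API entirely, the logic reduces to roughly the sketch below; the real helper performs the loads and stores through cpu_ldq/stq_*_mmu with an alignment-checking MemOpIdx, which this toy version does not model.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool paired_cmpxchg64(uint64_t *mem, uint64_t cmp_lo, uint64_t cmp_hi,
                             uint64_t new_lo, uint64_t new_hi)
{
    uint64_t o0 = mem[0], o1 = mem[1];
    bool success = (o0 == cmp_lo) && (o1 == cmp_hi);

    if (success) {
        mem[0] = new_lo;
        mem[1] = new_hi;
    }
    return success;    /* the QEMU helper returns !success as its failure flag */
}

int main(void)
{
    uint64_t mem[2] = { 1, 2 };

    printf("%d\n", paired_cmpxchg64(mem, 1, 2, 3, 4));          /* 1: swapped */
    printf("%" PRIu64 " %" PRIu64 "\n", mem[0], mem[1]);        /* 3 4 */
    return 0;
}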
diff --git a/exec.c b/exec.c
14
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
22
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
23
--- a/exec.c
16
--- a/target/arm/helper-a64.c
24
+++ b/exec.c
17
+++ b/target/arm/helper-a64.c
25
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
18
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
26
ndi->size = size;
19
uintptr_t ra = GETPC();
27
ndi->pages = NULL;
20
uint64_t o0, o1;
28
21
bool success;
29
+ trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
22
-
30
+
23
-#ifdef CONFIG_USER_ONLY
31
assert(tcg_enabled());
24
- /* ??? Enforce alignment. */
32
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
25
- uint64_t *haddr = g2h(env_cpu(env), addr);
33
ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
26
-
34
@@ -XXX,XX +XXX,XX @@ void memory_notdirty_write_complete(NotDirtyInfo *ndi)
27
- set_helper_retaddr(ra);
35
/* we remove the notdirty callback only if the code has been
28
- o0 = ldq_le_p(haddr + 0);
36
flushed */
29
- o1 = ldq_le_p(haddr + 1);
37
if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
30
- oldv = int128_make128(o0, o1);
38
+ trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
31
-
39
tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
32
- success = int128_eq(oldv, cmpv);
33
- if (success) {
34
- stq_le_p(haddr + 0, int128_getlo(newv));
35
- stq_le_p(haddr + 1, int128_gethi(newv));
36
- }
37
- clear_helper_retaddr();
38
-#else
39
int mem_idx = cpu_mmu_index(env, false);
40
MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
41
MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
42
43
- o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
44
- o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
45
+ o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
46
+ o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
47
oldv = int128_make128(o0, o1);
48
49
success = int128_eq(oldv, cmpv);
50
if (success) {
51
- helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
52
- helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
53
+ cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
54
+ cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
40
}
55
}
56
-#endif
57
58
return !success;
41
}
59
}
42
diff --git a/memory.c b/memory.c
60
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
61
uintptr_t ra = GETPC();
62
uint64_t o0, o1;
63
bool success;
64
-
65
-#ifdef CONFIG_USER_ONLY
66
- /* ??? Enforce alignment. */
67
- uint64_t *haddr = g2h(env_cpu(env), addr);
68
-
69
- set_helper_retaddr(ra);
70
- o1 = ldq_be_p(haddr + 0);
71
- o0 = ldq_be_p(haddr + 1);
72
- oldv = int128_make128(o0, o1);
73
-
74
- success = int128_eq(oldv, cmpv);
75
- if (success) {
76
- stq_be_p(haddr + 0, int128_gethi(newv));
77
- stq_be_p(haddr + 1, int128_getlo(newv));
78
- }
79
- clear_helper_retaddr();
80
-#else
81
int mem_idx = cpu_mmu_index(env, false);
82
MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
83
MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
84
85
- o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
86
- o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
87
+ o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
88
+ o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
89
oldv = int128_make128(o0, o1);
90
91
success = int128_eq(oldv, cmpv);
92
if (success) {
93
- helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
94
- helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
95
+ cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
96
+ cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
97
}
98
-#endif
99
100
return !success;
101
}
102
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
43
index XXXXXXX..XXXXXXX 100644
103
index XXXXXXX..XXXXXXX 100644
44
--- a/memory.c
104
--- a/target/arm/m_helper.c
45
+++ b/memory.c
105
+++ b/target/arm/m_helper.c
46
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
106
@@ -XXX,XX +XXX,XX @@ static bool do_v7m_function_return(ARMCPU *cpu)
47
/* Accesses to code which has previously been translated into a TB show
107
* do them as secure, so work out what MMU index that is.
48
* up in the MMIO path, as accesses to the io_mem_notdirty
108
*/
49
* MemoryRegion. */
109
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
50
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
110
- oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
51
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
111
- newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
52
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
112
- newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
53
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
113
+ oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
54
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
114
+ newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
55
/* Accesses to code which has previously been translated into a TB show
115
+ newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
56
* up in the MMIO path, as accesses to the io_mem_notdirty
116
57
* MemoryRegion. */
117
/* Consistency checks on new IPSR */
58
- trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
118
newpsr_exc = newpsr & XPSR_EXCP;
59
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
60
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
61
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
62
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
63
/* Accesses to code which has previously been translated into a TB show
64
* up in the MMIO path, as accesses to the io_mem_notdirty
65
* MemoryRegion. */
66
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
67
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
68
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
69
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
70
@@ -XXX,XX +XXX,XX @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
71
/* Accesses to code which has previously been translated into a TB show
72
* up in the MMIO path, as accesses to the io_mem_notdirty
73
* MemoryRegion. */
74
- trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
75
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
76
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
77
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
78
diff --git a/trace-events b/trace-events
79
index XXXXXXX..XXXXXXX 100644
80
--- a/trace-events
81
+++ b/trace-events
82
@@ -XXX,XX +XXX,XX @@ dma_map_wait(void *dbs) "dbs=%p"
83
find_ram_offset(uint64_t size, uint64_t offset) "size: 0x%" PRIx64 " @ 0x%" PRIx64
84
find_ram_offset_loop(uint64_t size, uint64_t candidate, uint64_t offset, uint64_t next, uint64_t mingap) "trying size: 0x%" PRIx64 " @ 0x%" PRIx64 ", offset: 0x%" PRIx64" next: 0x%" PRIx64 " mingap: 0x%" PRIx64
85
ram_block_discard_range(const char *rbname, void *hva, size_t length, bool need_madvise, bool need_fallocate, int ret) "%s@%p + 0x%zx: madvise: %d fallocate: %d ret: %d"
86
+memory_notdirty_write_access(uint64_t vaddr, uint64_t ram_addr, unsigned size) "0x%" PRIx64 " ram_addr 0x%" PRIx64 " size %u"
87
+memory_notdirty_set_dirty(uint64_t vaddr) "0x%" PRIx64
88
89
# memory.c
90
memory_region_ops_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
91
memory_region_ops_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
92
memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
93
memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
94
-memory_region_tb_read(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
95
-memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
96
memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
97
memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
98
flatview_new(void *view, void *root) "%p (root %p)"
99
--
2.17.1

--
2.25.1
diff view generated by jsdifflib
1
Since 9458a9a1df1a, all readers of the dirty bitmaps wait
for the rcu lock, which means that they wait until the end
of any executing TranslationBlock.

As a consequence, there is no need for the actual access
to happen in between the _prepare and _complete. Therefore,
we can improve things by merging the two functions into
notdirty_write and dropping the NotDirtyInfo structure.

In addition, the only users of notdirty_write are in cputlb.c,
so move the merged function there. Pass in the CPUIOTLBEntry
from which the ram_addr_t may be computed.

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

These functions have been replaced by cpu_*_mmu as the
most proper interface to use from target code.

Hide these declarations from code that should not use them.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
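
A side note on the oi argument threaded through all of these helpers: a MemOpIdx simply packs the memory-operation flags and the MMU index into one integer, so a single argument can carry both. The sketch below uses an illustrative encoding, not necessarily QEMU's, and the names are invented.

#include <stdio.h>

typedef unsigned int MemOpIdxToy;

static MemOpIdxToy make_op_idx(unsigned op, unsigned mmu_idx)
{
    return (op << 4) | (mmu_idx & 15);       /* low 4 bits: mmu index */
}

static unsigned get_op(MemOpIdxToy oi)      { return oi >> 4; }
static unsigned get_mmu_idx(MemOpIdxToy oi) { return oi & 15; }

int main(void)
{
    MemOpIdxToy oi = make_op_idx(0x23, 5);
    printf("op=%#x mmu_idx=%u\n", get_op(oi), get_mmu_idx(oi));
    return 0;
}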
17
---
8
---
18
include/exec/memory-internal.h | 65 -----------------------------
9
include/tcg/tcg-ldst.h | 74 ++++++++++++++++++++++++++++++++++++++++++
19
accel/tcg/cputlb.c | 76 +++++++++++++++++++---------------
10
include/tcg/tcg.h | 71 ----------------------------------------
20
exec.c | 44 --------------------
11
accel/tcg/cputlb.c | 1 +
21
3 files changed, 42 insertions(+), 143 deletions(-)
12
tcg/tcg.c | 1 +
22
13
tcg/tci.c | 1 +
23
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
14
5 files changed, 77 insertions(+), 71 deletions(-)
24
index XXXXXXX..XXXXXXX 100644
15
create mode 100644 include/tcg/tcg-ldst.h
25
--- a/include/exec/memory-internal.h
16
26
+++ b/include/exec/memory-internal.h
17
diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
27
@@ -XXX,XX +XXX,XX @@ void address_space_dispatch_free(AddressSpaceDispatch *d);
18
new file mode 100644
28
19
index XXXXXXX..XXXXXXX
29
void mtree_print_dispatch(struct AddressSpaceDispatch *d,
20
--- /dev/null
30
MemoryRegion *root);
21
+++ b/include/tcg/tcg-ldst.h
31
-
22
@@ -XXX,XX +XXX,XX @@
32
-struct page_collection;
23
+/*
33
-
24
+ * Memory helpers that will be used by TCG generated code.
34
-/* Opaque struct for passing info from memory_notdirty_write_prepare()
25
+ *
35
- * to memory_notdirty_write_complete(). Callers should treat all fields
26
+ * Copyright (c) 2008 Fabrice Bellard
36
- * as private, with the exception of @active.
27
+ *
37
- *
28
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
38
- * @active is a field which is not touched by either the prepare or
29
+ * of this software and associated documentation files (the "Software"), to deal
39
- * complete functions, but which the caller can use if it wishes to
30
+ * in the Software without restriction, including without limitation the rights
40
- * track whether it has called prepare for this struct and so needs
31
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
41
- * to later call the complete function.
32
+ * copies of the Software, and to permit persons to whom the Software is
33
+ * furnished to do so, subject to the following conditions:
34
+ *
35
+ * The above copyright notice and this permission notice shall be included in
36
+ * all copies or substantial portions of the Software.
37
+ *
38
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
41
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
42
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
43
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44
+ * THE SOFTWARE.
45
+ */
46
+
47
+#ifndef TCG_LDST_H
48
+#define TCG_LDST_H 1
49
+
50
+#ifdef CONFIG_SOFTMMU
51
+
52
+/* Value zero-extended to tcg register size. */
53
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
54
+ MemOpIdx oi, uintptr_t retaddr);
55
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
56
+ MemOpIdx oi, uintptr_t retaddr);
57
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
58
+ MemOpIdx oi, uintptr_t retaddr);
59
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
60
+ MemOpIdx oi, uintptr_t retaddr);
61
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
62
+ MemOpIdx oi, uintptr_t retaddr);
63
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
64
+ MemOpIdx oi, uintptr_t retaddr);
65
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
66
+ MemOpIdx oi, uintptr_t retaddr);
67
+
68
+/* Value sign-extended to tcg register size. */
69
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
70
+ MemOpIdx oi, uintptr_t retaddr);
71
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
72
+ MemOpIdx oi, uintptr_t retaddr);
73
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
74
+ MemOpIdx oi, uintptr_t retaddr);
75
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
76
+ MemOpIdx oi, uintptr_t retaddr);
77
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
78
+ MemOpIdx oi, uintptr_t retaddr);
79
+
80
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
81
+ MemOpIdx oi, uintptr_t retaddr);
82
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
83
+ MemOpIdx oi, uintptr_t retaddr);
84
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
85
+ MemOpIdx oi, uintptr_t retaddr);
86
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
87
+ MemOpIdx oi, uintptr_t retaddr);
88
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
89
+ MemOpIdx oi, uintptr_t retaddr);
90
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
91
+ MemOpIdx oi, uintptr_t retaddr);
92
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
93
+ MemOpIdx oi, uintptr_t retaddr);
94
+
95
+#endif /* CONFIG_SOFTMMU */
96
+#endif /* TCG_LDST_H */
97
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/tcg/tcg.h
100
+++ b/include/tcg/tcg.h
101
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
102
: (target_long)dup_const(VECE, C))
103
#endif
104
105
-/*
106
- * Memory helpers that will be used by TCG generated code.
42
- */
107
- */
43
-typedef struct {
108
-#ifdef CONFIG_SOFTMMU
44
- CPUState *cpu;
109
-/* Value zero-extended to tcg register size. */
45
- struct page_collection *pages;
110
-tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
46
- ram_addr_t ram_addr;
111
- MemOpIdx oi, uintptr_t retaddr);
47
- vaddr mem_vaddr;
112
-tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
48
- unsigned size;
113
- MemOpIdx oi, uintptr_t retaddr);
49
- bool active;
114
-tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
50
-} NotDirtyInfo;
115
- MemOpIdx oi, uintptr_t retaddr);
51
-
116
-uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
52
-/**
117
- MemOpIdx oi, uintptr_t retaddr);
53
- * memory_notdirty_write_prepare: call before writing to non-dirty memory
118
-tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
54
- * @ndi: pointer to opaque NotDirtyInfo struct
119
- MemOpIdx oi, uintptr_t retaddr);
55
- * @cpu: CPU doing the write
120
-tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
56
- * @mem_vaddr: virtual address of write
121
- MemOpIdx oi, uintptr_t retaddr);
57
- * @ram_addr: the ram address of the write
122
-uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
58
- * @size: size of write in bytes
123
- MemOpIdx oi, uintptr_t retaddr);
59
- *
124
-
60
- * Any code which writes to the host memory corresponding to
125
-/* Value sign-extended to tcg register size. */
61
- * guest RAM which has been marked as NOTDIRTY must wrap those
126
-tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
62
- * writes in calls to memory_notdirty_write_prepare() and
127
- MemOpIdx oi, uintptr_t retaddr);
63
- * memory_notdirty_write_complete():
128
-tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
64
- *
129
- MemOpIdx oi, uintptr_t retaddr);
65
- * NotDirtyInfo ndi;
130
-tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
66
- * memory_notdirty_write_prepare(&ndi, ....);
131
- MemOpIdx oi, uintptr_t retaddr);
67
- * ... perform write here ...
132
-tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
68
- * memory_notdirty_write_complete(&ndi);
133
- MemOpIdx oi, uintptr_t retaddr);
69
- *
134
-tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
70
- * These calls will ensure that we flush any TCG translated code for
135
- MemOpIdx oi, uintptr_t retaddr);
71
- * the memory being written, update the dirty bits and (if possible)
136
-
72
- * remove the slowpath callback for writing to the memory.
137
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
73
- *
138
- MemOpIdx oi, uintptr_t retaddr);
74
- * This must only be called if we are using TCG; it will assert otherwise.
139
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
75
- *
140
- MemOpIdx oi, uintptr_t retaddr);
76
- * We may take locks in the prepare call, so callers must ensure that
141
-void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
77
- * they don't exit (via longjump or otherwise) without calling complete.
142
- MemOpIdx oi, uintptr_t retaddr);
78
- *
143
-void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
79
- * This call must only be made inside an RCU critical section.
144
- MemOpIdx oi, uintptr_t retaddr);
80
- * (Note that while we're executing a TCG TB we're always in an
145
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
81
- * RCU critical section, which is likely to be the case for callers
146
- MemOpIdx oi, uintptr_t retaddr);
82
- * of these functions.)
147
-void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
83
- */
148
- MemOpIdx oi, uintptr_t retaddr);
84
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
149
-void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
85
- CPUState *cpu,
150
- MemOpIdx oi, uintptr_t retaddr);
86
- vaddr mem_vaddr,
151
-
87
- ram_addr_t ram_addr,
152
-/* Temporary aliases until backends are converted. */
88
- unsigned size);
153
-#ifdef TARGET_WORDS_BIGENDIAN
89
-/**
154
-# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
90
- * memory_notdirty_write_complete: finish write to non-dirty memory
155
-# define helper_ret_lduw_mmu helper_be_lduw_mmu
91
- * @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
156
-# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
92
- * by memory_not_dirty_write_prepare().
157
-# define helper_ret_ldul_mmu helper_be_ldul_mmu
93
- */
158
-# define helper_ret_ldl_mmu helper_be_ldul_mmu
94
-void memory_notdirty_write_complete(NotDirtyInfo *ndi);
159
-# define helper_ret_ldq_mmu helper_be_ldq_mmu
95
-
160
-# define helper_ret_stw_mmu helper_be_stw_mmu
96
#endif
161
-# define helper_ret_stl_mmu helper_be_stl_mmu
97
#endif
162
-# define helper_ret_stq_mmu helper_be_stq_mmu
163
-#else
164
-# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
165
-# define helper_ret_lduw_mmu helper_le_lduw_mmu
166
-# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
167
-# define helper_ret_ldul_mmu helper_le_ldul_mmu
168
-# define helper_ret_ldl_mmu helper_le_ldul_mmu
169
-# define helper_ret_ldq_mmu helper_le_ldq_mmu
170
-# define helper_ret_stw_mmu helper_le_stw_mmu
171
-# define helper_ret_stl_mmu helper_le_stl_mmu
172
-# define helper_ret_stq_mmu helper_le_stq_mmu
173
-#endif
174
-#endif /* CONFIG_SOFTMMU */
175
-
176
#ifdef CONFIG_DEBUG_TCG
177
void tcg_assert_listed_vecop(TCGOpcode);
178
#else
98
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
179
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
99
index XXXXXXX..XXXXXXX 100644
180
index XXXXXXX..XXXXXXX 100644
100
--- a/accel/tcg/cputlb.c
181
--- a/accel/tcg/cputlb.c
101
+++ b/accel/tcg/cputlb.c
182
+++ b/accel/tcg/cputlb.c
102
@@ -XXX,XX +XXX,XX @@
183
@@ -XXX,XX +XXX,XX @@
103
#include "exec/helper-proto.h"
184
#ifdef CONFIG_PLUGIN
104
#include "qemu/atomic.h"
185
#include "qemu/plugin-memory.h"
105
#include "qemu/atomic128.h"
186
#endif
106
+#include "translate-all.h"
187
+#include "tcg/tcg-ldst.h"
107
188
108
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
189
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
109
/* #define DEBUG_TLB */
190
/* #define DEBUG_TLB */
110
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
191
diff --git a/tcg/tcg.c b/tcg/tcg.c
111
return qemu_ram_addr_from_host_nofail(p);
192
index XXXXXXX..XXXXXXX 100644
112
}
193
--- a/tcg/tcg.c
113
194
+++ b/tcg/tcg.c
114
+static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
195
@@ -XXX,XX +XXX,XX @@
115
+ CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
196
116
+{
197
#include "elf.h"
117
+ ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
198
#include "exec/log.h"
118
+
199
+#include "tcg/tcg-ldst.h"
119
+ trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
200
#include "tcg-internal.h"
120
+
201
121
+ if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
202
#ifdef CONFIG_TCG_INTERPRETER
122
+ struct page_collection *pages
203
diff --git a/tcg/tci.c b/tcg/tci.c
123
+ = page_collection_lock(ram_addr, ram_addr + size);
204
index XXXXXXX..XXXXXXX 100644
124
+
205
--- a/tcg/tci.c
125
+ /* We require mem_io_pc in tb_invalidate_phys_page_range. */
206
+++ b/tcg/tci.c
126
+ cpu->mem_io_pc = retaddr;
207
@@ -XXX,XX +XXX,XX @@
127
+
208
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
128
+ tb_invalidate_phys_page_fast(pages, ram_addr, size);
209
#include "exec/cpu_ldst.h"
129
+ page_collection_unlock(pages);
210
#include "tcg/tcg-op.h"
130
+ }
211
+#include "tcg/tcg-ldst.h"
131
+
212
#include "qemu/compiler.h"
132
+ /*
213
#include <ffi.h>
133
+ * Set both VGA and migration bits for simplicity and to remove
214
134
+ * the notdirty callback faster.
135
+ */
136
+ cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
137
+
138
+ /* We remove the notdirty callback only if the code has been flushed. */
139
+ if (!cpu_physical_memory_is_clean(ram_addr)) {
140
+ trace_memory_notdirty_set_dirty(mem_vaddr);
141
+ tlb_set_dirty(cpu, mem_vaddr);
142
+ }
143
+}
144
+
145
/*
146
* Probe for whether the specified guest access is permitted. If it is not
147
* permitted then an exception will be taken in the same way as if this
148
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
149
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
150
* operations, or io operations to proceed. Return the host address. */
151
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
152
- TCGMemOpIdx oi, uintptr_t retaddr,
153
- NotDirtyInfo *ndi)
154
+ TCGMemOpIdx oi, uintptr_t retaddr)
155
{
156
size_t mmu_idx = get_mmuidx(oi);
157
uintptr_t index = tlb_index(env, mmu_idx, addr);
158
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
159
160
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
161
162
- ndi->active = false;
163
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
164
- ndi->active = true;
165
- memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
166
- qemu_ram_addr_from_host_nofail(hostaddr),
167
- 1 << s_bits);
168
+ notdirty_write(env_cpu(env), addr, 1 << s_bits,
169
+ &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
170
}
171
172
return hostaddr;
173
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
174
return;
175
}
176
177
- haddr = (void *)((uintptr_t)addr + entry->addend);
178
-
179
/* Handle clean RAM pages. */
180
if (tlb_addr & TLB_NOTDIRTY) {
181
- NotDirtyInfo ndi;
182
-
183
- /* We require mem_io_pc in tb_invalidate_phys_page_range. */
184
- env_cpu(env)->mem_io_pc = retaddr;
185
-
186
- memory_notdirty_write_prepare(&ndi, env_cpu(env), addr,
187
- addr + iotlbentry->addr, size);
188
-
189
- if (unlikely(need_swap)) {
190
- store_memop(haddr, val, op ^ MO_BSWAP);
191
- } else {
192
- store_memop(haddr, val, op);
193
- }
194
-
195
- memory_notdirty_write_complete(&ndi);
196
- return;
197
+ notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
198
}
199
200
+ haddr = (void *)((uintptr_t)addr + entry->addend);
201
+
202
/*
203
* Keep these two store_memop separate to ensure that the compiler
204
* is able to fold the entire function to a single instruction.
205
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
206
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
207
#define ATOMIC_NAME(X) \
208
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
209
-#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
210
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
211
-#define ATOMIC_MMU_CLEANUP \
212
- do { \
213
- if (unlikely(ndi.active)) { \
214
- memory_notdirty_write_complete(&ndi); \
215
- } \
216
- } while (0)
217
+#define ATOMIC_MMU_DECLS
218
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
219
+#define ATOMIC_MMU_CLEANUP
220
221
#define DATA_SIZE 1
222
#include "atomic_template.h"
223
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
224
#undef ATOMIC_MMU_LOOKUP
225
#define EXTRA_ARGS , TCGMemOpIdx oi
226
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
227
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
228
+#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
229
230
#define DATA_SIZE 1
231
#include "atomic_template.h"
232
diff --git a/exec.c b/exec.c
233
index XXXXXXX..XXXXXXX 100644
234
--- a/exec.c
235
+++ b/exec.c
236
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
237
return block->offset + offset;
238
}
239
240
-/* Called within RCU critical section. */
241
-void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
242
- CPUState *cpu,
243
- vaddr mem_vaddr,
244
- ram_addr_t ram_addr,
245
- unsigned size)
246
-{
247
- ndi->cpu = cpu;
248
- ndi->ram_addr = ram_addr;
249
- ndi->mem_vaddr = mem_vaddr;
250
- ndi->size = size;
251
- ndi->pages = NULL;
252
-
253
- trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
254
-
255
- assert(tcg_enabled());
256
- if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
257
- ndi->pages = page_collection_lock(ram_addr, ram_addr + size);
258
- tb_invalidate_phys_page_fast(ndi->pages, ram_addr, size);
259
- }
260
-}
261
-
262
-/* Called within RCU critical section. */
263
-void memory_notdirty_write_complete(NotDirtyInfo *ndi)
264
-{
265
- if (ndi->pages) {
266
- assert(tcg_enabled());
267
- page_collection_unlock(ndi->pages);
268
- ndi->pages = NULL;
269
- }
270
-
271
- /* Set both VGA and migration bits for simplicity and to remove
272
- * the notdirty callback faster.
273
- */
274
- cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
275
- DIRTY_CLIENTS_NOCODE);
276
- /* we remove the notdirty callback only if the code has been
277
- flushed */
278
- if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
279
- trace_memory_notdirty_set_dirty(ndi->mem_vaddr);
280
- tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
281
- }
282
-}
283
-
284
/* Generate a debug exception if a watchpoint has been hit. */
285
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
286
MemTxAttrs attrs, int flags, uintptr_t ra)
287
--
215
--
288
2.17.1
216
2.25.1
289
217
290
218
diff view generated by jsdifflib
1
Increase the current runtime assert to a compile-time assert.

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)

Having observed e.g. al8+leq in dumps, canonicalize to al+leq.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
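
To unpack the shorthand in the second message: "al8 + leq" is an 8-byte little-endian access that also carries an explicit 8-byte alignment request, which is the same constraint as natural alignment (MO_ALIGN). The canonicalization added to tcg_canonicalize_memop() rewrites the former into the latter. Below is a standalone sketch of the predicate with simplified fields; it is not QEMU's MemOp bit layout, only the same decision.

#include <stdio.h>

struct memop {
    unsigned size_log2;    /* log2 of the access size in bytes */
    unsigned align_log2;   /* explicit alignment request, log2 bytes */
    int natural;           /* 1 if "naturally aligned" (MO_ALIGN) */
};

static void canonicalize(struct memop *op)
{
    /* An explicit alignment equal to the access size is natural alignment. */
    if (!op->natural && op->align_log2 == op->size_log2) {
        op->natural = 1;
        op->align_log2 = 0;
    }
}

int main(void)
{
    struct memop leq_al8 = { 3, 3, 0 };   /* 8-byte LE access, align-8 requested */

    canonicalize(&leq_al8);
    printf("natural=%d explicit_align=%u\n", leq_al8.natural, leq_al8.align_log2);
    return 0;
}

In the real patch the test is a_bits == (op & MO_SIZE), after which the MO_AMASK field is replaced with MO_ALIGN.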
9
8
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/cputlb.c
11
--- a/tcg/tcg-op.c
13
+++ b/accel/tcg/cputlb.c
12
+++ b/tcg/tcg-op.c
14
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
13
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void)
15
res = ldq_le_p(haddr);
14
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
16
break;
15
{
17
default:
16
/* Trigger the asserts within as early as possible. */
18
- g_assert_not_reached();
17
- (void)get_alignment_bits(op);
19
+ qemu_build_not_reached();
18
+ unsigned a_bits = get_alignment_bits(op);
20
}
19
+
21
20
+ /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
22
return res;
21
+ if (a_bits == (op & MO_SIZE)) {
23
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
22
+ op = (op & ~MO_AMASK) | MO_ALIGN;
24
stq_le_p(haddr, val);
23
+ }
25
break;
24
26
default:
25
switch (op & MO_SIZE) {
27
- g_assert_not_reached();
26
case MO_8:
28
- break;
29
+ qemu_build_not_reached();
30
}
31
}
32
33
--
2.17.1

--
2.25.1
diff view generated by jsdifflib