1
The following changes since commit ee26ce674a93c824713542cec3b6a9ca85459165:
1
The following changes since commit 15df33ceb73cb6bb3c6736cf4d2cff51129ed4b4:
2
2
3
Merge remote-tracking branch 'remotes/jsnow/tags/python-pull-request' into staging (2021-10-12 16:08:33 -0700)
3
Merge remote-tracking branch 'remotes/quic/tags/pull-hex-20220312-1' into staging (2022-03-13 17:29:18 +0000)
4
4
5
are available in the Git repository at:
5
are available in the Git repository at:
6
6
7
https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211013
7
https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220314
8
8
9
for you to fetch changes up to 76e366e728549b3324cc2dee6745d6a4f1af18e6:
9
for you to fetch changes up to 76cff100beeae8d3676bb658cccd45ef5ced8aa9:
10
10
11
tcg: Canonicalize alignment flags in MemOp (2021-10-13 09:14:35 -0700)
11
tcg/arm: Don't emit UNPREDICTABLE LDRD with Rm == Rt or Rt+1 (2022-03-14 10:31:51 -0700)
12
12
13
----------------------------------------------------------------
13
----------------------------------------------------------------
14
Use MO_128 for 16-byte atomic memory operations.
14
Fixes for s390x host vectors
15
Add cpu_ld/st_mmu memory primitives.
15
Fix for arm ldrd unpredictable case
16
Move helper_ld/st memory helpers out of tcg.h.
17
Canonicalize alignment flags in MemOp.
18
16
19
----------------------------------------------------------------
17
----------------------------------------------------------------
20
BALATON Zoltan (1):
18
Richard Henderson (4):
21
memory: Log access direction for invalid accesses
19
tcg/s390x: Fix tcg_out_dupi_vec vs VGM
20
tcg/s390x: Fix INDEX_op_bitsel_vec vs VSEL
21
tcg/s390x: Fix tcg_out_dup_vec vs general registers
22
tcg/arm: Don't emit UNPREDICTABLE LDRD with Rm == Rt or Rt+1
22
23
23
Richard Henderson (14):
24
tcg/arm/tcg-target.c.inc | 17 +++++++++++++++--
24
target/arm: Use MO_128 for 16 byte atomics
25
tcg/s390x/tcg-target.c.inc | 7 ++++---
25
target/i386: Use MO_128 for 16 byte atomics
26
2 files changed, 19 insertions(+), 5 deletions(-)
26
target/ppc: Use MO_128 for 16 byte atomics
27
target/s390x: Use MO_128 for 16 byte atomics
28
target/hexagon: Implement cpu_mmu_index
29
accel/tcg: Add cpu_{ld,st}*_mmu interfaces
30
accel/tcg: Move cpu_atomic decls to exec/cpu_ldst.h
31
target/mips: Use cpu_*_data_ra for msa load/store
32
target/mips: Use 8-byte memory ops for msa load/store
33
target/s390x: Use cpu_*_mmu instead of helper_*_mmu
34
target/sparc: Use cpu_*_mmu instead of helper_*_mmu
35
target/arm: Use cpu_*_mmu instead of helper_*_mmu
36
tcg: Move helper_*_mmu decls to tcg/tcg-ldst.h
37
tcg: Canonicalize alignment flags in MemOp
38
39
docs/devel/loads-stores.rst | 52 +++++-
40
include/exec/cpu_ldst.h | 332 ++++++++++++++++++-----------------
41
include/tcg/tcg-ldst.h | 74 ++++++++
42
include/tcg/tcg.h | 158 -----------------
43
target/hexagon/cpu.h | 9 +
44
accel/tcg/cputlb.c | 393 ++++++++++++++----------------------------
45
accel/tcg/user-exec.c | 385 +++++++++++++++++------------------------
46
softmmu/memory.c | 20 +--
47
target/arm/helper-a64.c | 61 ++-----
48
target/arm/m_helper.c | 6 +-
49
target/i386/tcg/mem_helper.c | 2 +-
50
target/m68k/op_helper.c | 1 -
51
target/mips/tcg/msa_helper.c | 389 ++++++++++-------------------------------
52
target/ppc/mem_helper.c | 1 -
53
target/ppc/translate.c | 12 +-
54
target/s390x/tcg/mem_helper.c | 13 +-
55
target/sparc/ldst_helper.c | 14 +-
56
tcg/tcg-op.c | 7 +-
57
tcg/tcg.c | 1 +
58
tcg/tci.c | 1 +
59
accel/tcg/ldst_common.c.inc | 307 +++++++++++++++++++++++++++++++++
60
21 files changed, 1032 insertions(+), 1206 deletions(-)
61
create mode 100644 include/tcg/tcg-ldst.h
62
create mode 100644 accel/tcg/ldst_common.c.inc
63
diff view generated by jsdifflib
Deleted patch
1
From: BALATON Zoltan <balaton@eik.bme.hu>
2
1
3
In memory_region_access_valid() invalid accesses are logged to help
4
debugging but the log message does not say if it was a read or write.
5
Log that too to better identify the access causing the problem.
6
7
Reviewed-by: David Hildenbrand <david@redhat.com>
8
Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu>
9
Message-Id: <20211011173616.F1DE0756022@zero.eik.bme.hu>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
softmmu/memory.c | 20 ++++++++++----------
13
1 file changed, 10 insertions(+), 10 deletions(-)
14
15
diff --git a/softmmu/memory.c b/softmmu/memory.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/softmmu/memory.c
18
+++ b/softmmu/memory.c
19
@@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr,
20
{
21
if (mr->ops->valid.accepts
22
&& !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
23
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
24
- "0x%" HWADDR_PRIX ", size %u, "
25
- "region '%s', reason: rejected\n",
26
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
27
+ ", size %u, region '%s', reason: rejected\n",
28
+ is_write ? "write" : "read",
29
addr, size, memory_region_name(mr));
30
return false;
31
}
32
33
if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
34
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
35
- "0x%" HWADDR_PRIX ", size %u, "
36
- "region '%s', reason: unaligned\n",
37
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
38
+ ", size %u, region '%s', reason: unaligned\n",
39
+ is_write ? "write" : "read",
40
addr, size, memory_region_name(mr));
41
return false;
42
}
43
@@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr,
44
45
if (size > mr->ops->valid.max_access_size
46
|| size < mr->ops->valid.min_access_size) {
47
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
48
- "0x%" HWADDR_PRIX ", size %u, "
49
- "region '%s', reason: invalid size "
50
- "(min:%u max:%u)\n",
51
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
52
+ ", size %u, region '%s', reason: invalid size "
53
+ "(min:%u max:%u)\n",
54
+ is_write ? "write" : "read",
55
addr, size, memory_region_name(mr),
56
mr->ops->valid.min_access_size,
57
mr->ops->valid.max_access_size);
58
--
59
2.25.1
60
61
diff view generated by jsdifflib
1
These functions are much closer to the softmmu helper
1
The immediate operands to VGM were in the wrong order,
2
functions, in that they take the complete MemOpIdx,
2
producing an inverse mask.
3
and from that they may enforce required alignment.
4
3
5
The previous cpu_ldst.h functions did not have alignment info,
6
and so did not enforce it. Retain this by adding MO_UNALN to
7
the MemOp that we create in calling the new functions.
8
9
Note that we are not yet enforcing alignment for user-only,
10
but we now have the information with which to do so.
11
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
5
---
15
docs/devel/loads-stores.rst | 52 ++++-
6
tcg/s390x/tcg-target.c.inc | 4 ++--
16
include/exec/cpu_ldst.h | 245 ++++++++--------------
7
1 file changed, 2 insertions(+), 2 deletions(-)
17
accel/tcg/cputlb.c | 392 ++++++++++++------------------------
18
accel/tcg/user-exec.c | 385 +++++++++++++++--------------------
19
accel/tcg/ldst_common.c.inc | 307 ++++++++++++++++++++++++++++
20
5 files changed, 717 insertions(+), 664 deletions(-)
21
create mode 100644 accel/tcg/ldst_common.c.inc
22
8
23
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
9
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
24
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
25
--- a/docs/devel/loads-stores.rst
11
--- a/tcg/s390x/tcg-target.c.inc
26
+++ b/docs/devel/loads-stores.rst
12
+++ b/tcg/s390x/tcg-target.c.inc
27
@@ -XXX,XX +XXX,XX @@ Regexes for git grep
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
28
- ``\<ldn_\([hbl]e\)?_p\>``
14
msb = clz32(val);
29
- ``\<stn_\([hbl]e\)?_p\>``
15
lsb = 31 - ctz32(val);
30
16
}
31
-``cpu_{ld,st}*_mmuidx_ra``
17
- tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_32);
32
-~~~~~~~~~~~~~~~~~~~~~~~~~~
18
+ tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
33
+``cpu_{ld,st}*_mmu``
19
return;
34
+~~~~~~~~~~~~~~~~~~~~
35
36
-These functions operate on a guest virtual address plus a context,
37
-known as a "mmu index" or ``mmuidx``, which controls how that virtual
38
-address is translated. The meaning of the indexes are target specific,
39
-but specifying a particular index might be necessary if, for instance,
40
-the helper requires an "always as non-privileged" access rather that
41
-the default access for the current state of the guest CPU.
42
+These functions operate on a guest virtual address, plus a context
43
+known as a "mmu index" which controls how that virtual address is
44
+translated, plus a ``MemOp`` which contains alignment requirements
45
+among other things. The ``MemOp`` and mmu index are combined into
46
+a single argument of type ``MemOpIdx``.
47
+
48
+The meaning of the indexes are target specific, but specifying a
49
+particular index might be necessary if, for instance, the helper
50
+requires a "always as non-privileged" access rather than the
51
+default access for the current state of the guest CPU.
52
53
These functions may cause a guest CPU exception to be taken
54
(e.g. for an alignment fault or MMU fault) which will result in
55
@@ -XXX,XX +XXX,XX @@ function, which is a return address into the generated code [#gpc]_.
56
57
Function names follow the pattern:
58
59
+load: ``cpu_ld{size}{end}_mmu(env, ptr, oi, retaddr)``
60
+
61
+store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)``
62
+
63
+``size``
64
+ - ``b`` : 8 bits
65
+ - ``w`` : 16 bits
66
+ - ``l`` : 32 bits
67
+ - ``q`` : 64 bits
68
+
69
+``end``
70
+ - (empty) : for target endian, or 8 bit sizes
71
+ - ``_be`` : big endian
72
+ - ``_le`` : little endian
73
+
74
+Regexes for git grep:
75
+ - ``\<cpu_ld[bwlq](_[bl]e)\?_mmu\>``
76
+ - ``\<cpu_st[bwlq](_[bl]e)\?_mmu\>``
77
+
78
+
79
+``cpu_{ld,st}*_mmuidx_ra``
80
+~~~~~~~~~~~~~~~~~~~~~~~~~~
81
+
82
+These functions work like the ``cpu_{ld,st}_mmu`` functions except
83
+that the ``mmuidx`` parameter is not combined with a ``MemOp``,
84
+and therefore there is no required alignment supplied or enforced.
85
+
86
+Function names follow the pattern:
87
+
88
load: ``cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmuidx, retaddr)``
89
90
store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
91
@@ -XXX,XX +XXX,XX @@ of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.
92
93
These are generally the preferred way to do accesses by guest
94
virtual address from helper functions, unless the access should
95
-be performed with a context other than the default.
96
+be performed with a context other than the default, or alignment
97
+should be enforced for the access.
98
99
Function names follow the pattern:
100
101
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
102
index XXXXXXX..XXXXXXX 100644
103
--- a/include/exec/cpu_ldst.h
104
+++ b/include/exec/cpu_ldst.h
105
@@ -XXX,XX +XXX,XX @@
106
* load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
107
* cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
108
* cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
109
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
110
*
111
* store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
112
* cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
113
* cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
114
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
115
*
116
* sign is:
117
* (empty): for 32 and 64 bit sizes
118
@@ -XXX,XX +XXX,XX @@
119
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
120
* the index to use; the "data" and "code" suffixes take the index from
121
* cpu_mmu_index().
122
+ *
123
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
124
+ * MemOp including alignment requirements. The alignment will be enforced.
125
*/
126
#ifndef CPU_LDST_H
127
#define CPU_LDST_H
128
129
+#include "exec/memopidx.h"
130
+
131
#if defined(CONFIG_USER_ONLY)
132
/* sparc32plus has 64bit long but 32bit space address
133
* this can make bad result with g2h() and h2g()
134
@@ -XXX,XX +XXX,XX @@ typedef target_ulong abi_ptr;
135
136
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
137
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
138
-
139
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
140
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
141
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
142
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
143
-
144
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
145
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
146
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
147
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);
148
149
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
150
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
151
-
152
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
153
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
154
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
155
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
156
-
157
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
158
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
159
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
160
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
161
162
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
163
-
164
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
165
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
166
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
167
-
168
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
169
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
170
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
171
172
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
173
uint32_t val, uintptr_t ra);
174
-
175
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
176
uint32_t val, uintptr_t ra);
177
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
178
uint32_t val, uintptr_t ra);
179
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
180
uint64_t val, uintptr_t ra);
181
-
182
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
183
uint32_t val, uintptr_t ra);
184
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
185
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
186
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
187
uint64_t val, uintptr_t ra);
188
189
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
190
+ int mmu_idx, uintptr_t ra);
191
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
192
+ int mmu_idx, uintptr_t ra);
193
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
194
+ int mmu_idx, uintptr_t ra);
195
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
196
+ int mmu_idx, uintptr_t ra);
197
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
198
+ int mmu_idx, uintptr_t ra);
199
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
200
+ int mmu_idx, uintptr_t ra);
201
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
202
+ int mmu_idx, uintptr_t ra);
203
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
204
+ int mmu_idx, uintptr_t ra);
205
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
206
+ int mmu_idx, uintptr_t ra);
207
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
208
+ int mmu_idx, uintptr_t ra);
209
+
210
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
211
+ int mmu_idx, uintptr_t ra);
212
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
213
+ int mmu_idx, uintptr_t ra);
214
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
215
+ int mmu_idx, uintptr_t ra);
216
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
217
+ int mmu_idx, uintptr_t ra);
218
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
219
+ int mmu_idx, uintptr_t ra);
220
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
221
+ int mmu_idx, uintptr_t ra);
222
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
223
+ int mmu_idx, uintptr_t ra);
224
+
225
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
226
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
227
+ MemOpIdx oi, uintptr_t ra);
228
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
229
+ MemOpIdx oi, uintptr_t ra);
230
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
231
+ MemOpIdx oi, uintptr_t ra);
232
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
233
+ MemOpIdx oi, uintptr_t ra);
234
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
235
+ MemOpIdx oi, uintptr_t ra);
236
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
237
+ MemOpIdx oi, uintptr_t ra);
238
+
239
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
240
+ MemOpIdx oi, uintptr_t ra);
241
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
242
+ MemOpIdx oi, uintptr_t ra);
243
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
244
+ MemOpIdx oi, uintptr_t ra);
245
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
246
+ MemOpIdx oi, uintptr_t ra);
247
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
248
+ MemOpIdx oi, uintptr_t ra);
249
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
250
+ MemOpIdx oi, uintptr_t ra);
251
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
252
+ MemOpIdx oi, uintptr_t ra);
253
+
254
#if defined(CONFIG_USER_ONLY)
255
256
extern __thread uintptr_t helper_retaddr;
257
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
258
helper_retaddr = 0;
259
}
260
261
-/*
262
- * Provide the same *_mmuidx_ra interface as for softmmu.
263
- * The mmu_idx argument is ignored.
264
- */
265
-
266
-static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
267
- int mmu_idx, uintptr_t ra)
268
-{
269
- return cpu_ldub_data_ra(env, addr, ra);
270
-}
271
-
272
-static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
273
- int mmu_idx, uintptr_t ra)
274
-{
275
- return cpu_ldsb_data_ra(env, addr, ra);
276
-}
277
-
278
-static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
279
- int mmu_idx, uintptr_t ra)
280
-{
281
- return cpu_lduw_be_data_ra(env, addr, ra);
282
-}
283
-
284
-static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
285
- int mmu_idx, uintptr_t ra)
286
-{
287
- return cpu_ldsw_be_data_ra(env, addr, ra);
288
-}
289
-
290
-static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
291
- int mmu_idx, uintptr_t ra)
292
-{
293
- return cpu_ldl_be_data_ra(env, addr, ra);
294
-}
295
-
296
-static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
297
- int mmu_idx, uintptr_t ra)
298
-{
299
- return cpu_ldq_be_data_ra(env, addr, ra);
300
-}
301
-
302
-static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
303
- int mmu_idx, uintptr_t ra)
304
-{
305
- return cpu_lduw_le_data_ra(env, addr, ra);
306
-}
307
-
308
-static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
309
- int mmu_idx, uintptr_t ra)
310
-{
311
- return cpu_ldsw_le_data_ra(env, addr, ra);
312
-}
313
-
314
-static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
315
- int mmu_idx, uintptr_t ra)
316
-{
317
- return cpu_ldl_le_data_ra(env, addr, ra);
318
-}
319
-
320
-static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
321
- int mmu_idx, uintptr_t ra)
322
-{
323
- return cpu_ldq_le_data_ra(env, addr, ra);
324
-}
325
-
326
-static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
327
- uint32_t val, int mmu_idx, uintptr_t ra)
328
-{
329
- cpu_stb_data_ra(env, addr, val, ra);
330
-}
331
-
332
-static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
333
- uint32_t val, int mmu_idx,
334
- uintptr_t ra)
335
-{
336
- cpu_stw_be_data_ra(env, addr, val, ra);
337
-}
338
-
339
-static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
340
- uint32_t val, int mmu_idx,
341
- uintptr_t ra)
342
-{
343
- cpu_stl_be_data_ra(env, addr, val, ra);
344
-}
345
-
346
-static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
347
- uint64_t val, int mmu_idx,
348
- uintptr_t ra)
349
-{
350
- cpu_stq_be_data_ra(env, addr, val, ra);
351
-}
352
-
353
-static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
354
- uint32_t val, int mmu_idx,
355
- uintptr_t ra)
356
-{
357
- cpu_stw_le_data_ra(env, addr, val, ra);
358
-}
359
-
360
-static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
361
- uint32_t val, int mmu_idx,
362
- uintptr_t ra)
363
-{
364
- cpu_stl_le_data_ra(env, addr, val, ra);
365
-}
366
-
367
-static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
368
- uint64_t val, int mmu_idx,
369
- uintptr_t ra)
370
-{
371
- cpu_stq_le_data_ra(env, addr, val, ra);
372
-}
373
-
374
#else
375
376
/* Needed for TCG_OVERSIZED_GUEST */
377
@@ -XXX,XX +XXX,XX @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
378
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
379
}
380
381
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
382
- int mmu_idx, uintptr_t ra);
383
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
384
- int mmu_idx, uintptr_t ra);
385
-
386
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
387
- int mmu_idx, uintptr_t ra);
388
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
389
- int mmu_idx, uintptr_t ra);
390
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
391
- int mmu_idx, uintptr_t ra);
392
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
393
- int mmu_idx, uintptr_t ra);
394
-
395
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
396
- int mmu_idx, uintptr_t ra);
397
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
398
- int mmu_idx, uintptr_t ra);
399
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
400
- int mmu_idx, uintptr_t ra);
401
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
402
- int mmu_idx, uintptr_t ra);
403
-
404
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
405
- int mmu_idx, uintptr_t retaddr);
406
-
407
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
408
- int mmu_idx, uintptr_t retaddr);
409
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
410
- int mmu_idx, uintptr_t retaddr);
411
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
412
- int mmu_idx, uintptr_t retaddr);
413
-
414
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
415
- int mmu_idx, uintptr_t retaddr);
416
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
417
- int mmu_idx, uintptr_t retaddr);
418
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
419
- int mmu_idx, uintptr_t retaddr);
420
-
421
#endif /* defined(CONFIG_USER_ONLY) */
422
423
#ifdef TARGET_WORDS_BIGENDIAN
424
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
425
# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
426
# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
427
# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
428
+# define cpu_ldw_mmu cpu_ldw_be_mmu
429
+# define cpu_ldl_mmu cpu_ldl_be_mmu
430
+# define cpu_ldq_mmu cpu_ldq_be_mmu
431
# define cpu_stw_data cpu_stw_be_data
432
# define cpu_stl_data cpu_stl_be_data
433
# define cpu_stq_data cpu_stq_be_data
434
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
435
# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
436
# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
437
# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
438
+# define cpu_stw_mmu cpu_stw_be_mmu
439
+# define cpu_stl_mmu cpu_stl_be_mmu
440
+# define cpu_stq_mmu cpu_stq_be_mmu
441
#else
442
# define cpu_lduw_data cpu_lduw_le_data
443
# define cpu_ldsw_data cpu_ldsw_le_data
444
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
445
# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
446
# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
447
# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
448
+# define cpu_ldw_mmu cpu_ldw_le_mmu
449
+# define cpu_ldl_mmu cpu_ldl_le_mmu
450
+# define cpu_ldq_mmu cpu_ldq_le_mmu
451
# define cpu_stw_data cpu_stw_le_data
452
# define cpu_stl_data cpu_stl_le_data
453
# define cpu_stq_data cpu_stq_le_data
454
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
455
# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
456
# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
457
# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
458
+# define cpu_stw_mmu cpu_stw_le_mmu
459
+# define cpu_stl_mmu cpu_stl_le_mmu
460
+# define cpu_stq_mmu cpu_stq_le_mmu
461
#endif
462
463
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
464
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
465
index XXXXXXX..XXXXXXX 100644
466
--- a/accel/tcg/cputlb.c
467
+++ b/accel/tcg/cputlb.c
468
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
469
cpu_loop_exit_atomic(env_cpu(env), retaddr);
470
}
471
472
+/*
473
+ * Verify that we have passed the correct MemOp to the correct function.
474
+ *
475
+ * In the case of the helper_*_mmu functions, we will have done this by
476
+ * using the MemOp to look up the helper during code generation.
477
+ *
478
+ * In the case of the cpu_*_mmu functions, this is up to the caller.
479
+ * We could present one function to target code, and dispatch based on
480
+ * the MemOp, but so far we have worked hard to avoid an indirect function
481
+ * call along the memory path.
482
+ */
483
+static void validate_memop(MemOpIdx oi, MemOp expected)
484
+{
485
+#ifdef CONFIG_DEBUG_TCG
486
+ MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
487
+ assert(have == expected);
488
+#endif
489
+}
490
+
491
/*
492
* Load Helpers
493
*
494
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
495
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
496
MemOpIdx oi, uintptr_t retaddr)
497
{
498
+ validate_memop(oi, MO_UB);
499
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
500
}
501
502
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
503
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
504
MemOpIdx oi, uintptr_t retaddr)
505
{
506
+ validate_memop(oi, MO_LEUW);
507
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
508
full_le_lduw_mmu);
509
}
510
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
511
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
512
MemOpIdx oi, uintptr_t retaddr)
513
{
514
+ validate_memop(oi, MO_BEUW);
515
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
516
full_be_lduw_mmu);
517
}
518
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
519
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
520
MemOpIdx oi, uintptr_t retaddr)
521
{
522
+ validate_memop(oi, MO_LEUL);
523
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
524
full_le_ldul_mmu);
525
}
526
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
527
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
528
MemOpIdx oi, uintptr_t retaddr)
529
{
530
+ validate_memop(oi, MO_BEUL);
531
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
532
full_be_ldul_mmu);
533
}
534
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
535
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
536
MemOpIdx oi, uintptr_t retaddr)
537
{
538
+ validate_memop(oi, MO_LEQ);
539
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
540
helper_le_ldq_mmu);
541
}
542
@@ -XXX,XX +XXX,XX @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
543
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
544
MemOpIdx oi, uintptr_t retaddr)
545
{
546
+ validate_memop(oi, MO_BEQ);
547
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
548
helper_be_ldq_mmu);
549
}
550
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
551
*/
552
553
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
554
- int mmu_idx, uintptr_t retaddr,
555
- MemOp op, FullLoadHelper *full_load)
556
+ MemOpIdx oi, uintptr_t retaddr,
557
+ FullLoadHelper *full_load)
558
{
559
- MemOpIdx oi = make_memop_idx(op, mmu_idx);
560
uint64_t ret;
561
562
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
563
-
564
ret = full_load(env, addr, oi, retaddr);
565
-
566
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
567
-
568
return ret;
569
}
570
571
-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
572
- int mmu_idx, uintptr_t ra)
573
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
574
{
575
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
576
+ return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
577
}
578
579
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
580
- int mmu_idx, uintptr_t ra)
581
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
582
+ MemOpIdx oi, uintptr_t ra)
583
{
584
- return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
585
+ return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
586
}
587
588
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
589
- int mmu_idx, uintptr_t ra)
590
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
591
+ MemOpIdx oi, uintptr_t ra)
592
{
593
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
594
+ return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
595
}
596
597
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
598
- int mmu_idx, uintptr_t ra)
599
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
600
+ MemOpIdx oi, uintptr_t ra)
601
{
602
- return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
603
+ return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu);
604
}
605
606
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
607
- int mmu_idx, uintptr_t ra)
608
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
609
+ MemOpIdx oi, uintptr_t ra)
610
{
611
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
612
+ return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
613
}
614
615
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
616
- int mmu_idx, uintptr_t ra)
617
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
618
+ MemOpIdx oi, uintptr_t ra)
619
{
620
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
621
+ return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
622
}
623
624
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
625
- int mmu_idx, uintptr_t ra)
626
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
627
+ MemOpIdx oi, uintptr_t ra)
628
{
629
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
630
-}
631
-
632
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
633
- int mmu_idx, uintptr_t ra)
634
-{
635
- return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
636
-}
637
-
638
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
639
- int mmu_idx, uintptr_t ra)
640
-{
641
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
642
-}
643
-
644
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
645
- int mmu_idx, uintptr_t ra)
646
-{
647
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
648
-}
649
-
650
-uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
651
- uintptr_t retaddr)
652
-{
653
- return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
654
-}
655
-
656
-int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
657
-{
658
- return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
659
-}
660
-
661
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
662
- uintptr_t retaddr)
663
-{
664
- return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
665
-}
666
-
667
-int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
668
-{
669
- return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
670
-}
671
-
672
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
673
- uintptr_t retaddr)
674
-{
675
- return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
676
-}
677
-
678
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
679
- uintptr_t retaddr)
680
-{
681
- return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
682
-}
683
-
684
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
685
- uintptr_t retaddr)
686
-{
687
- return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
688
-}
689
-
690
-int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
691
-{
692
- return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
693
-}
694
-
695
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
696
- uintptr_t retaddr)
697
-{
698
- return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
699
-}
700
-
701
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
702
- uintptr_t retaddr)
703
-{
704
- return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
705
-}
706
-
707
-uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
708
-{
709
- return cpu_ldub_data_ra(env, ptr, 0);
710
-}
711
-
712
-int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
713
-{
714
- return cpu_ldsb_data_ra(env, ptr, 0);
715
-}
716
-
717
-uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
718
-{
719
- return cpu_lduw_be_data_ra(env, ptr, 0);
720
-}
721
-
722
-int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
723
-{
724
- return cpu_ldsw_be_data_ra(env, ptr, 0);
725
-}
726
-
727
-uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
728
-{
729
- return cpu_ldl_be_data_ra(env, ptr, 0);
730
-}
731
-
732
-uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
733
-{
734
- return cpu_ldq_be_data_ra(env, ptr, 0);
735
-}
736
-
737
-uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
738
-{
739
- return cpu_lduw_le_data_ra(env, ptr, 0);
740
-}
741
-
742
-int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
743
-{
744
- return cpu_ldsw_le_data_ra(env, ptr, 0);
745
-}
746
-
747
-uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
748
-{
749
- return cpu_ldl_le_data_ra(env, ptr, 0);
750
-}
751
-
752
-uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
753
-{
754
- return cpu_ldq_le_data_ra(env, ptr, 0);
755
+ return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
756
}
757
758
/*
759
@@ -XXX,XX +XXX,XX @@ store_memop(void *haddr, uint64_t val, MemOp op)
760
}
761
}
762
763
+static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
764
+ MemOpIdx oi, uintptr_t retaddr);
765
+
766
static void __attribute__((noinline))
767
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
768
uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
769
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
770
for (i = 0; i < size; ++i) {
771
/* Big-endian extract. */
772
uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
773
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
774
+ full_stb_mmu(env, addr + i, val8, oi, retaddr);
775
}
20
}
776
} else {
21
} else {
777
for (i = 0; i < size; ++i) {
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
778
/* Little-endian extract. */
23
msb = clz64(val);
779
uint8_t val8 = val >> (i * 8);
24
lsb = 63 - ctz64(val);
780
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
25
}
781
+ full_stb_mmu(env, addr + i, val8, oi, retaddr);
26
- tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_64);
27
+ tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
28
return;
782
}
29
}
783
}
30
}
784
}
785
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
786
store_memop(haddr, val, op);
787
}
788
789
-void __attribute__((noinline))
790
-helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
791
- MemOpIdx oi, uintptr_t retaddr)
792
+static void __attribute__((noinline))
793
+full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
794
+ MemOpIdx oi, uintptr_t retaddr)
795
{
796
+ validate_memop(oi, MO_UB);
797
store_helper(env, addr, val, oi, retaddr, MO_UB);
798
}
799
800
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
801
+ MemOpIdx oi, uintptr_t retaddr)
802
+{
803
+ full_stb_mmu(env, addr, val, oi, retaddr);
804
+}
805
+
806
+static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
807
+ MemOpIdx oi, uintptr_t retaddr)
808
+{
809
+ validate_memop(oi, MO_LEUW);
810
+ store_helper(env, addr, val, oi, retaddr, MO_LEUW);
811
+}
812
+
813
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
814
MemOpIdx oi, uintptr_t retaddr)
815
{
816
- store_helper(env, addr, val, oi, retaddr, MO_LEUW);
817
+ full_le_stw_mmu(env, addr, val, oi, retaddr);
818
+}
819
+
820
+static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
821
+ MemOpIdx oi, uintptr_t retaddr)
822
+{
823
+ validate_memop(oi, MO_BEUW);
824
+ store_helper(env, addr, val, oi, retaddr, MO_BEUW);
825
}
826
827
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
828
MemOpIdx oi, uintptr_t retaddr)
829
{
830
- store_helper(env, addr, val, oi, retaddr, MO_BEUW);
831
+ full_be_stw_mmu(env, addr, val, oi, retaddr);
832
+}
833
+
834
+static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
835
+ MemOpIdx oi, uintptr_t retaddr)
836
+{
837
+ validate_memop(oi, MO_LEUL);
838
+ store_helper(env, addr, val, oi, retaddr, MO_LEUL);
839
}
840
841
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
842
MemOpIdx oi, uintptr_t retaddr)
843
{
844
- store_helper(env, addr, val, oi, retaddr, MO_LEUL);
845
+ full_le_stl_mmu(env, addr, val, oi, retaddr);
846
+}
847
+
848
+static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
849
+ MemOpIdx oi, uintptr_t retaddr)
850
+{
851
+ validate_memop(oi, MO_BEUL);
852
+ store_helper(env, addr, val, oi, retaddr, MO_BEUL);
853
}
854
855
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
856
MemOpIdx oi, uintptr_t retaddr)
857
{
858
- store_helper(env, addr, val, oi, retaddr, MO_BEUL);
859
+ full_be_stl_mmu(env, addr, val, oi, retaddr);
860
}
861
862
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
863
MemOpIdx oi, uintptr_t retaddr)
864
{
865
+ validate_memop(oi, MO_LEQ);
866
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
867
}
868
869
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
870
MemOpIdx oi, uintptr_t retaddr)
871
{
872
+ validate_memop(oi, MO_BEQ);
873
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
874
}
875
876
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
877
* Store Helpers for cpu_ldst.h
878
*/
879
880
-static inline void QEMU_ALWAYS_INLINE
881
-cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
882
- int mmu_idx, uintptr_t retaddr, MemOp op)
883
+typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
884
+ uint64_t val, MemOpIdx oi, uintptr_t retaddr);
885
+
886
+static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
887
+ uint64_t val, MemOpIdx oi, uintptr_t ra,
888
+ FullStoreHelper *full_store)
889
{
890
- MemOpIdx oi = make_memop_idx(op, mmu_idx);
891
-
892
trace_guest_st_before_exec(env_cpu(env), addr, oi);
893
-
894
- store_helper(env, addr, val, oi, retaddr, op);
895
-
896
+ full_store(env, addr, val, oi, ra);
897
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
898
}
899
900
-void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
901
- int mmu_idx, uintptr_t retaddr)
902
+void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
903
+ MemOpIdx oi, uintptr_t retaddr)
904
{
905
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
906
+ cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
907
}
908
909
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
910
- int mmu_idx, uintptr_t retaddr)
911
+void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
912
+ MemOpIdx oi, uintptr_t retaddr)
913
{
914
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
915
+ cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
916
}
917
918
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
919
- int mmu_idx, uintptr_t retaddr)
920
+void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
921
+ MemOpIdx oi, uintptr_t retaddr)
922
{
923
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
924
+ cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
925
}
926
927
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
928
- int mmu_idx, uintptr_t retaddr)
929
+void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
930
+ MemOpIdx oi, uintptr_t retaddr)
931
{
932
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
933
+ cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
934
}
935
936
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
937
- int mmu_idx, uintptr_t retaddr)
938
+void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
939
+ MemOpIdx oi, uintptr_t retaddr)
940
{
941
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
942
+ cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
943
}
944
945
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
946
- int mmu_idx, uintptr_t retaddr)
947
+void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
948
+ MemOpIdx oi, uintptr_t retaddr)
949
{
950
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
951
+ cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
952
}
953
954
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
955
- int mmu_idx, uintptr_t retaddr)
956
+void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
957
+ MemOpIdx oi, uintptr_t retaddr)
958
{
959
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
960
+ cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
961
}
962
963
-void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
964
- uint32_t val, uintptr_t retaddr)
965
-{
966
- cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
967
-}
968
-
969
-void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
970
- uint32_t val, uintptr_t retaddr)
971
-{
972
- cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
973
-}
974
-
975
-void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
976
- uint32_t val, uintptr_t retaddr)
977
-{
978
- cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
979
-}
980
-
981
-void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
982
- uint64_t val, uintptr_t retaddr)
983
-{
984
- cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
985
-}
986
-
987
-void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
988
- uint32_t val, uintptr_t retaddr)
989
-{
990
- cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
991
-}
992
-
993
-void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
994
- uint32_t val, uintptr_t retaddr)
995
-{
996
- cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
997
-}
998
-
999
-void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
1000
- uint64_t val, uintptr_t retaddr)
1001
-{
1002
- cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
1003
-}
1004
-
1005
-void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1006
-{
1007
- cpu_stb_data_ra(env, ptr, val, 0);
1008
-}
1009
-
1010
-void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1011
-{
1012
- cpu_stw_be_data_ra(env, ptr, val, 0);
1013
-}
1014
-
1015
-void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1016
-{
1017
- cpu_stl_be_data_ra(env, ptr, val, 0);
1018
-}
1019
-
1020
-void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
1021
-{
1022
- cpu_stq_be_data_ra(env, ptr, val, 0);
1023
-}
1024
-
1025
-void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1026
-{
1027
- cpu_stw_le_data_ra(env, ptr, val, 0);
1028
-}
1029
-
1030
-void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
1031
-{
1032
- cpu_stl_le_data_ra(env, ptr, val, 0);
1033
-}
1034
-
1035
-void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
1036
-{
1037
- cpu_stq_le_data_ra(env, ptr, val, 0);
1038
-}
1039
+#include "ldst_common.c.inc"
1040
1041
/*
1042
* First set of functions passes in OI and RETADDR.
1043
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
1044
index XXXXXXX..XXXXXXX 100644
1045
--- a/accel/tcg/user-exec.c
1046
+++ b/accel/tcg/user-exec.c
1047
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
1048
1049
/* The softmmu versions of these helpers are in cputlb.c. */
1050
1051
-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
1052
+/*
1053
+ * Verify that we have passed the correct MemOp to the correct function.
1054
+ *
1055
+ * We could present one function to target code, and dispatch based on
1056
+ * the MemOp, but so far we have worked hard to avoid an indirect function
1057
+ * call along the memory path.
1058
+ */
1059
+static void validate_memop(MemOpIdx oi, MemOp expected)
1060
{
1061
- MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
1062
- uint32_t ret;
1063
+#ifdef CONFIG_DEBUG_TCG
1064
+ MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1065
+ assert(have == expected);
1066
+#endif
1067
+}
1068
1069
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1070
- ret = ldub_p(g2h(env_cpu(env), ptr));
1071
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1072
+static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
1073
+ MemOpIdx oi, uintptr_t ra, MMUAccessType type)
1074
+{
1075
+ void *ret;
1076
+
1077
+ /* TODO: Enforce guest required alignment. */
1078
+
1079
+ ret = g2h(env_cpu(env), addr);
1080
+ set_helper_retaddr(ra);
1081
return ret;
1082
}
1083
1084
-int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
1085
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
1086
+ MemOpIdx oi, uintptr_t ra)
1087
{
1088
- return (int8_t)cpu_ldub_data(env, ptr);
1089
-}
1090
+ void *haddr;
1091
+ uint8_t ret;
1092
1093
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
1094
-{
1095
- MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
1096
- uint32_t ret;
1097
-
1098
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1099
- ret = lduw_be_p(g2h(env_cpu(env), ptr));
1100
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1101
+ validate_memop(oi, MO_UB);
1102
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1103
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1104
+ ret = ldub_p(haddr);
1105
+ clear_helper_retaddr();
1106
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1107
return ret;
1108
}
1109
1110
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
1111
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
1112
+ MemOpIdx oi, uintptr_t ra)
1113
{
1114
- return (int16_t)cpu_lduw_be_data(env, ptr);
1115
-}
1116
+ void *haddr;
1117
+ uint16_t ret;
1118
1119
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
1120
-{
1121
- MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
1122
- uint32_t ret;
1123
-
1124
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1125
- ret = ldl_be_p(g2h(env_cpu(env), ptr));
1126
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1127
+ validate_memop(oi, MO_BEUW);
1128
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1129
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1130
+ ret = lduw_be_p(haddr);
1131
+ clear_helper_retaddr();
1132
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1133
return ret;
1134
}
1135
1136
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
1137
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
1138
+ MemOpIdx oi, uintptr_t ra)
1139
{
1140
- MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
1141
+ void *haddr;
1142
+ uint32_t ret;
1143
+
1144
+ validate_memop(oi, MO_BEUL);
1145
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1146
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1147
+ ret = ldl_be_p(haddr);
1148
+ clear_helper_retaddr();
1149
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1150
+ return ret;
1151
+}
1152
+
1153
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
1154
+ MemOpIdx oi, uintptr_t ra)
1155
+{
1156
+ void *haddr;
1157
uint64_t ret;
1158
1159
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1160
- ret = ldq_be_p(g2h(env_cpu(env), ptr));
1161
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1162
+ validate_memop(oi, MO_BEQ);
1163
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1164
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1165
+ ret = ldq_be_p(haddr);
1166
+ clear_helper_retaddr();
1167
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1168
return ret;
1169
}
1170
1171
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
1172
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
1173
+ MemOpIdx oi, uintptr_t ra)
1174
{
1175
- MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
1176
+ void *haddr;
1177
+ uint16_t ret;
1178
+
1179
+ validate_memop(oi, MO_LEUW);
1180
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1181
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1182
+ ret = lduw_le_p(haddr);
1183
+ clear_helper_retaddr();
1184
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1185
+ return ret;
1186
+}
1187
+
1188
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
1189
+ MemOpIdx oi, uintptr_t ra)
1190
+{
1191
+ void *haddr;
1192
uint32_t ret;
1193
1194
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1195
- ret = lduw_le_p(g2h(env_cpu(env), ptr));
1196
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1197
+ validate_memop(oi, MO_LEUL);
1198
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1199
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1200
+ ret = ldl_le_p(haddr);
1201
+ clear_helper_retaddr();
1202
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1203
return ret;
1204
}
1205
1206
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
1207
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
1208
+ MemOpIdx oi, uintptr_t ra)
1209
{
1210
- return (int16_t)cpu_lduw_le_data(env, ptr);
1211
-}
1212
-
1213
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
1214
-{
1215
- MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
1216
- uint32_t ret;
1217
-
1218
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1219
- ret = ldl_le_p(g2h(env_cpu(env), ptr));
1220
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1221
- return ret;
1222
-}
1223
-
1224
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
1225
-{
1226
- MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
1227
+ void *haddr;
1228
uint64_t ret;
1229
1230
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1231
- ret = ldq_le_p(g2h(env_cpu(env), ptr));
1232
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1233
+ validate_memop(oi, MO_LEQ);
1234
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1235
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1236
+ ret = ldq_le_p(haddr);
1237
+ clear_helper_retaddr();
1238
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1239
return ret;
1240
}
1241
1242
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1243
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
1244
+ MemOpIdx oi, uintptr_t ra)
1245
{
1246
- uint32_t ret;
1247
+ void *haddr;
1248
1249
- set_helper_retaddr(retaddr);
1250
- ret = cpu_ldub_data(env, ptr);
1251
+ validate_memop(oi, MO_UB);
1252
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1253
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1254
+ stb_p(haddr, val);
1255
clear_helper_retaddr();
1256
- return ret;
1257
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1258
}
1259
1260
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1261
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1262
+ MemOpIdx oi, uintptr_t ra)
1263
{
1264
- return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
1265
-}
1266
+ void *haddr;
1267
1268
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1269
-{
1270
- uint32_t ret;
1271
-
1272
- set_helper_retaddr(retaddr);
1273
- ret = cpu_lduw_be_data(env, ptr);
1274
+ validate_memop(oi, MO_BEUW);
1275
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1276
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1277
+ stw_be_p(haddr, val);
1278
clear_helper_retaddr();
1279
- return ret;
1280
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1281
}
1282
1283
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1284
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1285
+ MemOpIdx oi, uintptr_t ra)
1286
{
1287
- return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
1288
-}
1289
+ void *haddr;
1290
1291
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1292
-{
1293
- uint32_t ret;
1294
-
1295
- set_helper_retaddr(retaddr);
1296
- ret = cpu_ldl_be_data(env, ptr);
1297
+ validate_memop(oi, MO_BEUL);
1298
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1299
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1300
+ stl_be_p(haddr, val);
1301
clear_helper_retaddr();
1302
- return ret;
1303
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1304
}
1305
1306
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1307
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1308
+ MemOpIdx oi, uintptr_t ra)
1309
{
1310
- uint64_t ret;
1311
+ void *haddr;
1312
1313
- set_helper_retaddr(retaddr);
1314
- ret = cpu_ldq_be_data(env, ptr);
1315
+ validate_memop(oi, MO_BEQ);
1316
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1317
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1318
+ stq_be_p(haddr, val);
1319
clear_helper_retaddr();
1320
- return ret;
1321
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1322
}
1323
1324
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1325
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1326
+ MemOpIdx oi, uintptr_t ra)
1327
{
1328
- uint32_t ret;
1329
+ void *haddr;
1330
1331
- set_helper_retaddr(retaddr);
1332
- ret = cpu_lduw_le_data(env, ptr);
1333
+ validate_memop(oi, MO_LEUW);
1334
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1335
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1336
+ stw_le_p(haddr, val);
1337
clear_helper_retaddr();
1338
- return ret;
1339
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1340
}
1341
1342
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1343
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1344
+ MemOpIdx oi, uintptr_t ra)
1345
{
1346
- return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
1347
-}
1348
+ void *haddr;
1349
1350
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1351
-{
1352
- uint32_t ret;
1353
-
1354
- set_helper_retaddr(retaddr);
1355
- ret = cpu_ldl_le_data(env, ptr);
1356
+ validate_memop(oi, MO_LEUL);
1357
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1358
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1359
+ stl_le_p(haddr, val);
1360
clear_helper_retaddr();
1361
- return ret;
1362
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1363
}
1364
1365
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1366
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1367
+ MemOpIdx oi, uintptr_t ra)
1368
{
1369
- uint64_t ret;
1370
+ void *haddr;
1371
1372
- set_helper_retaddr(retaddr);
1373
- ret = cpu_ldq_le_data(env, ptr);
1374
- clear_helper_retaddr();
1375
- return ret;
1376
-}
1377
-
1378
-void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1379
-{
1380
- MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
1381
-
1382
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1383
- stb_p(g2h(env_cpu(env), ptr), val);
1384
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1385
-}
1386
-
1387
-void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1388
-{
1389
- MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
1390
-
1391
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1392
- stw_be_p(g2h(env_cpu(env), ptr), val);
1393
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1394
-}
1395
-
1396
-void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1397
-{
1398
- MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
1399
-
1400
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1401
- stl_be_p(g2h(env_cpu(env), ptr), val);
1402
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1403
-}
1404
-
1405
-void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1406
-{
1407
- MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
1408
-
1409
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1410
- stq_be_p(g2h(env_cpu(env), ptr), val);
1411
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1412
-}
1413
-
1414
-void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1415
-{
1416
- MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
1417
-
1418
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1419
- stw_le_p(g2h(env_cpu(env), ptr), val);
1420
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1421
-}
1422
-
1423
-void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1424
-{
1425
- MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
1426
-
1427
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1428
- stl_le_p(g2h(env_cpu(env), ptr), val);
1429
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1430
-}
1431
-
1432
-void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1433
-{
1434
- MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
1435
-
1436
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1437
- stq_le_p(g2h(env_cpu(env), ptr), val);
1438
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1439
-}
1440
-
1441
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
1442
- uint32_t val, uintptr_t retaddr)
1443
-{
1444
- set_helper_retaddr(retaddr);
1445
- cpu_stb_data(env, ptr, val);
1446
- clear_helper_retaddr();
1447
-}
1448
-
1449
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
1450
- uint32_t val, uintptr_t retaddr)
1451
-{
1452
- set_helper_retaddr(retaddr);
1453
- cpu_stw_be_data(env, ptr, val);
1454
- clear_helper_retaddr();
1455
-}
1456
-
1457
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
1458
- uint32_t val, uintptr_t retaddr)
1459
-{
1460
- set_helper_retaddr(retaddr);
1461
- cpu_stl_be_data(env, ptr, val);
1462
- clear_helper_retaddr();
1463
-}
1464
-
1465
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
1466
- uint64_t val, uintptr_t retaddr)
1467
-{
1468
- set_helper_retaddr(retaddr);
1469
- cpu_stq_be_data(env, ptr, val);
1470
- clear_helper_retaddr();
1471
-}
1472
-
1473
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
1474
- uint32_t val, uintptr_t retaddr)
1475
-{
1476
- set_helper_retaddr(retaddr);
1477
- cpu_stw_le_data(env, ptr, val);
1478
- clear_helper_retaddr();
1479
-}
1480
-
1481
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
1482
- uint32_t val, uintptr_t retaddr)
1483
-{
1484
- set_helper_retaddr(retaddr);
1485
- cpu_stl_le_data(env, ptr, val);
1486
- clear_helper_retaddr();
1487
-}
1488
-
1489
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
1490
- uint64_t val, uintptr_t retaddr)
1491
-{
1492
- set_helper_retaddr(retaddr);
1493
- cpu_stq_le_data(env, ptr, val);
1494
+ validate_memop(oi, MO_LEQ);
1495
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1496
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1497
+ stq_le_p(haddr, val);
1498
clear_helper_retaddr();
1499
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1500
}
1501
1502
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
1503
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
1504
return ret;
1505
}
1506
1507
+#include "ldst_common.c.inc"
1508
+
1509
/*
1510
* Do not allow unaligned operations to proceed. Return the host address.
1511
*
1512
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
1513
new file mode 100644
1514
index XXXXXXX..XXXXXXX
1515
--- /dev/null
1516
+++ b/accel/tcg/ldst_common.c.inc
1517
@@ -XXX,XX +XXX,XX @@
1518
+/*
1519
+ * Routines common to user and system emulation of load/store.
1520
+ *
1521
+ * Copyright (c) 2003 Fabrice Bellard
1522
+ *
1523
+ * SPDX-License-Identifier: GPL-2.0-or-later
1524
+ *
1525
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
1526
+ * See the COPYING file in the top-level directory.
1527
+ */
1528
+
1529
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1530
+ int mmu_idx, uintptr_t ra)
1531
+{
1532
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
1533
+ return cpu_ldb_mmu(env, addr, oi, ra);
1534
+}
1535
+
1536
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1537
+ int mmu_idx, uintptr_t ra)
1538
+{
1539
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
1540
+}
1541
+
1542
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1543
+ int mmu_idx, uintptr_t ra)
1544
+{
1545
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
1546
+ return cpu_ldw_be_mmu(env, addr, oi, ra);
1547
+}
1548
+
1549
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1550
+ int mmu_idx, uintptr_t ra)
1551
+{
1552
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
1553
+}
1554
+
1555
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1556
+ int mmu_idx, uintptr_t ra)
1557
+{
1558
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
1559
+ return cpu_ldl_be_mmu(env, addr, oi, ra);
1560
+}
1561
+
1562
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1563
+ int mmu_idx, uintptr_t ra)
1564
+{
1565
+ MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
1566
+ return cpu_ldq_be_mmu(env, addr, oi, ra);
1567
+}
1568
+
1569
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1570
+ int mmu_idx, uintptr_t ra)
1571
+{
1572
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
1573
+ return cpu_ldw_le_mmu(env, addr, oi, ra);
1574
+}
1575
+
1576
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1577
+ int mmu_idx, uintptr_t ra)
1578
+{
1579
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
1580
+}
1581
+
1582
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1583
+ int mmu_idx, uintptr_t ra)
1584
+{
1585
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
1586
+ return cpu_ldl_le_mmu(env, addr, oi, ra);
1587
+}
1588
+
1589
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1590
+ int mmu_idx, uintptr_t ra)
1591
+{
1592
+ MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
1593
+ return cpu_ldq_le_mmu(env, addr, oi, ra);
1594
+}
1595
+
1596
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1597
+ int mmu_idx, uintptr_t ra)
1598
+{
1599
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
1600
+ cpu_stb_mmu(env, addr, val, oi, ra);
1601
+}
1602
+
1603
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1604
+ int mmu_idx, uintptr_t ra)
1605
+{
1606
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
1607
+ cpu_stw_be_mmu(env, addr, val, oi, ra);
1608
+}
1609
+
1610
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1611
+ int mmu_idx, uintptr_t ra)
1612
+{
1613
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
1614
+ cpu_stl_be_mmu(env, addr, val, oi, ra);
1615
+}
1616
+
1617
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
1618
+ int mmu_idx, uintptr_t ra)
1619
+{
1620
+ MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
1621
+ cpu_stq_be_mmu(env, addr, val, oi, ra);
1622
+}
1623
+
1624
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1625
+ int mmu_idx, uintptr_t ra)
1626
+{
1627
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
1628
+ cpu_stw_le_mmu(env, addr, val, oi, ra);
1629
+}
1630
+
1631
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1632
+ int mmu_idx, uintptr_t ra)
1633
+{
1634
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
1635
+ cpu_stl_le_mmu(env, addr, val, oi, ra);
1636
+}
1637
+
1638
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
1639
+ int mmu_idx, uintptr_t ra)
1640
+{
1641
+ MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
1642
+ cpu_stq_le_mmu(env, addr, val, oi, ra);
1643
+}
1644
+
1645
+/*--------------------------*/
1646
+
1647
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1648
+{
1649
+ return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1650
+}
1651
+
1652
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1653
+{
1654
+ return (int8_t)cpu_ldub_data_ra(env, addr, ra);
1655
+}
1656
+
1657
+uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1658
+{
1659
+ return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1660
+}
1661
+
1662
+int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1663
+{
1664
+ return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
1665
+}
1666
+
1667
+uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1668
+{
1669
+ return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1670
+}
1671
+
1672
+uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1673
+{
1674
+ return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1675
+}
1676
+
1677
+uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1678
+{
1679
+ return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1680
+}
1681
+
1682
+int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1683
+{
1684
+ return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
1685
+}
1686
+
1687
+uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1688
+{
1689
+ return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1690
+}
1691
+
1692
+uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1693
+{
1694
+ return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1695
+}
1696
+
1697
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
1698
+ uint32_t val, uintptr_t ra)
1699
+{
1700
+ cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1701
+}
1702
+
1703
+void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
1704
+ uint32_t val, uintptr_t ra)
1705
+{
1706
+ cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1707
+}
1708
+
1709
+void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
1710
+ uint32_t val, uintptr_t ra)
1711
+{
1712
+ cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1713
+}
1714
+
1715
+void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
1716
+ uint64_t val, uintptr_t ra)
1717
+{
1718
+ cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1719
+}
1720
+
1721
+void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
1722
+ uint32_t val, uintptr_t ra)
1723
+{
1724
+ cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1725
+}
1726
+
1727
+void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
1728
+ uint32_t val, uintptr_t ra)
1729
+{
1730
+ cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1731
+}
1732
+
1733
+void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
1734
+ uint64_t val, uintptr_t ra)
1735
+{
1736
+ cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1737
+}
1738
+
1739
+/*--------------------------*/
1740
+
1741
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
1742
+{
1743
+ return cpu_ldub_data_ra(env, addr, 0);
1744
+}
1745
+
1746
+int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
1747
+{
1748
+ return (int8_t)cpu_ldub_data(env, addr);
1749
+}
1750
+
1751
+uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
1752
+{
1753
+ return cpu_lduw_be_data_ra(env, addr, 0);
1754
+}
1755
+
1756
+int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
1757
+{
1758
+ return (int16_t)cpu_lduw_be_data(env, addr);
1759
+}
1760
+
1761
+uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
1762
+{
1763
+ return cpu_ldl_be_data_ra(env, addr, 0);
1764
+}
1765
+
1766
+uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
1767
+{
1768
+ return cpu_ldq_be_data_ra(env, addr, 0);
1769
+}
1770
+
1771
+uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
1772
+{
1773
+ return cpu_lduw_le_data_ra(env, addr, 0);
1774
+}
1775
+
1776
+int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
1777
+{
1778
+ return (int16_t)cpu_lduw_le_data(env, addr);
1779
+}
1780
+
1781
+uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
1782
+{
1783
+ return cpu_ldl_le_data_ra(env, addr, 0);
1784
+}
1785
+
1786
+uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
1787
+{
1788
+ return cpu_ldq_le_data_ra(env, addr, 0);
1789
+}
1790
+
1791
+void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1792
+{
1793
+ cpu_stb_data_ra(env, addr, val, 0);
1794
+}
1795
+
1796
+void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1797
+{
1798
+ cpu_stw_be_data_ra(env, addr, val, 0);
1799
+}
1800
+
1801
+void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1802
+{
1803
+ cpu_stl_be_data_ra(env, addr, val, 0);
1804
+}
1805
+
1806
+void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
1807
+{
1808
+ cpu_stq_be_data_ra(env, addr, val, 0);
1809
+}
1810
+
1811
+void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1812
+{
1813
+ cpu_stw_le_data_ra(env, addr, val, 0);
1814
+}
1815
+
1816
+void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1817
+{
1818
+ cpu_stl_le_data_ra(env, addr, val, 0);
1819
+}
1820
+
1821
+void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
1822
+{
1823
+ cpu_stq_le_data_ra(env, addr, val, 0);
1824
+}
1825
--
31
--
1826
2.25.1
32
2.25.1
1827
1828
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
The operands are output in the wrong order: the tcg selector
2
argument is first, whereas the s390x selector argument is last.
3
4
Tested-by: Thomas Huth <thuth@redhat.com>
5
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/898
6
Fixes: 9bca986df88 ("tcg/s390x: Implement TCG_TARGET_HAS_bitsel_vec")
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
8
---
4
target/i386/tcg/mem_helper.c | 2 +-
9
tcg/s390x/tcg-target.c.inc | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
10
1 file changed, 1 insertion(+), 1 deletion(-)
6
11
7
diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
12
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
8
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
9
--- a/target/i386/tcg/mem_helper.c
14
--- a/tcg/s390x/tcg-target.c.inc
10
+++ b/target/i386/tcg/mem_helper.c
15
+++ b/tcg/s390x/tcg-target.c.inc
11
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
16
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
12
Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
17
break;
13
18
14
int mem_idx = cpu_mmu_index(env, false);
19
case INDEX_op_bitsel_vec:
15
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
20
- tcg_out_insn(s, VRRe, VSEL, a0, a1, a2, args[3]);
16
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
21
+ tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
17
Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
22
break;
18
23
19
if (int128_eq(oldv, cmpv)) {
24
case INDEX_op_cmp_vec:
20
--
25
--
21
2.25.1
26
2.25.1
22
23
diff view generated by jsdifflib
1
Cc: qemu-arm@nongnu.org
1
We copied the data from the general register input to the
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
2
vector register output, but have not yet replicated it.
3
We intended to fall through into the vector-vector case,
4
but failed to redirect the input register.
5
6
This is caught by an assertion failure in tcg_out_insn_VRIc,
7
which diagnosed the incorrect register class.
8
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
10
---
5
target/arm/helper-a64.c | 8 ++++----
11
tcg/s390x/tcg-target.c.inc | 1 +
6
1 file changed, 4 insertions(+), 4 deletions(-)
12
1 file changed, 1 insertion(+)
7
13
8
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
14
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
9
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
10
--- a/target/arm/helper-a64.c
16
--- a/tcg/s390x/tcg-target.c.inc
11
+++ b/target/arm/helper-a64.c
17
+++ b/tcg/s390x/tcg-target.c.inc
12
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
18
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
13
assert(HAVE_CMPXCHG128);
19
if (vece == MO_64) {
14
20
return true;
15
mem_idx = cpu_mmu_index(env, false);
21
}
16
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
22
+ src = dst;
17
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
23
}
18
19
cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
20
newv = int128_make128(new_lo, new_hi);
21
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
22
assert(HAVE_CMPXCHG128);
23
24
mem_idx = cpu_mmu_index(env, false);
25
- oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
26
+ oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);
27
24
28
/*
25
/*
29
* High and low need to be switched here because this is not actually a
30
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
31
assert(HAVE_CMPXCHG128);
32
33
mem_idx = cpu_mmu_index(env, false);
34
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
35
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
36
37
cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
38
newv = int128_make128(new_lo, new_hi);
39
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
40
assert(HAVE_CMPXCHG128);
41
42
mem_idx = cpu_mmu_index(env, false);
43
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
44
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);
45
46
cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
47
newv = int128_make128(new_lo, new_hi);
48
--
26
--
49
2.25.1
27
2.25.1
50
51
diff view generated by jsdifflib
Deleted patch
1
Cc: qemu-ppc@nongnu.org
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/ppc/translate.c | 12 +++++++-----
6
1 file changed, 7 insertions(+), 5 deletions(-)
7
1
8
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/target/ppc/translate.c
11
+++ b/target/ppc/translate.c
12
@@ -XXX,XX +XXX,XX @@ static void gen_std(DisasContext *ctx)
13
if (HAVE_ATOMIC128) {
14
TCGv_i32 oi = tcg_temp_new_i32();
15
if (ctx->le_mode) {
16
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
17
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128,
18
+ ctx->mem_idx));
19
gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
20
} else {
21
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
22
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128,
23
+ ctx->mem_idx));
24
gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
25
}
26
tcg_temp_free_i32(oi);
27
@@ -XXX,XX +XXX,XX @@ static void gen_lqarx(DisasContext *ctx)
28
if (HAVE_ATOMIC128) {
29
TCGv_i32 oi = tcg_temp_new_i32();
30
if (ctx->le_mode) {
31
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
32
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
33
ctx->mem_idx));
34
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
35
} else {
36
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
37
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
38
ctx->mem_idx));
39
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
40
}
41
@@ -XXX,XX +XXX,XX @@ static void gen_stqcx_(DisasContext *ctx)
42
43
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
44
if (HAVE_CMPXCHG128) {
45
- TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
46
+ TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
47
if (ctx->le_mode) {
48
gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
49
EA, lo, hi, oi);
50
--
51
2.25.1
52
53
diff view generated by jsdifflib
Deleted patch
1
Reviewed-by: David Hildenbrand <david@redhat.com>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/s390x/tcg/mem_helper.c | 4 ++--
6
1 file changed, 2 insertions(+), 2 deletions(-)
7
1
8
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/target/s390x/tcg/mem_helper.c
11
+++ b/target/s390x/tcg/mem_helper.c
12
@@ -XXX,XX +XXX,XX @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
13
assert(HAVE_CMPXCHG128);
14
15
mem_idx = cpu_mmu_index(env, false);
16
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
17
+ oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
18
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
19
fail = !int128_eq(oldv, cmpv);
20
21
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
22
cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
23
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
24
} else if (HAVE_CMPXCHG128) {
25
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
26
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
27
ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
28
cc = !int128_eq(ov, cv);
29
} else {
30
--
31
2.25.1
32
33
diff view generated by jsdifflib
Deleted patch
1
The function is trivial for user-only, but still must be present.
2
1
3
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/cpu.h | 9 +++++++++
8
1 file changed, 9 insertions(+)
9
10
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/cpu.h
13
+++ b/target/hexagon/cpu.h
14
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
15
#endif
16
}
17
18
+static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
19
+{
20
+#ifdef CONFIG_USER_ONLY
21
+ return MMU_USER_IDX;
22
+#else
23
+#error System mode not supported on Hexagon yet
24
+#endif
25
+}
26
+
27
typedef struct CPUHexagonState CPUArchState;
28
typedef HexagonCPU ArchCPU;
29
30
--
31
2.25.1
32
33
diff view generated by jsdifflib
Deleted patch
1
The previous placement in tcg/tcg.h was not logical.
2
1
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/exec/cpu_ldst.h | 87 +++++++++++++++++++++++++++++++++++
7
include/tcg/tcg.h | 87 -----------------------------------
8
target/arm/helper-a64.c | 1 -
9
target/m68k/op_helper.c | 1 -
10
target/ppc/mem_helper.c | 1 -
11
target/s390x/tcg/mem_helper.c | 1 -
12
6 files changed, 87 insertions(+), 91 deletions(-)
13
14
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu_ldst.h
17
+++ b/include/exec/cpu_ldst.h
18
@@ -XXX,XX +XXX,XX @@
19
#define CPU_LDST_H
20
21
#include "exec/memopidx.h"
22
+#include "qemu/int128.h"
23
24
#if defined(CONFIG_USER_ONLY)
25
/* sparc32plus has 64bit long but 32bit space address
26
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
27
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
28
MemOpIdx oi, uintptr_t ra);
29
30
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
31
+ uint32_t cmpv, uint32_t newv,
32
+ MemOpIdx oi, uintptr_t retaddr);
33
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
34
+ uint32_t cmpv, uint32_t newv,
35
+ MemOpIdx oi, uintptr_t retaddr);
36
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
37
+ uint32_t cmpv, uint32_t newv,
38
+ MemOpIdx oi, uintptr_t retaddr);
39
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
40
+ uint64_t cmpv, uint64_t newv,
41
+ MemOpIdx oi, uintptr_t retaddr);
42
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
43
+ uint32_t cmpv, uint32_t newv,
44
+ MemOpIdx oi, uintptr_t retaddr);
45
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
46
+ uint32_t cmpv, uint32_t newv,
47
+ MemOpIdx oi, uintptr_t retaddr);
48
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
49
+ uint64_t cmpv, uint64_t newv,
50
+ MemOpIdx oi, uintptr_t retaddr);
51
+
52
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
53
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
54
+ (CPUArchState *env, target_ulong addr, TYPE val, \
55
+ MemOpIdx oi, uintptr_t retaddr);
56
+
57
+#ifdef CONFIG_ATOMIC64
58
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
59
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
60
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
61
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
62
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
63
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
64
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
65
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
66
+#else
67
+#define GEN_ATOMIC_HELPER_ALL(NAME) \
68
+ GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
69
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
70
+ GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
71
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
72
+ GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
73
+#endif
74
+
75
+GEN_ATOMIC_HELPER_ALL(fetch_add)
76
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
77
+GEN_ATOMIC_HELPER_ALL(fetch_and)
78
+GEN_ATOMIC_HELPER_ALL(fetch_or)
79
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
80
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
81
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
82
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
83
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
84
+
85
+GEN_ATOMIC_HELPER_ALL(add_fetch)
86
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
87
+GEN_ATOMIC_HELPER_ALL(and_fetch)
88
+GEN_ATOMIC_HELPER_ALL(or_fetch)
89
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
90
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
91
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
92
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
93
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
94
+
95
+GEN_ATOMIC_HELPER_ALL(xchg)
96
+
97
+#undef GEN_ATOMIC_HELPER_ALL
98
+#undef GEN_ATOMIC_HELPER
99
+
100
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
101
+ Int128 cmpv, Int128 newv,
102
+ MemOpIdx oi, uintptr_t retaddr);
103
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
104
+ Int128 cmpv, Int128 newv,
105
+ MemOpIdx oi, uintptr_t retaddr);
106
+
107
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
108
+ MemOpIdx oi, uintptr_t retaddr);
109
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
110
+ MemOpIdx oi, uintptr_t retaddr);
111
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
112
+ MemOpIdx oi, uintptr_t retaddr);
113
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
114
+ MemOpIdx oi, uintptr_t retaddr);
115
+
116
#if defined(CONFIG_USER_ONLY)
117
118
extern __thread uintptr_t helper_retaddr;
119
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
120
index XXXXXXX..XXXXXXX 100644
121
--- a/include/tcg/tcg.h
122
+++ b/include/tcg/tcg.h
123
@@ -XXX,XX +XXX,XX @@
124
#include "qemu/queue.h"
125
#include "tcg/tcg-mo.h"
126
#include "tcg-target.h"
127
-#include "qemu/int128.h"
128
#include "tcg/tcg-cond.h"
129
130
/* XXX: make safe guess about sizes */
131
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
132
#endif
133
#endif /* CONFIG_SOFTMMU */
134
135
-uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
136
- uint32_t cmpv, uint32_t newv,
137
- MemOpIdx oi, uintptr_t retaddr);
138
-uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
139
- uint32_t cmpv, uint32_t newv,
140
- MemOpIdx oi, uintptr_t retaddr);
141
-uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
142
- uint32_t cmpv, uint32_t newv,
143
- MemOpIdx oi, uintptr_t retaddr);
144
-uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
145
- uint64_t cmpv, uint64_t newv,
146
- MemOpIdx oi, uintptr_t retaddr);
147
-uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
148
- uint32_t cmpv, uint32_t newv,
149
- MemOpIdx oi, uintptr_t retaddr);
150
-uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
151
- uint32_t cmpv, uint32_t newv,
152
- MemOpIdx oi, uintptr_t retaddr);
153
-uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
154
- uint64_t cmpv, uint64_t newv,
155
- MemOpIdx oi, uintptr_t retaddr);
156
-
157
-#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
158
-TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
159
- (CPUArchState *env, target_ulong addr, TYPE val, \
160
- MemOpIdx oi, uintptr_t retaddr);
161
-
162
-#ifdef CONFIG_ATOMIC64
163
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
164
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
165
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
166
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
167
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
168
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
169
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
170
- GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
171
-#else
172
-#define GEN_ATOMIC_HELPER_ALL(NAME) \
173
- GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
174
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
175
- GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
176
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
177
- GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
178
-#endif
179
-
180
-GEN_ATOMIC_HELPER_ALL(fetch_add)
181
-GEN_ATOMIC_HELPER_ALL(fetch_sub)
182
-GEN_ATOMIC_HELPER_ALL(fetch_and)
183
-GEN_ATOMIC_HELPER_ALL(fetch_or)
184
-GEN_ATOMIC_HELPER_ALL(fetch_xor)
185
-GEN_ATOMIC_HELPER_ALL(fetch_smin)
186
-GEN_ATOMIC_HELPER_ALL(fetch_umin)
187
-GEN_ATOMIC_HELPER_ALL(fetch_smax)
188
-GEN_ATOMIC_HELPER_ALL(fetch_umax)
189
-
190
-GEN_ATOMIC_HELPER_ALL(add_fetch)
191
-GEN_ATOMIC_HELPER_ALL(sub_fetch)
192
-GEN_ATOMIC_HELPER_ALL(and_fetch)
193
-GEN_ATOMIC_HELPER_ALL(or_fetch)
194
-GEN_ATOMIC_HELPER_ALL(xor_fetch)
195
-GEN_ATOMIC_HELPER_ALL(smin_fetch)
196
-GEN_ATOMIC_HELPER_ALL(umin_fetch)
197
-GEN_ATOMIC_HELPER_ALL(smax_fetch)
198
-GEN_ATOMIC_HELPER_ALL(umax_fetch)
199
-
200
-GEN_ATOMIC_HELPER_ALL(xchg)
201
-
202
-#undef GEN_ATOMIC_HELPER_ALL
203
-#undef GEN_ATOMIC_HELPER
204
-
205
-Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
206
- Int128 cmpv, Int128 newv,
207
- MemOpIdx oi, uintptr_t retaddr);
208
-Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
209
- Int128 cmpv, Int128 newv,
210
- MemOpIdx oi, uintptr_t retaddr);
211
-
212
-Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
213
- MemOpIdx oi, uintptr_t retaddr);
214
-Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
215
- MemOpIdx oi, uintptr_t retaddr);
216
-void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
217
- MemOpIdx oi, uintptr_t retaddr);
218
-void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
219
- MemOpIdx oi, uintptr_t retaddr);
220
-
221
#ifdef CONFIG_DEBUG_TCG
222
void tcg_assert_listed_vecop(TCGOpcode);
223
#else
224
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/target/arm/helper-a64.c
227
+++ b/target/arm/helper-a64.c
228
@@ -XXX,XX +XXX,XX @@
229
#include "exec/cpu_ldst.h"
230
#include "qemu/int128.h"
231
#include "qemu/atomic128.h"
232
-#include "tcg/tcg.h"
233
#include "fpu/softfloat.h"
234
#include <zlib.h> /* For crc32 */
235
236
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
237
index XXXXXXX..XXXXXXX 100644
238
--- a/target/m68k/op_helper.c
239
+++ b/target/m68k/op_helper.c
240
@@ -XXX,XX +XXX,XX @@
241
#include "exec/exec-all.h"
242
#include "exec/cpu_ldst.h"
243
#include "semihosting/semihost.h"
244
-#include "tcg/tcg.h"
245
246
#if !defined(CONFIG_USER_ONLY)
247
248
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
249
index XXXXXXX..XXXXXXX 100644
250
--- a/target/ppc/mem_helper.c
251
+++ b/target/ppc/mem_helper.c
252
@@ -XXX,XX +XXX,XX @@
253
#include "exec/helper-proto.h"
254
#include "helper_regs.h"
255
#include "exec/cpu_ldst.h"
256
-#include "tcg/tcg.h"
257
#include "internal.h"
258
#include "qemu/atomic128.h"
259
260
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
261
index XXXXXXX..XXXXXXX 100644
262
--- a/target/s390x/tcg/mem_helper.c
263
+++ b/target/s390x/tcg/mem_helper.c
264
@@ -XXX,XX +XXX,XX @@
265
#include "exec/cpu_ldst.h"
266
#include "qemu/int128.h"
267
#include "qemu/atomic128.h"
268
-#include "tcg/tcg.h"
269
#include "trace.h"
270
271
#if !defined(CONFIG_USER_ONLY)
272
--
273
2.25.1
274
275
diff view generated by jsdifflib
Deleted patch
1
We should not have been using the helper_ret_* set of
2
functions, as they are supposed to be private to tcg.
3
Nor should we have been using the plain cpu_*_data set
4
of functions, as they do not handle unwinding properly.
5
1
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/mips/tcg/msa_helper.c | 420 +++++++++++------------------------
10
1 file changed, 135 insertions(+), 285 deletions(-)
11
12
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/mips/tcg/msa_helper.c
15
+++ b/target/mips/tcg/msa_helper.c
16
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
17
target_ulong addr)
18
{
19
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
20
- MEMOP_IDX(DF_BYTE)
21
-#if !defined(CONFIG_USER_ONLY)
22
+ uintptr_t ra = GETPC();
23
+
24
#if !defined(HOST_WORDS_BIGENDIAN)
25
- pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
26
- pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
27
- pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
28
- pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
29
- pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
30
- pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
31
- pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
32
- pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
33
- pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
34
- pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
35
- pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
36
- pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
37
- pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
38
- pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
39
- pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
40
- pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
41
+ pwd->b[0] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
42
+ pwd->b[1] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
43
+ pwd->b[2] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
44
+ pwd->b[3] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
45
+ pwd->b[4] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
46
+ pwd->b[5] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
47
+ pwd->b[6] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
48
+ pwd->b[7] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
49
+ pwd->b[8] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
50
+ pwd->b[9] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
51
+ pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
52
+ pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
53
+ pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
54
+ pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
55
+ pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
56
+ pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
57
#else
58
- pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
59
- pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
60
- pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
61
- pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
62
- pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
63
- pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
64
- pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
65
- pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
66
- pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
67
- pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
68
- pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
69
- pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
70
- pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
71
- pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
72
- pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
73
- pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
74
-#endif
75
-#else
76
-#if !defined(HOST_WORDS_BIGENDIAN)
77
- pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
78
- pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
79
- pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
80
- pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
81
- pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
82
- pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
83
- pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
84
- pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
85
- pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
86
- pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
87
- pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
88
- pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
89
- pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
90
- pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
91
- pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
92
- pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
93
-#else
94
- pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
95
- pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
96
- pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
97
- pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
98
- pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
99
- pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
100
- pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
101
- pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
102
- pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
103
- pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
104
- pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
105
- pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
106
- pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
107
- pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
108
- pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
109
- pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
110
-#endif
111
+ pwd->b[0] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
112
+ pwd->b[1] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
113
+ pwd->b[2] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
114
+ pwd->b[3] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
115
+ pwd->b[4] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
116
+ pwd->b[5] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
117
+ pwd->b[6] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
118
+ pwd->b[7] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
119
+ pwd->b[8] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
120
+ pwd->b[9] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
121
+ pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
122
+ pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
123
+ pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
124
+ pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
125
+ pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
126
+ pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
127
#endif
128
}
129
130
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
131
target_ulong addr)
132
{
133
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
134
- MEMOP_IDX(DF_HALF)
135
-#if !defined(CONFIG_USER_ONLY)
136
+ uintptr_t ra = GETPC();
137
+
138
#if !defined(HOST_WORDS_BIGENDIAN)
139
- pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
140
- pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
141
- pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
142
- pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
143
- pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
144
- pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
145
- pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
146
- pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
147
+ pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
148
+ pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
149
+ pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
150
+ pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
151
+ pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
152
+ pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
153
+ pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
154
+ pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
155
#else
156
- pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
157
- pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
158
- pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
159
- pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
160
- pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
161
- pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
162
- pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
163
- pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
164
-#endif
165
-#else
166
-#if !defined(HOST_WORDS_BIGENDIAN)
167
- pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
168
- pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
169
- pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
170
- pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
171
- pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
172
- pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
173
- pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
174
- pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
175
-#else
176
- pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
177
- pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
178
- pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
179
- pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
180
- pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
181
- pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
182
- pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
183
- pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
184
-#endif
185
+ pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
186
+ pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
187
+ pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
188
+ pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
189
+ pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
190
+ pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
191
+ pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
192
+ pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
193
#endif
194
}
195
196
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
197
target_ulong addr)
198
{
199
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
200
- MEMOP_IDX(DF_WORD)
201
-#if !defined(CONFIG_USER_ONLY)
202
+ uintptr_t ra = GETPC();
203
+
204
#if !defined(HOST_WORDS_BIGENDIAN)
205
- pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
206
- pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
207
- pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
208
- pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
209
+ pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
210
+ pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
211
+ pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
212
+ pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
213
#else
214
- pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
215
- pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
216
- pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
217
- pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
218
-#endif
219
-#else
220
-#if !defined(HOST_WORDS_BIGENDIAN)
221
- pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
222
- pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
223
- pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
224
- pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
225
-#else
226
- pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
227
- pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
228
- pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
229
- pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
230
-#endif
231
+ pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
232
+ pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
233
+ pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
234
+ pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
235
#endif
236
}
237
238
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
239
target_ulong addr)
240
{
241
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
242
- MEMOP_IDX(DF_DOUBLE)
243
-#if !defined(CONFIG_USER_ONLY)
244
- pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
245
- pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
246
-#else
247
- pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
248
- pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
249
-#endif
250
+ uintptr_t ra = GETPC();
251
+
252
+ pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
253
+ pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
254
}
255
256
#define MSA_PAGESPAN(x) \
257
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
258
{
259
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
260
int mmu_idx = cpu_mmu_index(env, false);
261
+ uintptr_t ra = GETPC();
262
+
263
+ ensure_writable_pages(env, addr, mmu_idx, ra);
264
265
- MEMOP_IDX(DF_BYTE)
266
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
267
-#if !defined(CONFIG_USER_ONLY)
268
#if !defined(HOST_WORDS_BIGENDIAN)
269
- helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC());
270
- helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC());
271
- helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC());
272
- helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC());
273
- helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC());
274
- helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC());
275
- helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC());
276
- helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC());
277
- helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC());
278
- helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC());
279
- helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
280
- helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
281
- helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
282
- helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
283
- helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
284
- helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
285
+ cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[0], ra);
286
+ cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[1], ra);
287
+ cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[2], ra);
288
+ cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[3], ra);
289
+ cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[4], ra);
290
+ cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[5], ra);
291
+ cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[6], ra);
292
+ cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[7], ra);
293
+ cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[8], ra);
294
+ cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[9], ra);
295
+ cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
296
+ cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
297
+ cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
298
+ cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
299
+ cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
300
+ cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
301
#else
302
- helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC());
303
- helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC());
304
- helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC());
305
- helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC());
306
- helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC());
307
- helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC());
308
- helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC());
309
- helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC());
310
- helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC());
311
- helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC());
312
- helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
313
- helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
314
- helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
315
- helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
316
- helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC());
317
- helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC());
318
-#endif
319
-#else
320
-#if !defined(HOST_WORDS_BIGENDIAN)
321
- cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]);
322
- cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]);
323
- cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]);
324
- cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]);
325
- cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]);
326
- cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]);
327
- cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]);
328
- cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]);
329
- cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]);
330
- cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]);
331
- cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
332
- cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
333
- cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
334
- cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
335
- cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
336
- cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
337
-#else
338
- cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]);
339
- cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]);
340
- cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]);
341
- cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]);
342
- cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]);
343
- cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]);
344
- cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]);
345
- cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]);
346
- cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
347
- cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
348
- cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
349
- cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
350
- cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
351
- cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
352
- cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]);
353
- cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]);
354
-#endif
355
+ cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[0], ra);
356
+ cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[1], ra);
357
+ cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[2], ra);
358
+ cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[3], ra);
359
+ cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[4], ra);
360
+ cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[5], ra);
361
+ cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[6], ra);
362
+ cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[7], ra);
363
+ cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
364
+ cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
365
+ cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
366
+ cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
367
+ cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
368
+ cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
369
+ cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[14], ra);
370
+ cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[15], ra);
371
#endif
372
}
373
374
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
375
{
376
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
377
int mmu_idx = cpu_mmu_index(env, false);
378
+ uintptr_t ra = GETPC();
379
+
380
+ ensure_writable_pages(env, addr, mmu_idx, ra);
381
382
- MEMOP_IDX(DF_HALF)
383
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
384
-#if !defined(CONFIG_USER_ONLY)
385
#if !defined(HOST_WORDS_BIGENDIAN)
386
- helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
387
- helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
388
- helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
389
- helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
390
- helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
391
- helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
392
- helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
393
- helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
394
+ cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
395
+ cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
396
+ cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
397
+ cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
398
+ cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
399
+ cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
400
+ cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
401
+ cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
402
#else
403
- helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
404
- helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
405
- helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
406
- helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
407
- helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
408
- helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
409
- helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
410
- helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
411
-#endif
412
-#else
413
-#if !defined(HOST_WORDS_BIGENDIAN)
414
- cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
415
- cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
416
- cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
417
- cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
418
- cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
419
- cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
420
- cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
421
- cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
422
-#else
423
- cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
424
- cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
425
- cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
426
- cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
427
- cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
428
- cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
429
- cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
430
- cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
431
-#endif
432
+ cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
433
+ cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
434
+ cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
435
+ cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
436
+ cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
437
+ cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
438
+ cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
439
+ cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
440
#endif
441
}
442
443
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
444
{
445
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
446
int mmu_idx = cpu_mmu_index(env, false);
447
+ uintptr_t ra = GETPC();
448
+
449
+ ensure_writable_pages(env, addr, mmu_idx, ra);
450
451
- MEMOP_IDX(DF_WORD)
452
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
453
-#if !defined(CONFIG_USER_ONLY)
454
#if !defined(HOST_WORDS_BIGENDIAN)
455
- helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
456
- helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
457
- helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
458
- helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
459
+ cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
460
+ cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
461
+ cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
462
+ cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
463
#else
464
- helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
465
- helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
466
- helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
467
- helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
468
-#endif
469
-#else
470
-#if !defined(HOST_WORDS_BIGENDIAN)
471
- cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
472
- cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
473
- cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
474
- cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
475
-#else
476
- cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
477
- cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
478
- cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
479
- cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
480
-#endif
481
+ cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
482
+ cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
483
+ cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
484
+ cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
485
#endif
486
}
487
488
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
489
{
490
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
491
int mmu_idx = cpu_mmu_index(env, false);
492
+ uintptr_t ra = GETPC();
493
494
- MEMOP_IDX(DF_DOUBLE)
495
ensure_writable_pages(env, addr, mmu_idx, GETPC());
496
-#if !defined(CONFIG_USER_ONLY)
497
- helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
498
- helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
499
-#else
500
- cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
501
- cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
502
-#endif
503
+
504
+ cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
505
+ cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
506
}
507
--
508
2.25.1
509
510
diff view generated by jsdifflib
Deleted patch
1
Rather than use 4-16 separate operations, use 2 operations
2
plus some byte reordering as necessary.
3
1
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/mips/tcg/msa_helper.c | 201 +++++++++++++----------------------
8
1 file changed, 71 insertions(+), 130 deletions(-)
9
10
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/mips/tcg/msa_helper.c
13
+++ b/target/mips/tcg/msa_helper.c
14
@@ -XXX,XX +XXX,XX @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
15
#define MEMOP_IDX(DF)
16
#endif
17
18
+#ifdef TARGET_WORDS_BIGENDIAN
19
+static inline uint64_t bswap16x4(uint64_t x)
20
+{
21
+ uint64_t m = 0x00ff00ff00ff00ffull;
22
+ return ((x & m) << 8) | ((x >> 8) & m);
23
+}
24
+
25
+static inline uint64_t bswap32x2(uint64_t x)
26
+{
27
+ return ror64(bswap64(x), 32);
28
+}
29
+#endif
30
+
31
void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
32
target_ulong addr)
33
{
34
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
35
uintptr_t ra = GETPC();
36
+ uint64_t d0, d1;
37
38
-#if !defined(HOST_WORDS_BIGENDIAN)
39
- pwd->b[0] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
40
- pwd->b[1] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
41
- pwd->b[2] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
42
- pwd->b[3] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
43
- pwd->b[4] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
44
- pwd->b[5] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
45
- pwd->b[6] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
46
- pwd->b[7] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
47
- pwd->b[8] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
48
- pwd->b[9] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
49
- pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
50
- pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
51
- pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
52
- pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
53
- pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
54
- pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
55
-#else
56
- pwd->b[0] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
57
- pwd->b[1] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
58
- pwd->b[2] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
59
- pwd->b[3] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
60
- pwd->b[4] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
61
- pwd->b[5] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
62
- pwd->b[6] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
63
- pwd->b[7] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
64
- pwd->b[8] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
65
- pwd->b[9] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
66
- pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
67
- pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
68
- pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
69
- pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
70
- pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
71
- pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
72
-#endif
73
+ /* Load 8 bytes at a time. Vector element ordering makes this LE. */
74
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
75
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
76
+ pwd->d[0] = d0;
77
+ pwd->d[1] = d1;
78
}
79
80
void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
81
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
82
{
83
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
84
uintptr_t ra = GETPC();
85
+ uint64_t d0, d1;
86
87
-#if !defined(HOST_WORDS_BIGENDIAN)
88
- pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
89
- pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
90
- pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
91
- pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
92
- pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
93
- pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
94
- pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
95
- pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
96
-#else
97
- pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
98
- pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
99
- pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
100
- pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
101
- pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
102
- pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
103
- pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
104
- pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
105
+ /*
106
+ * Load 8 bytes at a time. Use little-endian load, then for
107
+ * big-endian target, we must then swap the four halfwords.
108
+ */
109
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
110
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
111
+#ifdef TARGET_WORDS_BIGENDIAN
112
+ d0 = bswap16x4(d0);
113
+ d1 = bswap16x4(d1);
114
#endif
115
+ pwd->d[0] = d0;
116
+ pwd->d[1] = d1;
117
}
118
119
void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
120
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
121
{
122
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
123
uintptr_t ra = GETPC();
124
+ uint64_t d0, d1;
125
126
-#if !defined(HOST_WORDS_BIGENDIAN)
127
- pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
128
- pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
129
- pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
130
- pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
131
-#else
132
- pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
133
- pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
134
- pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
135
- pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
136
+ /*
137
+ * Load 8 bytes at a time. Use little-endian load, then for
138
+ * big-endian target, we must then bswap the two words.
139
+ */
140
+ d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
141
+ d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
142
+#ifdef TARGET_WORDS_BIGENDIAN
143
+ d0 = bswap32x2(d0);
144
+ d1 = bswap32x2(d1);
145
#endif
146
+ pwd->d[0] = d0;
147
+ pwd->d[1] = d1;
148
}
149
150
void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
151
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
152
{
153
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
154
uintptr_t ra = GETPC();
155
+ uint64_t d0, d1;
156
157
- pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
158
- pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
159
+ d0 = cpu_ldq_data_ra(env, addr + 0, ra);
160
+ d1 = cpu_ldq_data_ra(env, addr + 8, ra);
161
+ pwd->d[0] = d0;
162
+ pwd->d[1] = d1;
163
}
164
165
#define MSA_PAGESPAN(x) \
166
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
167
168
ensure_writable_pages(env, addr, mmu_idx, ra);
169
170
-#if !defined(HOST_WORDS_BIGENDIAN)
171
- cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[0], ra);
172
- cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[1], ra);
173
- cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[2], ra);
174
- cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[3], ra);
175
- cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[4], ra);
176
- cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[5], ra);
177
- cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[6], ra);
178
- cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[7], ra);
179
- cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[8], ra);
180
- cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[9], ra);
181
- cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
182
- cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
183
- cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
184
- cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
185
- cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
186
- cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
187
-#else
188
- cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[0], ra);
189
- cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[1], ra);
190
- cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[2], ra);
191
- cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[3], ra);
192
- cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[4], ra);
193
- cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[5], ra);
194
- cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[6], ra);
195
- cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[7], ra);
196
- cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
197
- cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
198
- cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
199
- cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
200
- cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
201
- cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
202
- cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[14], ra);
203
- cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[15], ra);
204
-#endif
205
+ /* Store 8 bytes at a time. Vector element ordering makes this LE. */
206
+ cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra);
207
+ cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra);
208
}
209
210
void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
211
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
212
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
213
int mmu_idx = cpu_mmu_index(env, false);
214
uintptr_t ra = GETPC();
215
+ uint64_t d0, d1;
216
217
ensure_writable_pages(env, addr, mmu_idx, ra);
218
219
-#if !defined(HOST_WORDS_BIGENDIAN)
220
- cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
221
- cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
222
- cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
223
- cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
224
- cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
225
- cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
226
- cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
227
- cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
228
-#else
229
- cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
230
- cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
231
- cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
232
- cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
233
- cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
234
- cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
235
- cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
236
- cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
237
+ /* Store 8 bytes at a time. See helper_msa_ld_h. */
238
+ d0 = pwd->d[0];
239
+ d1 = pwd->d[1];
240
+#ifdef TARGET_WORDS_BIGENDIAN
241
+ d0 = bswap16x4(d0);
242
+ d1 = bswap16x4(d1);
243
#endif
244
+ cpu_stq_le_data_ra(env, addr + 0, d0, ra);
245
+ cpu_stq_le_data_ra(env, addr + 8, d1, ra);
246
}
247
248
void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
249
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
250
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
251
int mmu_idx = cpu_mmu_index(env, false);
252
uintptr_t ra = GETPC();
253
+ uint64_t d0, d1;
254
255
ensure_writable_pages(env, addr, mmu_idx, ra);
256
257
-#if !defined(HOST_WORDS_BIGENDIAN)
258
- cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
259
- cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
260
- cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
261
- cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
262
-#else
263
- cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
264
- cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
265
- cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
266
- cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
267
+ /* Store 8 bytes at a time. See helper_msa_ld_w. */
268
+ d0 = pwd->d[0];
269
+ d1 = pwd->d[1];
270
+#ifdef TARGET_WORDS_BIGENDIAN
271
+ d0 = bswap32x2(d0);
272
+ d1 = bswap32x2(d1);
273
#endif
274
+ cpu_stq_le_data_ra(env, addr + 0, d0, ra);
275
+ cpu_stq_le_data_ra(env, addr + 8, d1, ra);
276
}
277
278
void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
279
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
280
281
ensure_writable_pages(env, addr, mmu_idx, GETPC());
282
283
- cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
284
- cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
285
+ cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra);
286
+ cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra);
287
}
288
--
289
2.25.1
290
291
diff view generated by jsdifflib
Deleted patch
1
The helper_*_mmu functions were the only thing available
2
when this code was written. This could have been adjusted
3
when we added cpu_*_mmuidx_ra, but now we can most easily
4
use the newest set of interfaces.
5
1
6
Reviewed-by: David Hildenbrand <david@redhat.com>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
target/s390x/tcg/mem_helper.c | 8 ++++----
11
1 file changed, 4 insertions(+), 4 deletions(-)
12
13
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/s390x/tcg/mem_helper.c
16
+++ b/target/s390x/tcg/mem_helper.c
17
@@ -XXX,XX +XXX,XX @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
18
* page. This is especially relevant to speed up TLB_NOTDIRTY.
19
*/
20
g_assert(size > 0);
21
- helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
22
+ cpu_stb_mmu(env, vaddr, byte, oi, ra);
23
haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
24
if (likely(haddr)) {
25
memset(haddr + 1, byte, size - 1);
26
} else {
27
for (i = 1; i < size; i++) {
28
- helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
29
+ cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
30
}
31
}
32
}
33
@@ -XXX,XX +XXX,XX @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
34
* Do a single access and test if we can then get access to the
35
* page. This is especially relevant to speed up TLB_NOTDIRTY.
36
*/
37
- byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
38
+ byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
39
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
40
return byte;
41
#endif
42
@@ -XXX,XX +XXX,XX @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
43
* Do a single access and test if we can then get access to the
44
* page. This is especially relevant to speed up TLB_NOTDIRTY.
45
*/
46
- helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
47
+ cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
48
*haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
49
#endif
50
}
51
--
52
2.25.1
53
54
diff view generated by jsdifflib
Deleted patch
1
The helper_*_mmu functions were the only thing available
2
when this code was written. This could have been adjusted
3
when we added cpu_*_mmuidx_ra, but now we can most easily
4
use the newest set of interfaces.
5
1
6
Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
target/sparc/ldst_helper.c | 14 +++++++-------
11
1 file changed, 7 insertions(+), 7 deletions(-)
12
13
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/sparc/ldst_helper.c
16
+++ b/target/sparc/ldst_helper.c
17
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
18
oi = make_memop_idx(memop, idx);
19
switch (size) {
20
case 1:
21
- ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
22
+ ret = cpu_ldb_mmu(env, addr, oi, GETPC());
23
break;
24
case 2:
25
if (asi & 8) {
26
- ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
27
+ ret = cpu_ldw_le_mmu(env, addr, oi, GETPC());
28
} else {
29
- ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
30
+ ret = cpu_ldw_be_mmu(env, addr, oi, GETPC());
31
}
32
break;
33
case 4:
34
if (asi & 8) {
35
- ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
36
+ ret = cpu_ldl_le_mmu(env, addr, oi, GETPC());
37
} else {
38
- ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
39
+ ret = cpu_ldl_be_mmu(env, addr, oi, GETPC());
40
}
41
break;
42
case 8:
43
if (asi & 8) {
44
- ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
45
+ ret = cpu_ldq_le_mmu(env, addr, oi, GETPC());
46
} else {
47
- ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
48
+ ret = cpu_ldq_be_mmu(env, addr, oi, GETPC());
49
}
50
break;
51
default:
52
--
53
2.25.1
54
55
diff view generated by jsdifflib
Deleted patch
1
The helper_*_mmu functions were the only thing available
2
when this code was written. This could have been adjusted
3
when we added cpu_*_mmuidx_ra, but now we can most easily
4
use the newest set of interfaces.
5
1
6
Cc: qemu-arm@nongnu.org
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
target/arm/helper-a64.c | 52 +++++++----------------------------------
11
target/arm/m_helper.c | 6 ++---
12
2 files changed, 11 insertions(+), 47 deletions(-)
13
14
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/arm/helper-a64.c
17
+++ b/target/arm/helper-a64.c
18
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
19
uintptr_t ra = GETPC();
20
uint64_t o0, o1;
21
bool success;
22
-
23
-#ifdef CONFIG_USER_ONLY
24
- /* ??? Enforce alignment. */
25
- uint64_t *haddr = g2h(env_cpu(env), addr);
26
-
27
- set_helper_retaddr(ra);
28
- o0 = ldq_le_p(haddr + 0);
29
- o1 = ldq_le_p(haddr + 1);
30
- oldv = int128_make128(o0, o1);
31
-
32
- success = int128_eq(oldv, cmpv);
33
- if (success) {
34
- stq_le_p(haddr + 0, int128_getlo(newv));
35
- stq_le_p(haddr + 1, int128_gethi(newv));
36
- }
37
- clear_helper_retaddr();
38
-#else
39
int mem_idx = cpu_mmu_index(env, false);
40
MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
41
MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
42
43
- o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
44
- o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
45
+ o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
46
+ o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
47
oldv = int128_make128(o0, o1);
48
49
success = int128_eq(oldv, cmpv);
50
if (success) {
51
- helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
52
- helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
53
+ cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
54
+ cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
55
}
56
-#endif
57
58
return !success;
59
}
60
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
61
uintptr_t ra = GETPC();
62
uint64_t o0, o1;
63
bool success;
64
-
65
-#ifdef CONFIG_USER_ONLY
66
- /* ??? Enforce alignment. */
67
- uint64_t *haddr = g2h(env_cpu(env), addr);
68
-
69
- set_helper_retaddr(ra);
70
- o1 = ldq_be_p(haddr + 0);
71
- o0 = ldq_be_p(haddr + 1);
72
- oldv = int128_make128(o0, o1);
73
-
74
- success = int128_eq(oldv, cmpv);
75
- if (success) {
76
- stq_be_p(haddr + 0, int128_gethi(newv));
77
- stq_be_p(haddr + 1, int128_getlo(newv));
78
- }
79
- clear_helper_retaddr();
80
-#else
81
int mem_idx = cpu_mmu_index(env, false);
82
MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
83
MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
84
85
- o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
86
- o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
87
+ o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
88
+ o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
89
oldv = int128_make128(o0, o1);
90
91
success = int128_eq(oldv, cmpv);
92
if (success) {
93
- helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
94
- helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
95
+ cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
96
+ cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
97
}
98
-#endif
99
100
return !success;
101
}
102
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/target/arm/m_helper.c
105
+++ b/target/arm/m_helper.c
106
@@ -XXX,XX +XXX,XX @@ static bool do_v7m_function_return(ARMCPU *cpu)
107
* do them as secure, so work out what MMU index that is.
108
*/
109
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
110
- oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
111
- newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
112
- newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
113
+ oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
114
+ newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
115
+ newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
116
117
/* Consistency checks on new IPSR */
118
newpsr_exc = newpsr & XPSR_EXCP;
119
--
120
2.25.1
121
122
diff view generated by jsdifflib
Deleted patch
1
These functions have been replaced by cpu_*_mmu as the
2
most proper interface to use from target code.
3
1
4
Hide these declarations from code that should not use them.
5
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
include/tcg/tcg-ldst.h | 74 ++++++++++++++++++++++++++++++++++++++++++
10
include/tcg/tcg.h | 71 ----------------------------------------
11
accel/tcg/cputlb.c | 1 +
12
tcg/tcg.c | 1 +
13
tcg/tci.c | 1 +
14
5 files changed, 77 insertions(+), 71 deletions(-)
15
create mode 100644 include/tcg/tcg-ldst.h
16
17
diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
18
new file mode 100644
19
index XXXXXXX..XXXXXXX
20
--- /dev/null
21
+++ b/include/tcg/tcg-ldst.h
22
@@ -XXX,XX +XXX,XX @@
23
+/*
24
+ * Memory helpers that will be used by TCG generated code.
25
+ *
26
+ * Copyright (c) 2008 Fabrice Bellard
27
+ *
28
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
29
+ * of this software and associated documentation files (the "Software"), to deal
30
+ * in the Software without restriction, including without limitation the rights
31
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
32
+ * copies of the Software, and to permit persons to whom the Software is
33
+ * furnished to do so, subject to the following conditions:
34
+ *
35
+ * The above copyright notice and this permission notice shall be included in
36
+ * all copies or substantial portions of the Software.
37
+ *
38
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
39
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
40
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
41
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
42
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
43
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44
+ * THE SOFTWARE.
45
+ */
46
+
47
+#ifndef TCG_LDST_H
48
+#define TCG_LDST_H 1
49
+
50
+#ifdef CONFIG_SOFTMMU
51
+
52
+/* Value zero-extended to tcg register size. */
53
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
54
+ MemOpIdx oi, uintptr_t retaddr);
55
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
56
+ MemOpIdx oi, uintptr_t retaddr);
57
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
58
+ MemOpIdx oi, uintptr_t retaddr);
59
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
60
+ MemOpIdx oi, uintptr_t retaddr);
61
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
62
+ MemOpIdx oi, uintptr_t retaddr);
63
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
64
+ MemOpIdx oi, uintptr_t retaddr);
65
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
66
+ MemOpIdx oi, uintptr_t retaddr);
67
+
68
+/* Value sign-extended to tcg register size. */
69
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
70
+ MemOpIdx oi, uintptr_t retaddr);
71
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
72
+ MemOpIdx oi, uintptr_t retaddr);
73
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
74
+ MemOpIdx oi, uintptr_t retaddr);
75
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
76
+ MemOpIdx oi, uintptr_t retaddr);
77
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
78
+ MemOpIdx oi, uintptr_t retaddr);
79
+
80
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
81
+ MemOpIdx oi, uintptr_t retaddr);
82
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
83
+ MemOpIdx oi, uintptr_t retaddr);
84
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
85
+ MemOpIdx oi, uintptr_t retaddr);
86
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
87
+ MemOpIdx oi, uintptr_t retaddr);
88
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
89
+ MemOpIdx oi, uintptr_t retaddr);
90
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
91
+ MemOpIdx oi, uintptr_t retaddr);
92
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
93
+ MemOpIdx oi, uintptr_t retaddr);
94
+
95
+#endif /* CONFIG_SOFTMMU */
96
+#endif /* TCG_LDST_H */
97
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/tcg/tcg.h
100
+++ b/include/tcg/tcg.h
101
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
102
: (target_long)dup_const(VECE, C))
103
#endif
104
105
-/*
106
- * Memory helpers that will be used by TCG generated code.
107
- */
108
-#ifdef CONFIG_SOFTMMU
109
-/* Value zero-extended to tcg register size. */
110
-tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
111
- MemOpIdx oi, uintptr_t retaddr);
112
-tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
113
- MemOpIdx oi, uintptr_t retaddr);
114
-tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
115
- MemOpIdx oi, uintptr_t retaddr);
116
-uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
117
- MemOpIdx oi, uintptr_t retaddr);
118
-tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
119
- MemOpIdx oi, uintptr_t retaddr);
120
-tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
121
- MemOpIdx oi, uintptr_t retaddr);
122
-uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
123
- MemOpIdx oi, uintptr_t retaddr);
124
-
125
-/* Value sign-extended to tcg register size. */
126
-tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
127
- MemOpIdx oi, uintptr_t retaddr);
128
-tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
129
- MemOpIdx oi, uintptr_t retaddr);
130
-tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
131
- MemOpIdx oi, uintptr_t retaddr);
132
-tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
133
- MemOpIdx oi, uintptr_t retaddr);
134
-tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
135
- MemOpIdx oi, uintptr_t retaddr);
136
-
137
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
138
- MemOpIdx oi, uintptr_t retaddr);
139
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
140
- MemOpIdx oi, uintptr_t retaddr);
141
-void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
142
- MemOpIdx oi, uintptr_t retaddr);
143
-void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
144
- MemOpIdx oi, uintptr_t retaddr);
145
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
146
- MemOpIdx oi, uintptr_t retaddr);
147
-void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
148
- MemOpIdx oi, uintptr_t retaddr);
149
-void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
150
- MemOpIdx oi, uintptr_t retaddr);
151
-
152
-/* Temporary aliases until backends are converted. */
153
-#ifdef TARGET_WORDS_BIGENDIAN
154
-# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
155
-# define helper_ret_lduw_mmu helper_be_lduw_mmu
156
-# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
157
-# define helper_ret_ldul_mmu helper_be_ldul_mmu
158
-# define helper_ret_ldl_mmu helper_be_ldul_mmu
159
-# define helper_ret_ldq_mmu helper_be_ldq_mmu
160
-# define helper_ret_stw_mmu helper_be_stw_mmu
161
-# define helper_ret_stl_mmu helper_be_stl_mmu
162
-# define helper_ret_stq_mmu helper_be_stq_mmu
163
-#else
164
-# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
165
-# define helper_ret_lduw_mmu helper_le_lduw_mmu
166
-# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
167
-# define helper_ret_ldul_mmu helper_le_ldul_mmu
168
-# define helper_ret_ldl_mmu helper_le_ldul_mmu
169
-# define helper_ret_ldq_mmu helper_le_ldq_mmu
170
-# define helper_ret_stw_mmu helper_le_stw_mmu
171
-# define helper_ret_stl_mmu helper_le_stl_mmu
172
-# define helper_ret_stq_mmu helper_le_stq_mmu
173
-#endif
174
-#endif /* CONFIG_SOFTMMU */
175
-
176
#ifdef CONFIG_DEBUG_TCG
177
void tcg_assert_listed_vecop(TCGOpcode);
178
#else
179
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
180
index XXXXXXX..XXXXXXX 100644
181
--- a/accel/tcg/cputlb.c
182
+++ b/accel/tcg/cputlb.c
183
@@ -XXX,XX +XXX,XX @@
184
#ifdef CONFIG_PLUGIN
185
#include "qemu/plugin-memory.h"
186
#endif
187
+#include "tcg/tcg-ldst.h"
188
189
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
190
/* #define DEBUG_TLB */
191
diff --git a/tcg/tcg.c b/tcg/tcg.c
192
index XXXXXXX..XXXXXXX 100644
193
--- a/tcg/tcg.c
194
+++ b/tcg/tcg.c
195
@@ -XXX,XX +XXX,XX @@
196
197
#include "elf.h"
198
#include "exec/log.h"
199
+#include "tcg/tcg-ldst.h"
200
#include "tcg-internal.h"
201
202
#ifdef CONFIG_TCG_INTERPRETER
203
diff --git a/tcg/tci.c b/tcg/tci.c
204
index XXXXXXX..XXXXXXX 100644
205
--- a/tcg/tci.c
206
+++ b/tcg/tci.c
207
@@ -XXX,XX +XXX,XX @@
208
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
209
#include "exec/cpu_ldst.h"
210
#include "tcg/tcg-op.h"
211
+#include "tcg/tcg-ldst.h"
212
#include "qemu/compiler.h"
213
#include <ffi.h>
214
215
--
216
2.25.1
217
218
diff view generated by jsdifflib
1
Having observed e.g. al8+leq in dumps, canonicalize to al+leq.
1
The LDRD (register) instruction is UNPREDICTABLE if the Rm register
2
is the same as either Rt or Rt+1 (the two registers being loaded to).
3
We weren't making sure we avoided this, with the result that on some
4
host CPUs like the Cortex-A7 we would get a SIGILL because the CPU
5
chooses to UNDEF for this particular UNPREDICTABLE case.
2
6
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Since we've already checked that datalo is aligned, we can simplify
8
the test vs the Rm operand by aligning it before comparison. Check
9
for the two orderings before falling back to two ldr instructions.
10
11
We don't bother to do anything similar for tcg_out_ldrd_rwb(),
12
because it is only used in tcg_out_tlb_read() with a fixed set of
13
registers which don't overlap.
14
15
There is no equivalent UNPREDICTABLE case for STRD.
16
17
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
18
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/896
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
19
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
20
---
6
tcg/tcg-op.c | 7 ++++++-
21
tcg/arm/tcg-target.c.inc | 17 +++++++++++++++--
7
1 file changed, 6 insertions(+), 1 deletion(-)
22
1 file changed, 15 insertions(+), 2 deletions(-)
8
23
9
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
24
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tcg-op.c
26
--- a/tcg/arm/tcg-target.c.inc
12
+++ b/tcg/tcg-op.c
27
+++ b/tcg/arm/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void)
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
14
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
29
/* LDRD requires alignment; double-check that. */
15
{
30
if (get_alignment_bits(opc) >= MO_64
16
/* Trigger the asserts within as early as possible. */
31
&& (datalo & 1) == 0 && datahi == datalo + 1) {
17
- (void)get_alignment_bits(op);
32
- tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
18
+ unsigned a_bits = get_alignment_bits(op);
33
- } else if (scratch_addend) {
19
+
34
+ /*
20
+ /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
35
+ * Rm (the second address op) must not overlap Rt or Rt + 1.
21
+ if (a_bits == (op & MO_SIZE)) {
36
+ * Since datalo is aligned, we can simplify the test via alignment.
22
+ op = (op & ~MO_AMASK) | MO_ALIGN;
37
+ * Flip the two address arguments if that works.
23
+ }
38
+ */
24
39
+ if ((addend & ~1) != datalo) {
25
switch (op & MO_SIZE) {
40
+ tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
26
case MO_8:
41
+ break;
42
+ }
43
+ if ((addrlo & ~1) != datalo) {
44
+ tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
45
+ break;
46
+ }
47
+ }
48
+ if (scratch_addend) {
49
tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
50
tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
51
} else {
27
--
52
--
28
2.25.1
53
2.25.1
29
54
30
55
diff view generated by jsdifflib