Based-on: 20211006172307.780893-1-richard.henderson@linaro.org
("[PATCH v4 00/41] linux-user: Streamline handling of SIGSEGV")

This began with Peter wanting a cpu_ldst.h interface that can handle
alignment info for Arm M-profile system mode, which will also compile
for user-only without ifdefs.

Once I had that interface, I thought I might as well enforce the
requested alignment in user-only. There are plenty of cases where
we ought to have been doing that for quite a while. This took rather
more work than I imagined at the start.

Changes for v4:
  * Rebase, with some patches now upstream.
  * Rename the core function to cpu_loop_exit_sigbus.

Changes for v3:
  * Updated tcg/{aarch64,ppc,s390,riscv,tci}.

Changes for v2:
  * Cleanup prctl(2), add support for prctl(PR_GET/SET_UNALIGN).
  * Adjustments for ppc and sparc reporting address during alignment fault.

r~

Richard Henderson (48):
  hw/core: Add TCGCPUOps.record_sigbus
  linux-user: Add cpu_loop_exit_sigbus
  linux-user/alpha: Remove EXCP_UNALIGN handling
  target/arm: Implement arm_cpu_record_sigbus
  linux-user/hppa: Remove EXCP_UNALIGN handling
  target/microblaze: Do not set MO_ALIGN for user-only
  target/ppc: Move SPR_DSISR setting to powerpc_excp
  target/ppc: Set fault address in ppc_cpu_do_unaligned_access
  target/ppc: Restrict ppc_cpu_do_unaligned_access to sysemu
  target/s390x: Implement s390x_cpu_record_sigbus
  linux-user/ppc: Remove POWERPC_EXCP_ALIGN handling
  target/sh4: Set fault address in superh_cpu_do_unaligned_access
  target/sparc: Remove DEBUG_UNALIGNED
  target/sparc: Split out build_sfsr
  target/sparc: Set fault address in sparc_cpu_do_unaligned_access
  accel/tcg: Report unaligned atomics for user-only
  target/arm: Use MO_128 for 16 byte atomics
  target/i386: Use MO_128 for 16 byte atomics
  target/ppc: Use MO_128 for 16 byte atomics
  target/s390x: Use MO_128 for 16 byte atomics
  target/hexagon: Implement cpu_mmu_index
  accel/tcg: Add cpu_{ld,st}*_mmu interfaces
  accel/tcg: Move cpu_atomic decls to exec/cpu_ldst.h
  target/mips: Use cpu_*_data_ra for msa load/store
  target/mips: Use 8-byte memory ops for msa load/store
  target/s390x: Use cpu_*_mmu instead of helper_*_mmu
  target/sparc: Use cpu_*_mmu instead of helper_*_mmu
  target/arm: Use cpu_*_mmu instead of helper_*_mmu
  tcg: Move helper_*_mmu decls to tcg/tcg-ldst.h
  tcg: Add helper_unaligned_{ld,st} for user-only sigbus
  linux-user: Split out do_prctl and subroutines
  linux-user: Disable more prctl subcodes
  Revert "cpu: Move cpu_common_props to hw/core/cpu.c"
  linux-user: Add code for PR_GET/SET_UNALIGN
  target/alpha: Reorg fp memory operations
  target/alpha: Reorg integer memory operations
  target/alpha: Implement prctl_unalign_sigbus
  target/hppa: Implement prctl_unalign_sigbus
  target/sh4: Implement prctl_unalign_sigbus
  linux-user/signal: Handle BUS_ADRALN in host_signal_handler
  tcg: Canonicalize alignment flags in MemOp
  tcg/i386: Support raising sigbus for user-only
  tcg/aarch64: Support raising sigbus for user-only
  tcg/ppc: Support raising sigbus for user-only
  tcg/s390: Support raising sigbus for user-only
  tcg/tci: Support raising sigbus for user-only
  tcg/riscv: Support raising sigbus for user-only
  tests/tcg/multiarch: Add sigbus.c

 docs/devel/loads-stores.rst               |  52 ++-
 include/exec/cpu_ldst.h                   | 332 ++++++++-------
 include/exec/exec-all.h                   |  14 +
 include/hw/core/cpu.h                     |   4 +
 include/hw/core/tcg-cpu-ops.h             |  23 +
 include/tcg/tcg-ldst.h                    |  79 ++++
 include/tcg/tcg.h                         | 158 -------
 linux-user/aarch64/target_prctl.h         | 160 +++++++
 linux-user/aarch64/target_syscall.h       |  23 -
 linux-user/alpha/target_prctl.h           |   1 +
 linux-user/arm/target_prctl.h             |   1 +
 linux-user/cris/target_prctl.h            |   1 +
 linux-user/generic/target_prctl_unalign.h |  27 ++
 linux-user/hexagon/target_prctl.h         |   1 +
 linux-user/hppa/target_prctl.h            |   1 +
 linux-user/i386/target_prctl.h            |   1 +
 linux-user/m68k/target_prctl.h            |   1 +
 linux-user/microblaze/target_prctl.h      |   1 +
 linux-user/mips/target_prctl.h            |  88 ++++
 linux-user/mips/target_syscall.h          |   6 -
 linux-user/mips64/target_prctl.h          |   1 +
 linux-user/mips64/target_syscall.h        |   6 -
 linux-user/nios2/target_prctl.h           |   1 +
 linux-user/openrisc/target_prctl.h        |   1 +
 linux-user/ppc/target_prctl.h             |   1 +
 linux-user/riscv/target_prctl.h           |   1 +
 linux-user/s390x/target_prctl.h           |   1 +
 linux-user/sh4/target_prctl.h             |   1 +
 linux-user/sparc/target_prctl.h           |   1 +
 linux-user/x86_64/target_prctl.h          |   1 +
 linux-user/xtensa/target_prctl.h          |   1 +
 target/alpha/cpu.h                        |   5 +
 target/arm/internals.h                    |   2 +
 target/hexagon/cpu.h                      |   9 +
 target/hppa/cpu.h                         |   5 +-
 target/ppc/internal.h                     |   8 +-
 target/s390x/s390x-internal.h             |   8 +-
 target/sh4/cpu.h                          |   4 +
 tcg/aarch64/tcg-target.h                  |   2 -
 tcg/i386/tcg-target.h                     |   2 -
 tcg/ppc/tcg-target.h                      |   2 -
 tcg/riscv/tcg-target.h                    |   2 -
 tcg/s390x/tcg-target.h                    |   2 -
 accel/tcg/cputlb.c                        | 393 ++++++-----------
 accel/tcg/user-exec.c                     | 414 ++++++++----------
 cpu.c                                     |  31 ++
 hw/core/cpu-common.c                      |  17 +-
 linux-user/aarch64/cpu_loop.c             |  12 +-
 linux-user/alpha/cpu_loop.c               |  15 -
 linux-user/arm/cpu_loop.c                 |  30 +-
 linux-user/hppa/cpu_loop.c                |   7 -
 linux-user/ppc/cpu_loop.c                 |   8 -
 linux-user/signal.c                       |  17 +
 linux-user/syscall.c                      | 490 +++++++++-------------
 target/alpha/translate.c                  | 188 +++++----
 target/arm/cpu.c                          |   1 +
 target/arm/cpu_tcg.c                      |   1 +
 target/arm/helper-a64.c                   |  61 +--
 target/arm/m_helper.c                     |   6 +-
 target/arm/tlb_helper.c                   |   6 +
 target/hppa/translate.c                   |  19 +-
 target/i386/tcg/mem_helper.c              |   2 +-
 target/m68k/op_helper.c                   |   1 -
 target/microblaze/translate.c             |  16 +
 target/mips/tcg/msa_helper.c              | 389 ++++-------------
 target/ppc/excp_helper.c                  |  41 +-
 target/ppc/mem_helper.c                   |   1 -
 target/ppc/translate.c                    |  12 +-
 target/s390x/cpu.c                        |   1 +
 target/s390x/tcg/excp_helper.c            |  27 +-
 target/s390x/tcg/mem_helper.c             |  13 +-
 target/sh4/op_helper.c                    |   5 +
 target/sh4/translate.c                    |  50 ++-
 target/sparc/ldst_helper.c                |  36 +-
 target/sparc/mmu_helper.c                 |  92 ++--
 tcg/tcg-op.c                              |   7 +-
 tcg/tcg.c                                 |   1 +
 tcg/tci.c                                 |  21 +-
 tests/tcg/multiarch/sigbus.c              |  68 +++
 accel/tcg/ldst_common.c.inc               | 307 ++++++++++++++
 tcg/aarch64/tcg-target.c.inc              |  91 +++-
 tcg/i386/tcg-target.c.inc                 | 103 ++++-
 tcg/ppc/tcg-target.c.inc                  |  98 ++++-
 tcg/riscv/tcg-target.c.inc                |  63 ++-
 tcg/s390x/tcg-target.c.inc                |  59 ++-
 85 files changed, 2457 insertions(+), 1804 deletions(-)
 create mode 100644 include/tcg/tcg-ldst.h
 create mode 100644 linux-user/aarch64/target_prctl.h
 create mode 100644 linux-user/alpha/target_prctl.h
 create mode 100644 linux-user/arm/target_prctl.h
 create mode 100644 linux-user/cris/target_prctl.h
 create mode 100644 linux-user/generic/target_prctl_unalign.h
 create mode 100644 linux-user/hexagon/target_prctl.h
 create mode 100644 linux-user/hppa/target_prctl.h
 create mode 100644 linux-user/i386/target_prctl.h
 create mode 100644 linux-user/m68k/target_prctl.h
 create mode 100644 linux-user/microblaze/target_prctl.h
 create mode 100644 linux-user/mips/target_prctl.h
 create mode 100644 linux-user/mips64/target_prctl.h
 create mode 100644 linux-user/nios2/target_prctl.h
 create mode 100644 linux-user/openrisc/target_prctl.h
 create mode 100644 linux-user/ppc/target_prctl.h
 create mode 100644 linux-user/riscv/target_prctl.h
 create mode 100644 linux-user/s390x/target_prctl.h
 create mode 100644 linux-user/sh4/target_prctl.h
 create mode 100644 linux-user/sparc/target_prctl.h
 create mode 100644 linux-user/x86_64/target_prctl.h
 create mode 100644 linux-user/xtensa/target_prctl.h
 create mode 100644 tests/tcg/multiarch/sigbus.c
 create mode 100644 accel/tcg/ldst_common.c.inc

--
2.25.1

Hi,

This new version removed the translate_fn() from patch 1 because it
wasn't removing the sign-extension for pentry as we thought it would.
A more detailed explanation is given in the commit msg of patch 1.

We're now retrieving the 'lowaddr' value from load_elf_ram_sym() and
using it when we're running a 32-bit CPU. This worked with a 32-bit
'virt' machine booting with the -kernel option.

If this approach doesn't work for the Xvisor use case, IMO we should
just filter the kernel_load_addr bits directly, as we were doing a
handful of versions ago.

Patches are based on current riscv-to-apply.next.

Changes from v9:
- patch 1:
  - removed the translate_fn() callback
  - return 'kernel_low' when running a 32-bit CPU
- v9 link: https://lists.gnu.org/archive/html/qemu-devel/2023-01/msg04509.html

Daniel Henrique Barboza (3):
  hw/riscv: handle 32 bit CPUs kernel_addr in riscv_load_kernel()
  hw/riscv/boot.c: consolidate all kernel init in riscv_load_kernel()
  hw/riscv/boot.c: make riscv_load_initrd() static

 hw/riscv/boot.c            | 96 +++++++++++++++++++++++---------------
 hw/riscv/microchip_pfsoc.c | 12 +----
 hw/riscv/opentitan.c       |  4 +-
 hw/riscv/sifive_e.c        |  4 +-
 hw/riscv/sifive_u.c        | 12 +----
 hw/riscv/spike.c           | 14 ++----
 hw/riscv/virt.c            | 12 +----
 include/hw/riscv/boot.h    |  3 +-
 8 files changed, 76 insertions(+), 81 deletions(-)

--
2.39.1
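For context on the PR_GET/SET_UNALIGN item in the first cover letter's
changelog: this is the existing Linux prctl(2) knob that lets a process
choose whether unaligned accesses are fixed up or raise SIGBUS, on the
targets whose kernels support it (alpha, hppa, sh4 in this series). A
minimal illustration, ordinary userspace code rather than anything from
the series itself; the constants are the standard <linux/prctl.h> ones:

    #include <sys/prctl.h>

    int main(void)
    {
        /* Ask the kernel (or qemu-user) to send SIGBUS on misalignment. */
        prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);

        /* ... perform unaligned accesses here ... */

        /* Switch back to silent fixup. */
        prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);
        return 0;
    }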
Add a new user-only interface for updating cpu state before
raising a signal. This will take the place of do_unaligned_access
for user-only and should result in less boilerplate for each guest.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/tcg-cpu-ops.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
     void (*record_sigsegv)(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
+    /**
+     * record_sigbus:
+     * @cpu: cpu context
+     * @addr: misaligned guest address
+     * @access_type: access was read/write/execute
+     * @ra: host pc for unwinding
+     *
+     * We are about to raise SIGBUS with si_code BUS_ADRALN,
+     * and si_addr set for @addr.  Record anything further needed
+     * for the signal ucontext_t.
+     *
+     * If the emulated kernel does not provide the signal handler with
+     * anything besides the user context registers, and the siginfo_t,
+     * then this hook need do nothing and may be omitted.
+     * Otherwise, record the data and return; the caller will raise
+     * the signal, unwind the cpu state, and return to the main loop.
+     *
+     * If it is simpler to re-use the sysemu do_unaligned_access code,
+     * @ra is provided so that a "normal" cpu exception can be raised.
+     * In this case, the signal must be raised by the architecture cpu_loop.
+     */
+    void (*record_sigbus)(CPUState *cpu, vaddr addr,
+                          MMUAccessType access_type, uintptr_t ra);
 #endif /* CONFIG_SOFTMMU */
 #endif /* NEED_CPU_H */

--
2.25.1
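As a rough sketch of what a target implementation of this hook can look
like, assuming a hypothetical target "xyz" with a fault-address field in
its CPU state (the real implementations for arm and s390x appear later
in this series):

    static void xyz_cpu_record_sigbus(CPUState *cs, vaddr addr,
                                      MMUAccessType access_type, uintptr_t ra)
    {
        CPUXYZState *env = cs->env_ptr;   /* hypothetical target state */

        /* Record the faulting address where the signal frame expects it. */
        env->fault_address = addr;
    }

Targets whose signal frames need nothing beyond siginfo_t simply leave
.record_sigbus unset, as the documentation above says.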
This is a new interface to be provided by the os emulator for
raising SIGBUS on fault. Use the new record_sigbus target hook.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 14 ++++++++++++++
 linux-user/signal.c     | 14 ++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                          MMUAccessType access_type,
                                          bool maperr, uintptr_t ra);

+/**
+ * cpu_loop_exit_sigbus:
+ * @cpu: the cpu context
+ * @addr: the guest address of the alignment fault
+ * @access_type: access was read/write/execute
+ * @ra: host pc for unwinding
+ *
+ * Use the TCGCPUOps hook to record cpu state, do guest operating system
+ * specific things to raise SIGBUS, and jump to the main cpu loop.
+ */
+void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+                                        MMUAccessType access_type,
+                                        uintptr_t ra);
+
 #else
 static inline void mmap_lock(void) {}
 static inline void mmap_unlock(void) {}
diff --git a/linux-user/signal.c b/linux-user/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
     cpu_loop_exit_restore(cpu, ra);
 }

+void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+                          MMUAccessType access_type, uintptr_t ra)
+{
+    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+    if (tcg_ops->record_sigbus) {
+        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
+    }
+
+    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
+    cpu->exception_index = EXCP_INTERRUPT;
+    cpu_loop_exit_restore(cpu, ra);
+}
+
 /* abort execution with signal */
 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
 {

--
2.25.1
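A sketch of a call site, assuming a user-only helper that requires
8-byte alignment (the helper name and check here are illustrative; the
real callers are added by the later accel/tcg and tcg backend patches):

    uint64_t helper_do_ld8(CPUArchState *env, target_ulong addr)
    {
        if (addr & 7) {
            /* GETPC() supplies the host return address for unwinding. */
            cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
        }
        return cpu_ldq_data(env, addr);
    }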
We will raise SIGBUS directly from cpu_loop_exit_sigbus.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/alpha/cpu_loop.c | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/alpha/cpu_loop.c
+++ b/linux-user/alpha/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUAlphaState *env)
             fprintf(stderr, "External interrupt. Exit\n");
             exit(EXIT_FAILURE);
             break;
-        case EXCP_MMFAULT:
-            info.si_signo = TARGET_SIGSEGV;
-            info.si_errno = 0;
-            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
-                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
-            info._sifields._sigfault._addr = env->trap_arg0;
-            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-            break;
-        case EXCP_UNALIGN:
-            info.si_signo = TARGET_SIGBUS;
-            info.si_errno = 0;
-            info.si_code = TARGET_BUS_ADRALN;
-            info._sifields._sigfault._addr = env->trap_arg0;
-            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-            break;
         case EXCP_OPCDEC:
         do_sigill:
             info.si_signo = TARGET_SIGILL;

--
2.25.1
Because of the complexity of setting ESR, re-use the existing
arm_cpu_do_unaligned_access function. This means we have to
handle the exception ourselves in cpu_loop, transforming it
to the appropriate signal.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/internals.h        |  2 ++
 linux-user/aarch64/cpu_loop.c | 12 +++++++++---
 linux-user/arm/cpu_loop.c     | 30 ++++++++++++++++++++++++++----
 target/arm/cpu.c              |  1 +
 target/arm/cpu_tcg.c          |  1 +
 target/arm/tlb_helper.c       |  6 ++++++
 6 files changed, 45 insertions(+), 7 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline bool arm_extabort_type(MemTxResult result)
 void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                             MMUAccessType access_type,
                             bool maperr, uintptr_t ra);
+void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra);
 #else
 bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -XXX,XX +XXX,XX @@
 void cpu_loop(CPUARMState *env)
 {
     CPUState *cs = env_cpu(env);
-    int trapnr, ec, fsc, si_code;
+    int trapnr, ec, fsc, si_code, si_signo;
     abi_long ret;

     for (;;) {
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
             fsc = extract32(env->exception.syndrome, 0, 6);
             switch (fsc) {
             case 0x04 ... 0x07: /* Translation fault, level {0-3} */
+                si_signo = TARGET_SIGSEGV;
                 si_code = TARGET_SEGV_MAPERR;
                 break;
             case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
             case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
+                si_signo = TARGET_SIGSEGV;
                 si_code = TARGET_SEGV_ACCERR;
                 break;
             case 0x11: /* Synchronous Tag Check Fault */
+                si_signo = TARGET_SIGSEGV;
                 si_code = TARGET_SEGV_MTESERR;
                 break;
+            case 0x21: /* Alignment fault */
+                si_signo = TARGET_SIGBUS;
+                si_code = TARGET_BUS_ADRALN;
+                break;
             default:
                 g_assert_not_reached();
             }
-
-            force_sig_fault(TARGET_SIGSEGV, si_code, env->exception.vaddress);
+            force_sig_fault(si_signo, si_code, env->exception.vaddress);
             break;
         case EXCP_DEBUG:
         case EXCP_BKPT:
diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/cpu_loop.c
+++ b/linux-user/arm/cpu_loop.c
@@ -XXX,XX +XXX,XX @@
 #include "cpu_loop-common.h"
 #include "signal-common.h"
 #include "semihosting/common-semi.h"
+#include "target/arm/syndrome.h"

 #define get_user_code_u32(x, gaddr, env)                \
     ({ abi_long __r = get_user_u32((x), (gaddr));       \
@@ -XXX,XX +XXX,XX @@ static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode)
 void cpu_loop(CPUARMState *env)
 {
     CPUState *cs = env_cpu(env);
-    int trapnr;
+    int trapnr, si_signo, si_code;
     unsigned int n, insn;
     abi_ulong ret;

@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUARMState *env)
             break;
         case EXCP_PREFETCH_ABORT:
         case EXCP_DATA_ABORT:
-            /* XXX: check env->error_code */
-            force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR,
-                            env->exception.vaddress);
+            /* For user-only we don't set TTBCR_EAE, so look at the FSR. */
+            switch (env->exception.fsr & 0x1f) {
+            case 0x1: /* Alignment */
+                si_signo = TARGET_SIGBUS;
+                si_code = TARGET_BUS_ADRALN;
+                break;
+            case 0x3: /* Access flag fault, level 1 */
+            case 0x6: /* Access flag fault, level 2 */
+            case 0x9: /* Domain fault, level 1 */
+            case 0xb: /* Domain fault, level 2 */
+            case 0xd: /* Permission fault, level 1 */
+            case 0xf: /* Permission fault, level 2 */
+                si_signo = TARGET_SIGSEGV;
+                si_code = TARGET_SEGV_ACCERR;
+                break;
+            case 0x5: /* Translation fault, level 1 */
+            case 0x7: /* Translation fault, level 2 */
+                si_signo = TARGET_SIGSEGV;
+                si_code = TARGET_SEGV_MAPERR;
+                break;
+            default:
+                g_assert_not_reached();
+            }
+            force_sig_fault(si_signo, si_code, env->exception.vaddress);
             break;
         case EXCP_DEBUG:
         case EXCP_BKPT:
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct TCGCPUOps arm_tcg_ops = {

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = arm_cpu_record_sigsegv,
+    .record_sigbus = arm_cpu_record_sigbus,
 #else
     .tlb_fill = arm_cpu_tlb_fill,
     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -XXX,XX +XXX,XX @@ static const struct TCGCPUOps arm_v7m_tcg_ops = {

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = arm_cpu_record_sigsegv,
+    .record_sigbus = arm_cpu_record_sigbus,
 #else
     .tlb_fill = arm_cpu_tlb_fill,
     .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
     cpu_restore_state(cs, ra, true);
     arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
 }
+
+void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra)
+{
+    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
+}
 #endif /* !defined(CONFIG_USER_ONLY) */

--
2.25.1
We will raise SIGBUS directly from cpu_loop_exit_sigbus.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/hppa/cpu_loop.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUHPPAState *env)
             env->iaoq_f = env->gr[31];
             env->iaoq_b = env->gr[31] + 4;
             break;
-        case EXCP_UNALIGN:
-            info.si_signo = TARGET_SIGBUS;
-            info.si_errno = 0;
-            info.si_code = 0;
-            info._sifields._sigfault._addr = env->cr[CR_IOR];
-            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-            break;
         case EXCP_ILL:
         case EXCP_PRIV_OPR:
         case EXCP_PRIV_REG:

--
2.25.1
The kernel will fix up unaligned accesses, so emulate that
by allowing unaligned accesses to succeed.

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/translate.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
 }
 #endif

+#ifndef CONFIG_USER_ONLY
 static void record_unaligned_ess(DisasContext *dc, int rd,
                                  MemOp size, bool store)
 {
@@ -XXX,XX +XXX,XX @@ static void record_unaligned_ess(DisasContext *dc, int rd,

     tcg_set_insn_start_param(dc->insn_start, 1, iflags);
 }
+#endif

 static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
@@ -XXX,XX +XXX,XX @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
         }
     }

+    /*
+     * For system mode, enforce alignment if the cpu configuration
+     * requires it.  For user-mode, the Linux kernel will have fixed up
+     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+     */
+#ifndef CONFIG_USER_ONLY
     if (size > MO_8 &&
         (dc->tb_flags & MSR_EE) &&
         dc->cfg->unaligned_exceptions) {
         record_unaligned_ess(dc, rd, size, false);
         mop |= MO_ALIGN;
     }
+#endif

     tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

@@ -XXX,XX +XXX,XX @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
         }
     }

+    /*
+     * For system mode, enforce alignment if the cpu configuration
+     * requires it.  For user-mode, the Linux kernel will have fixed up
+     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
+     */
+#ifndef CONFIG_USER_ONLY
     if (size > MO_8 &&
         (dc->tb_flags & MSR_EE) &&
         dc->cfg->unaligned_exceptions) {
         record_unaligned_ess(dc, rd, size, true);
         mop |= MO_ALIGN;
     }
+#endif

     tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

--
2.25.1
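The effect of the MO_ALIGN bit can be seen in isolation: the same 32-bit
load either tolerates or traps on a misaligned address purely according
to the MemOp flags. A hedged illustration, not code from the patch
(dest, addr and mem_index are placeholder variables):

    /* user-only: no MO_ALIGN, unaligned addresses just work */
    tcg_gen_qemu_ld_i32(dest, addr, mem_index, MO_TEUL);

    /* sysemu with unaligned_exceptions: alignment is enforced */
    tcg_gen_qemu_ld_i32(dest, addr, mem_index, MO_TEUL | MO_ALIGN);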
By doing this while sending the exception, we will have already
done the unwinding, which makes the ppc_cpu_do_unaligned_access
code a bit cleaner.

Update the comment about the expected instruction format.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/excp_helper.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -XXX,XX +XXX,XX @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
             break;
         }
     case POWERPC_EXCP_ALIGN:     /* Alignment exception */
-        /* Get rS/rD and rA from faulting opcode */
         /*
-         * Note: the opcode fields will not be set properly for a
-         * direct store load/store, but nobody cares as nobody
-         * actually uses direct store segments.
+         * Get rS/rD and rA from faulting opcode.
+         * Note: We will only invoke ALIGN for atomic operations,
+         * so all instructions are X-form.
          */
-        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+        {
+            uint32_t insn = cpu_ldl_code(env, env->nip);
+            env->spr[SPR_DSISR] |= (insn & 0x03FF0000) >> 16;
+        }
         break;
     case POWERPC_EXCP_PROGRAM:   /* Program exception */
         switch (env->error_code & ~0xF) {
@@ -XXX,XX +XXX,XX @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                  int mmu_idx, uintptr_t retaddr)
 {
     CPUPPCState *env = cs->env_ptr;
-    uint32_t insn;
-
-    /* Restore state and reload the insn we executed, for filling in DSISR. */
-    cpu_restore_state(cs, retaddr, true);
-    insn = cpu_ldl_code(env, env->nip);

     cs->exception_index = POWERPC_EXCP_ALIGN;
-    env->error_code = insn & 0x03FF0000;
-    cpu_loop_exit(cs);
+    env->error_code = 0;
+    cpu_loop_exit_restore(cs, retaddr);
 }
 #endif

--
2.25.1
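For clarity, the field arithmetic used above: in an X-form instruction
the rS/rD field occupies bits 25-21 of the 32-bit opcode word and rA
bits 20-16, so a single mask-and-shift captures both for DSISR. A worked
sketch with hypothetical variable names:

    uint32_t insn = cpu_ldl_code(env, env->nip);
    uint32_t rt   = (insn >> 21) & 0x1f;               /* rS/rD */
    uint32_t ra   = (insn >> 16) & 0x1f;               /* rA    */
    uint32_t bits = (insn & 0x03FF0000) >> 16;         /* == (rt << 5) | ra */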
We ought to have been recording the virtual address for reporting
to the guest trap handler.

Cc: qemu-ppc@nongnu.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/excp_helper.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -XXX,XX +XXX,XX @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
 {
     CPUPPCState *env = cs->env_ptr;

+    switch (env->mmu_model) {
+    case POWERPC_MMU_SOFT_4xx:
+    case POWERPC_MMU_SOFT_4xx_Z:
+        env->spr[SPR_40x_DEAR] = vaddr;
+        break;
+    case POWERPC_MMU_BOOKE:
+    case POWERPC_MMU_BOOKE206:
+        env->spr[SPR_BOOKE_DEAR] = vaddr;
+        break;
+    default:
+        env->spr[SPR_DAR] = vaddr;
+        break;
+    }
+
     cs->exception_index = POWERPC_EXCP_ALIGN;
     env->error_code = 0;
     cpu_loop_exit_restore(cs, retaddr);

--
2.25.1
This is not used by, nor required by, user-only.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/internal.h    | 8 +++-----
 target/ppc/excp_helper.c | 8 +++-----
 2 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -XXX,XX +XXX,XX @@ void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
 void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
 void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);

-/* Raise a data fault alignment exception for the specified virtual address */
-void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
-                                 MMUAccessType access_type, int mmu_idx,
-                                 uintptr_t retaddr) QEMU_NORETURN;
-
 /* translate.c */

 int ppc_fixup_cpu(PowerPCCPU *cpu);
@@ -XXX,XX +XXX,XX @@ void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
+void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                 MMUAccessType access_type, int mmu_idx,
+                                 uintptr_t retaddr) QEMU_NORETURN;
 #endif

 #endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)

     book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
 }
-#endif
-#endif /* CONFIG_TCG */
-#endif
+#endif /* TARGET_PPC64 */

-#ifdef CONFIG_TCG
 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr)
@@ -XXX,XX +XXX,XX @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
     env->error_code = 0;
     cpu_loop_exit_restore(cs, retaddr);
 }
-#endif
+#endif /* CONFIG_TCG */
+#endif /* !CONFIG_USER_ONLY */

--
2.25.1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.h     |  2 -
 tcg/aarch64/tcg-target.c.inc | 91 +++++++++++++++++++++++++++++-------
 2 files changed, 74 insertions(+), 19 deletions(-)

diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {

 void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);

-#ifdef CONFIG_SOFTMMU
 #define TCG_TARGET_NEED_LDST_LABELS
-#endif
 #define TCG_TARGET_NEED_POOL_LABELS

 #endif /* AARCH64_TCG_TARGET_H */
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
  * See the COPYING file in the top-level directory for details.
  */

+#include "../tcg-ldst.c.inc"
 #include "../tcg-pool.c.inc"
 #include "qemu/bitops.h"

@@ -XXX,XX +XXX,XX @@ typedef enum {
     I3404_ANDI      = 0x12000000,
     I3404_ORRI      = 0x32000000,
     I3404_EORI      = 0x52000000,
+    I3404_ANDSI     = 0x72000000,

     /* Move wide immediate instructions. */
     I3405_MOVN      = 0x12800000,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
     if (offset == sextract64(offset, 0, 26)) {
         tcg_out_insn(s, 3206, B, offset);
     } else {
-        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
-        tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
+        /* Choose X9 as a call-clobbered non-LR temporary. */
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
+        tcg_out_insn(s, 3207, BR, TCG_REG_X9);
     }
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
     }
 }

-#ifdef CONFIG_SOFTMMU
-#include "../tcg-ldst.c.inc"
+static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
+{
+    ptrdiff_t offset = tcg_pcrel_diff(s, target);
+    tcg_debug_assert(offset == sextract64(offset, 0, 21));
+    tcg_out_insn(s, 3406, ADR, rd, offset);
+}
+
+#ifdef CONFIG_SOFTMMU
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     MemOpIdx oi, uintptr_t ra)
  */
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
 #endif
 };

-static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
-{
-    ptrdiff_t offset = tcg_pcrel_diff(s, target);
-    tcg_debug_assert(offset == sextract64(offset, 0, 21));
-    tcg_out_insn(s, 3406, ADR, rd, offset);
-}
-
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     MemOpIdx oi = lb->oi;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
     tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
 }

+#else
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
+                                   unsigned a_bits)
+{
+    unsigned a_mask = (1 << a_bits) - 1;
+    TCGLabelQemuLdst *label = new_ldst_label(s);
+
+    label->is_ld = is_ld;
+    label->addrlo_reg = addr_reg;
+
+    /* tst addr, #mask */
+    tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
+
+    label->label_ptr[0] = s->code_ptr;
+
+    /* b.ne slow_path */
+    tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
+
+    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
+}
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+    if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+        return false;
+    }
+
+    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg);
+    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
+
+    /* "Tail call" to the helper, with the return address back inline. */
+    tcg_out_adr(s, TCG_REG_LR, l->raddr);
+    tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld
+                                        : helper_unaligned_st));
+    return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+    return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+    return tcg_out_fail_alignment(s, l);
+}
 #endif /* CONFIG_SOFTMMU */

 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
                                    TCGReg data_r, TCGReg addr_r,
                                    TCGType otype, TCGReg off_r)
 {
-    /* Byte swapping is left to middle-end expansion. */
-    tcg_debug_assert((memop & MO_BSWAP) == 0);
-
     switch (memop & MO_SSIZE) {
     case MO_UB:
         tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
                                    TCGReg data_r, TCGReg addr_r,
                                    TCGType otype, TCGReg off_r)
 {
-    /* Byte swapping is left to middle-end expansion. */
-    tcg_debug_assert((memop & MO_BSWAP) == 0);
-
     switch (memop & MO_SIZE) {
     case MO_8:
         tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 {
     MemOp memop = get_memop(oi);
     const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((memop & MO_BSWAP) == 0);
+
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
+    unsigned a_bits = get_alignment_bits(memop);
+    if (a_bits) {
+        tcg_out_test_alignment(s, true, addr_reg, a_bits);
+    }
     if (USE_GUEST_BASE) {
         tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
                                TCG_REG_GUEST_BASE, otype, addr_reg);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 {
     MemOp memop = get_memop(oi);
     const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((memop & MO_BSWAP) == 0);
+
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE) == MO_64,
                         data_reg, addr_reg, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
+    unsigned a_bits = get_alignment_bits(memop);
+    if (a_bits) {
+        tcg_out_test_alignment(s, false, addr_reg, a_bits);
+    }
     if (USE_GUEST_BASE) {
         tcg_out_qemu_st_direct(s, memop, data_reg,
                                TCG_REG_GUEST_BASE, otype, addr_reg);
--
2.25.1

load_elf_ram_sym() will sign-extend 32 bit addresses. If a 32 bit QEMU
guest happens to be running in a hypervisor that is using 64 bits to
encode its address, kernel_entry can be padded with '1's and create
problems [1].

Using a translate_fn() callback in load_elf_ram_sym() to filter the
padding from the address doesn't work. A more detailed explanation can
be found in [2]. The short version is that glue(load_elf, SZ), from
include/hw/elf_ops.h, will calculate 'pentry' (mapped into the
'kernel_load_base' var in riscv_load_kernel()) before using
translate_fn(), and will not recalculate it after executing it. This
means that the callback does not prevent the padding from appearing
in kernel_load_base.

Let's instead use a kernel_low var to capture the 'lowaddr' value from
load_elf_ram_sym(), and return it when we're dealing with 32 bit CPUs.

[1] https://lists.gnu.org/archive/html/qemu-devel/2023-01/msg02281.html
[2] https://lists.gnu.org/archive/html/qemu-devel/2023-02/msg00099.html

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
---
 hw/riscv/boot.c            | 15 +++++++++++----
 hw/riscv/microchip_pfsoc.c |  3 ++-
 hw/riscv/opentitan.c       |  3 ++-
 hw/riscv/sifive_e.c        |  3 ++-
 hw/riscv/sifive_u.c        |  3 ++-
 hw/riscv/spike.c           |  3 ++-
 hw/riscv/virt.c            |  3 ++-
 include/hw/riscv/boot.h    |  1 +
 8 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_firmware(const char *firmware_filename,
 }

 target_ulong riscv_load_kernel(MachineState *machine,
+                               RISCVHartArrayState *harts,
                                target_ulong kernel_start_addr,
                                symbol_fn_t sym_cb)
 {
     const char *kernel_filename = machine->kernel_filename;
-    uint64_t kernel_load_base, kernel_entry;
+    uint64_t kernel_load_base, kernel_entry, kernel_low;

     g_assert(kernel_filename != NULL);

@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_kernel(MachineState *machine,
      * the (expected) load address load address. This allows kernels to have
      * separate SBI and ELF entry points (used by FreeBSD, for example).
      */
-    if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL,
-                         NULL, &kernel_load_base, NULL, NULL, 0,
+    if (load_elf_ram_sym(kernel_filename, NULL, NULL, NULL, NULL,
+                         &kernel_load_base, &kernel_low, NULL, 0,
                          EM_RISCV, 1, 0, NULL, true, sym_cb) > 0) {
-        return kernel_load_base;
+        kernel_entry = kernel_load_base;
+
+        if (riscv_is_32bit(harts)) {
+            kernel_entry = kernel_low;
+        }
+
+        return kernel_entry;
     }

     if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/microchip_pfsoc.c
+++ b/hw/riscv/microchip_pfsoc.c
@@ -XXX,XX +XXX,XX @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
         kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
                                                          firmware_end_addr);

-        kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
+        kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
+                                         kernel_start_addr, NULL);

         if (machine->initrd_filename) {
             riscv_load_initrd(machine, kernel_entry);
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -XXX,XX +XXX,XX @@ static void opentitan_board_init(MachineState *machine)
     }

     if (machine->kernel_filename) {
-        riscv_load_kernel(machine, memmap[IBEX_DEV_RAM].base, NULL);
+        riscv_load_kernel(machine, &s->soc.cpus,
+                          memmap[IBEX_DEV_RAM].base, NULL);
     }
 }

diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -XXX,XX +XXX,XX @@ static void sifive_e_machine_init(MachineState *machine)
                           memmap[SIFIVE_E_DEV_MROM].base, &address_space_memory);

     if (machine->kernel_filename) {
-        riscv_load_kernel(machine, memmap[SIFIVE_E_DEV_DTIM].base, NULL);
+        riscv_load_kernel(machine, &s->soc.cpus,
+                          memmap[SIFIVE_E_DEV_DTIM].base, NULL);
     }
 }

diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -XXX,XX +XXX,XX @@ static void sifive_u_machine_init(MachineState *machine)
         kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc.u_cpus,
                                                          firmware_end_addr);

-        kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
+        kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
+                                         kernel_start_addr, NULL);

         if (machine->initrd_filename) {
             riscv_load_initrd(machine, kernel_entry);
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -XXX,XX +XXX,XX @@ static void spike_board_init(MachineState *machine)
         kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
                                                          firmware_end_addr);

-        kernel_entry = riscv_load_kernel(machine, kernel_start_addr,
+        kernel_entry = riscv_load_kernel(machine, &s->soc[0],
+                                         kernel_start_addr,
                                          htif_symbol_callback);

         if (machine->initrd_filename) {
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -XXX,XX +XXX,XX @@ static void virt_machine_done(Notifier *notifier, void *data)
         kernel_start_addr = riscv_calc_kernel_start_addr(&s->soc[0],
                                                          firmware_end_addr);

-        kernel_entry = riscv_load_kernel(machine, kernel_start_addr, NULL);
+        kernel_entry = riscv_load_kernel(machine, &s->soc[0],
+                                         kernel_start_addr, NULL);

         if (machine->initrd_filename) {
             riscv_load_initrd(machine, kernel_entry);
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_firmware(const char *firmware_filename,
                                  hwaddr firmware_load_addr,
                                  symbol_fn_t sym_cb);
 target_ulong riscv_load_kernel(MachineState *machine,
+                               RISCVHartArrayState *harts,
                                target_ulong firmware_end_addr,
                                symbol_fn_t sym_cb);
 void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry);
--
2.39.1
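The sign-extension problem described in the commit message above can be
reproduced in a few lines of C (hypothetical entry-point value):

    uint32_t pentry  = 0x80200000;                 /* 32-bit ELF entry */
    uint64_t widened = (uint64_t)(int32_t)pentry;  /* 0xffffffff80200000 */
    uint64_t lowaddr = (uint64_t)pentry;           /* 0x0000000080200000 */

The zero-extended 'lowaddr' form corresponds to the 'kernel_low' value
the patch returns for 32-bit harts.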
For s390x, the only unaligned accesses that are signaled are atomic,
and we don't actually want to raise SIGBUS for those, but instead
raise a SPECIFICATION error, which the kernel will report as SIGILL.

Split out a do_unaligned_access function to share between the user-only
s390x_cpu_record_sigbus and the sysemu s390x_do_unaligned_access.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/s390x-internal.h  |  8 +++++---
 target/s390x/cpu.c             |  1 +
 target/s390x/tcg/excp_helper.c | 27 ++++++++++++++++++++-------
 3 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ ObjectClass *s390_cpu_class_by_name(const char *name);
 void s390x_cpu_debug_excp_handler(CPUState *cs);
 void s390_cpu_do_interrupt(CPUState *cpu);
 bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
-void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
-                                   MMUAccessType access_type, int mmu_idx,
-                                   uintptr_t retaddr) QEMU_NORETURN;

 #ifdef CONFIG_USER_ONLY
 void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
                              MMUAccessType access_type,
                              bool maperr, uintptr_t retaddr);
+void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
+                            MMUAccessType access_type, uintptr_t retaddr);
 #else
 bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
+void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                   MMUAccessType access_type, int mmu_idx,
+                                   uintptr_t retaddr) QEMU_NORETURN;
 #endif

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct TCGCPUOps s390_tcg_ops = {

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = s390_cpu_record_sigsegv,
+    .record_sigbus = s390_cpu_record_sigbus,
 #else
     .tlb_fill = s390_cpu_tlb_fill,
     .cpu_exec_interrupt = s390_cpu_exec_interrupt,
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
     tcg_s390_data_exception(env, dxc, GETPC());
 }

+/*
+ * Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
+ * this is only for the atomic operations, for which we want to raise a
+ * specification exception.
+ */
+static void QEMU_NORETURN do_unaligned_access(CPUState *cs, uintptr_t retaddr)
+{
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
+
+    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
+}
+
 #if defined(CONFIG_USER_ONLY)

 void s390_cpu_do_interrupt(CPUState *cs)
@@ -XXX,XX +XXX,XX @@ void s390_cpu_record_sigsegv(CPUState *cs, vaddr address,
     cpu_loop_exit_restore(cs, retaddr);
 }

+void s390_cpu_record_sigbus(CPUState *cs, vaddr address,
+                            MMUAccessType access_type, uintptr_t retaddr)
+{
+    do_unaligned_access(cs, retaddr);
+}
+
 #else /* !CONFIG_USER_ONLY */

 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
@@ -XXX,XX +XXX,XX @@ void s390x_cpu_debug_excp_handler(CPUState *cs)
     }
 }

-/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
-   this is only for the atomic operations, for which we want to raise a
-   specification exception.  */
 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                    MMUAccessType access_type,
                                    int mmu_idx, uintptr_t retaddr)
 {
-    S390CPU *cpu = S390_CPU(cs);
-    CPUS390XState *env = &cpu->env;
-
-    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
+    do_unaligned_access(cs, retaddr);
 }

 static void QEMU_NORETURN monitor_event(CPUS390XState *env,
--
2.25.1

The microchip_icicle_kit, sifive_u, spike and virt boards are now doing
the same steps when '-kernel' is used:

- execute load_kernel()
- load initrd()
- write kernel_cmdline

Let's fold everything inside riscv_load_kernel() to avoid code
repetition. To not change the behavior of boards that aren't calling
riscv_load_initrd(), add a 'load_initrd' flag to riscv_load_kernel()
and allow these boards to opt out from initrd loading.

Cc: Palmer Dabbelt <palmer@dabbelt.com>
Reviewed-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
---
 hw/riscv/boot.c            | 21 ++++++++++++++++++---
 hw/riscv/microchip_pfsoc.c | 11 +----------
 hw/riscv/opentitan.c       |  3 ++-
 hw/riscv/sifive_e.c        |  3 ++-
 hw/riscv/sifive_u.c        | 11 +----------
 hw/riscv/spike.c           | 11 +----------
 hw/riscv/virt.c            | 11 +----------
 include/hw/riscv/boot.h    |  1 +
 8 files changed, 27 insertions(+), 45 deletions(-)

diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_firmware(const char *firmware_filename,
 target_ulong riscv_load_kernel(MachineState *machine,
                                RISCVHartArrayState *harts,
                                target_ulong kernel_start_addr,
+                               bool load_initrd,
                                symbol_fn_t sym_cb)
 {
     const char *kernel_filename = machine->kernel_filename;
     uint64_t kernel_load_base, kernel_entry, kernel_low;
+    void *fdt = machine->fdt;

     g_assert(kernel_filename != NULL);

@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_kernel(MachineState *machine,
         kernel_entry = kernel_low;
     }

-        return kernel_entry;
+        goto out;
     }

     if (load_uimage_as(kernel_filename, &kernel_entry, NULL, NULL,
                        NULL, NULL, NULL) > 0) {
-        return kernel_entry;
+        goto out;
     }

     if (load_image_targphys_as(kernel_filename, kernel_start_addr,
                                current_machine->ram_size, NULL) > 0) {
-        return kernel_start_addr;
+        kernel_entry = kernel_start_addr;
+        goto out;
     }

     error_report("could not load kernel '%s'", kernel_filename);
     exit(1);
+
+out:
+    if (load_initrd && machine->initrd_filename) {
+        riscv_load_initrd(machine, kernel_entry);
+    }
+
+    if (fdt && machine->kernel_cmdline && *machine->kernel_cmdline) {
+        qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+                                machine->kernel_cmdline);
+    }
+
+    return kernel_entry;
 }

 void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/microchip_pfsoc.c
+++ b/hw/riscv/microchip_pfsoc.c
@@ -XXX,XX +XXX,XX @@ static void microchip_icicle_kit_machine_init(MachineState *machine)
                                                          firmware_end_addr);

         kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
-                                         kernel_start_addr, NULL);
-
-        if (machine->initrd_filename) {
-            riscv_load_initrd(machine, kernel_entry);
-        }
-
-        if (machine->kernel_cmdline && *machine->kernel_cmdline) {
-            qemu_fdt_setprop_string(machine->fdt, "/chosen",
-                                    "bootargs", machine->kernel_cmdline);
-        }
+                                         kernel_start_addr, true, NULL);

         /* Compute the fdt load address in dram */
         fdt_load_addr = riscv_compute_fdt_addr(memmap[MICROCHIP_PFSOC_DRAM_LO].base,
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -XXX,XX +XXX,XX @@ static void opentitan_board_init(MachineState *machine)

     if (machine->kernel_filename) {
         riscv_load_kernel(machine, &s->soc.cpus,
-                          memmap[IBEX_DEV_RAM].base, NULL);
+                          memmap[IBEX_DEV_RAM].base,
+                          false, NULL);
     }
 }

diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/sifive_e.c
+++ b/hw/riscv/sifive_e.c
@@ -XXX,XX +XXX,XX @@ static void sifive_e_machine_init(MachineState *machine)

     if (machine->kernel_filename) {
         riscv_load_kernel(machine, &s->soc.cpus,
-                          memmap[SIFIVE_E_DEV_DTIM].base, NULL);
+                          memmap[SIFIVE_E_DEV_DTIM].base,
+                          false, NULL);
     }
 }

diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -XXX,XX +XXX,XX @@ static void sifive_u_machine_init(MachineState *machine)
                                                          firmware_end_addr);

         kernel_entry = riscv_load_kernel(machine, &s->soc.u_cpus,
-                                         kernel_start_addr, NULL);
-
-        if (machine->initrd_filename) {
-            riscv_load_initrd(machine, kernel_entry);
-        }
-
-        if (machine->kernel_cmdline && *machine->kernel_cmdline) {
-            qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
-                                    machine->kernel_cmdline);
-        }
+                                         kernel_start_addr, true, NULL);
     } else {
        /*
         * If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -XXX,XX +XXX,XX @@ static void spike_board_init(MachineState *machine)

         kernel_entry = riscv_load_kernel(machine, &s->soc[0],
                                          kernel_start_addr,
-                                         htif_symbol_callback);
-
-        if (machine->initrd_filename) {
-            riscv_load_initrd(machine, kernel_entry);
-        }
-
-        if (machine->kernel_cmdline && *machine->kernel_cmdline) {
-            qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
-                                    machine->kernel_cmdline);
-        }
+                                         true, htif_symbol_callback);
     } else {
        /*
         * If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -XXX,XX +XXX,XX @@ static void virt_machine_done(Notifier *notifier, void *data)
                                                          firmware_end_addr);

         kernel_entry = riscv_load_kernel(machine, &s->soc[0],
-                                         kernel_start_addr, NULL);
-
-        if (machine->initrd_filename) {
-            riscv_load_initrd(machine, kernel_entry);
-        }
-
-        if (machine->kernel_cmdline && *machine->kernel_cmdline) {
-            qemu_fdt_setprop_string(machine->fdt, "/chosen", "bootargs",
-                                    machine->kernel_cmdline);
-        }
+                                         kernel_start_addr, true, NULL);
     } else {
        /*
         * If dynamic firmware is used, it doesn't know where is the next mode
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_firmware(const char *firmware_filename,
 target_ulong riscv_load_kernel(MachineState *machine,
                                RISCVHartArrayState *harts,
                                target_ulong firmware_end_addr,
+                               bool load_initrd,
                                symbol_fn_t sym_cb);
 void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry);
 uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
--
2.39.1
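The guest-visible consequence of the s390x patch above can be checked
with a tiny test in the spirit of the series' tests/tcg/multiarch
sigbus.c. A sketch; per the commit message, the expected outcome on
s390x is SIGILL (via PGM_SPECIFICATION) rather than SIGBUS:

    #include <stdint.h>
    #include <stdalign.h>

    static alignas(8) char buf[16];

    int main(void)
    {
        uint32_t *p = (uint32_t *)(buf + 1);   /* deliberately misaligned */
        __sync_fetch_and_add(p, 1);            /* atomic op, MO_ALIGN path */
        return 0;
    }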
We will raise SIGBUS directly from cpu_loop_exit_sigbus.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/ppc/cpu_loop.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/ppc/cpu_loop.c
+++ b/linux-user/ppc/cpu_loop.c
@@ -XXX,XX +XXX,XX @@ void cpu_loop(CPUPPCState *env)
             cpu_abort(cs, "External interrupt while in user mode. "
                       "Aborting\n");
             break;
-        case POWERPC_EXCP_ALIGN:    /* Alignment exception */
-            /* XXX: check this */
-            info.si_signo = TARGET_SIGBUS;
-            info.si_errno = 0;
-            info.si_code = TARGET_BUS_ADRALN;
-            info._sifields._sigfault._addr = env->nip;
-            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
-            break;
         case POWERPC_EXCP_PROGRAM:  /* Program exception */
         case POWERPC_EXCP_HV_EMU:   /* HV emulation */
             /* XXX: check this */

--
2.25.1
We ought to have been recording the virtual address for reporting
to the guest trap handler.

Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sh4/op_helper.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/op_helper.c
+++ b/target/sh4/op_helper.c
@@ -XXX,XX +XXX,XX @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                     MMUAccessType access_type,
                                     int mmu_idx, uintptr_t retaddr)
 {
+    CPUSH4State *env = cs->env_ptr;
+
+    env->tea = addr;
     switch (access_type) {
     case MMU_INST_FETCH:
     case MMU_DATA_LOAD:
@@ -XXX,XX +XXX,XX @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     case MMU_DATA_STORE:
         cs->exception_index = 0x100;
         break;
+    default:
+        g_assert_not_reached();
     }
     cpu_loop_exit_restore(cs, retaddr);
 }

--
2.25.1
The printf should have been qemu_log_mask, the parameters
themselves no longer compile, and because this is placed
before unwinding the PC is actively wrong.

We get better (and correct) logging on the other side of
raising the exception, in sparc_cpu_do_interrupt.

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/ldst_helper.c | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -XXX,XX +XXX,XX @@

 //#define DEBUG_MMU
 //#define DEBUG_MXCC
-//#define DEBUG_UNALIGNED
 //#define DEBUG_UNASSIGNED
 //#define DEBUG_ASI
 //#define DEBUG_CACHE_CONTROL
@@ -XXX,XX +XXX,XX @@ static void do_check_align(CPUSPARCState *env, target_ulong addr,
                            uint32_t align, uintptr_t ra)
 {
     if (addr & align) {
-#ifdef DEBUG_UNALIGNED
-        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
-               "\n", addr, env->pc);
-#endif
         cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
     }
 }
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     SPARCCPU *cpu = SPARC_CPU(cs);
     CPUSPARCState *env = &cpu->env;

-#ifdef DEBUG_UNALIGNED
-    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
-           "\n", addr, env->pc);
-#endif
     cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
 }
 #endif

--
2.25.1
diff view generated by jsdifflib
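Had the logging been kept rather than deleted, the modern form of the
removed printf would be qemu_log_mask(); a sketch, assuming CPU_LOG_INT as
the mask (the one sparc_cpu_do_interrupt() already logs under):

qemu_log_mask(CPU_LOG_INT,
              "Unaligned access to 0x" TARGET_FMT_lx " from 0x"
              TARGET_FMT_lx "\n", addr, env->pc);
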
The only remaining caller is riscv_load_kernel_and_initrd() which
belongs to the same file.

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
hw/riscv/boot.c | 80 ++++++++++++++++++++---------------
include/hw/riscv/boot.h | 1 -
2 files changed, 40 insertions(+), 41 deletions(-)

diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_firmware(const char *firmware_filename,
exit(1);
}

+static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
+{
+ const char *filename = machine->initrd_filename;
+ uint64_t mem_size = machine->ram_size;
+ void *fdt = machine->fdt;
+ hwaddr start, end;
+ ssize_t size;
+
+ g_assert(filename != NULL);
+
+ /*
+ * We want to put the initrd far enough into RAM that when the
+ * kernel is uncompressed it will not clobber the initrd. However
+ * on boards without much RAM we must ensure that we still leave
+ * enough room for a decent sized initrd, and on boards with large
+ * amounts of RAM we must avoid the initrd being so far up in RAM
+ * that it is outside lowmem and inaccessible to the kernel.
+ * So for boards with less than 256MB of RAM we put the initrd
+ * halfway into RAM, and for boards with 256MB of RAM or more we put
+ * the initrd at 128MB.
+ */
+ start = kernel_entry + MIN(mem_size / 2, 128 * MiB);
+
+ size = load_ramdisk(filename, start, mem_size - start);
+ if (size == -1) {
+ size = load_image_targphys(filename, start, mem_size - start);
+ if (size == -1) {
+ error_report("could not load ramdisk '%s'", filename);
+ exit(1);
+ }
+ }
+
+ /* Some RISC-V machines (e.g. opentitan) don't have a fdt. */
+ if (fdt) {
+ end = start + size;
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", start);
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", end);
+ }
+}
+
target_ulong riscv_load_kernel(MachineState *machine,
RISCVHartArrayState *harts,
target_ulong kernel_start_addr,
@@ -XXX,XX +XXX,XX @@ out:
return kernel_entry;
}

-void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
-{
- const char *filename = machine->initrd_filename;
- uint64_t mem_size = machine->ram_size;
- void *fdt = machine->fdt;
- hwaddr start, end;
- ssize_t size;
-
- g_assert(filename != NULL);
-
- /*
- * We want to put the initrd far enough into RAM that when the
- * kernel is uncompressed it will not clobber the initrd. However
- * on boards without much RAM we must ensure that we still leave
- * enough room for a decent sized initrd, and on boards with large
- * amounts of RAM we must avoid the initrd being so far up in RAM
- * that it is outside lowmem and inaccessible to the kernel.
- * So for boards with less than 256MB of RAM we put the initrd
- * halfway into RAM, and for boards with 256MB of RAM or more we put
- * the initrd at 128MB.
- */
- start = kernel_entry + MIN(mem_size / 2, 128 * MiB);
-
- size = load_ramdisk(filename, start, mem_size - start);
- if (size == -1) {
- size = load_image_targphys(filename, start, mem_size - start);
- if (size == -1) {
- error_report("could not load ramdisk '%s'", filename);
- exit(1);
- }
- }
-
- /* Some RISC-V machines (e.g. opentitan) don't have a fdt. */
- if (fdt) {
- end = start + size;
- qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", start);
- qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", end);
- }
-}
-
/*
* This function makes an assumption that the DRAM interval
* 'dram_base' + 'dram_size' is contiguous.
diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/riscv/boot.h
+++ b/include/hw/riscv/boot.h
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_load_kernel(MachineState *machine,
target_ulong firmware_end_addr,
bool load_initrd,
symbol_fn_t sym_cb);
-void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry);
uint64_t riscv_compute_fdt_addr(hwaddr dram_start, uint64_t dram_size,
MachineState *ms);
void riscv_load_fdt(hwaddr fdt_addr, void *fdt);
--
2.39.1

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/mmu_helper.c | 72 +++++++++++++++++++++++++--------------
1 file changed, 46 insertions(+), 26 deletions(-)

diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -XXX,XX +XXX,XX @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
return 0;
}

+static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw)
+{
+ uint64_t sfsr = SFSR_VALID_BIT;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ sfsr |= SFSR_CT_NOTRANS;
+ break;
+ case MMU_USER_IDX:
+ case MMU_KERNEL_IDX:
+ sfsr |= SFSR_CT_PRIMARY;
+ break;
+ case MMU_USER_SECONDARY_IDX:
+ case MMU_KERNEL_SECONDARY_IDX:
+ sfsr |= SFSR_CT_SECONDARY;
+ break;
+ case MMU_NUCLEUS_IDX:
+ sfsr |= SFSR_CT_NUCLEUS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (rw == 1) {
+ sfsr |= SFSR_WRITE_BIT;
+ } else if (rw == 4) {
+ sfsr |= SFSR_NF_BIT;
+ }
+
+ if (env->pstate & PS_PRIV) {
+ sfsr |= SFSR_PR_BIT;
+ }
+
+ if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
+ sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+
+ return sfsr;
+}
+
static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
int *prot, MemTxAttrs *attrs,
target_ulong address, int rw, int mmu_idx)
{
CPUState *cs = env_cpu(env);
unsigned int i;
+ uint64_t sfsr;
uint64_t context;
- uint64_t sfsr = 0;
bool is_user = false;

+ sfsr = build_sfsr(env, mmu_idx, rw);
+
switch (mmu_idx) {
case MMU_PHYS_IDX:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
/* fallthru */
case MMU_KERNEL_IDX:
context = env->dmmu.mmu_primary_context & 0x1fff;
- sfsr |= SFSR_CT_PRIMARY;
break;
case MMU_USER_SECONDARY_IDX:
is_user = true;
/* fallthru */
case MMU_KERNEL_SECONDARY_IDX:
context = env->dmmu.mmu_secondary_context & 0x1fff;
- sfsr |= SFSR_CT_SECONDARY;
break;
- case MMU_NUCLEUS_IDX:
- sfsr |= SFSR_CT_NUCLEUS;
- /* FALLTHRU */
default:
context = 0;
break;
}

- if (rw == 1) {
- sfsr |= SFSR_WRITE_BIT;
- } else if (rw == 4) {
- sfsr |= SFSR_NF_BIT;
- }
-
for (i = 0; i < 64; i++) {
/* ctx match, vaddr match, valid? */
if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
@@ -XXX,XX +XXX,XX @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
return 0;
}

- if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
- sfsr |= SFSR_OW_BIT; /* overflow (not read before
- another fault) */
- }
-
- if (env->pstate & PS_PRIV) {
- sfsr |= SFSR_PR_BIT;
- }
-
- /* FIXME: ASI field in SFSR must be set */
- env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;
-
+ env->dmmu.sfsr = sfsr;
env->dmmu.sfar = address; /* Fault address register */
-
env->dmmu.tag_access = (address & ~0x1fffULL) | context;
-
return 1;
}
}
--
2.25.1

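To make the initrd placement rule above concrete, a standalone sketch (not
QEMU code, just the same arithmetic) that evaluates
start = kernel_entry + MIN(mem_size / 2, 128 * MiB) for a small and a
large board; the kernel_entry value is an assumption for the example:

#include <stdint.h>
#include <stdio.h>

#define MiB (1024 * 1024ULL)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t kernel_entry = 0x80200000;
    uint64_t ram[] = { 128 * MiB, 1024 * MiB };

    for (int i = 0; i < 2; i++) {
        /* 128MB board -> initrd at entry + 64MB (halfway into RAM);
         * 1GB board -> initrd at entry + 128MB (the lowmem cap). */
        uint64_t start = kernel_entry + MIN(ram[i] / 2, 128 * MiB);
        printf("ram=%4lluMB -> initrd at 0x%llx\n",
               (unsigned long long)(ram[i] / MiB),
               (unsigned long long)start);
    }
    return 0;
}
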
Deleted patch

We ought to have been recording the virtual address for reporting
to the guest trap handler. Move the function to mmu_helper.c, so
that we can re-use code shared with get_physical_address_data.

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/ldst_helper.c | 13 -------------
target/sparc/mmu_helper.c | 20 ++++++++++++++++++++
2 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -XXX,XX +XXX,XX @@ void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
is_asi, size, retaddr);
}
#endif
-
-#if !defined(CONFIG_USER_ONLY)
-void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type,
- int mmu_idx,
- uintptr_t retaddr)
-{
- SPARCCPU *cpu = SPARC_CPU(cs);
- CPUSPARCState *env = &cpu->env;
-
- cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
-}
-#endif
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -XXX,XX +XXX,XX @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
return phys_addr;
}
+
+#ifndef CONFIG_USER_ONLY
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+#ifdef TARGET_SPARC64
+ env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
+ env->dmmu.sfar = addr;
+#else
+ env->mmuregs[4] = addr;
+#endif
+
+ cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
+}
+#endif /* !CONFIG_USER_ONLY */
--
2.25.1

Deleted patch

Use the new cpu_loop_exit_sigbus for atomic_mmu_lookup, which
has access to complete alignment info from the MemOpIdx arg.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/user-exec.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
+ MemOp mop = get_memop(oi);
+ int a_bits = get_alignment_bits(mop);
+ void *ret;
+
+ /* Enforce guest required alignment. */
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
+ MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
+ cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
+ }
+
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
- void *ret = g2h(env_cpu(env), addr);
+
+ ret = g2h(env_cpu(env), addr);
set_helper_retaddr(retaddr);
return ret;
}
--
2.25.1

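The guest alignment test above is a power-of-two mask check; a standalone
sketch of the same arithmetic (a_bits = N means the access must be
2^N-byte aligned):

#include <assert.h>
#include <stdint.h>

static int is_misaligned(uint64_t addr, int a_bits)
{
    /* The low a_bits of the address must all be zero. */
    return (addr & ((1ull << a_bits) - 1)) != 0;
}

int main(void)
{
    assert(!is_misaligned(0x1000, 4)); /* 16-byte aligned: ok */
    assert(is_misaligned(0x1008, 4));  /* 8 mod 16: would raise SIGBUS */
    assert(!is_misaligned(0x1008, 3)); /* but 8-byte aligned: ok */
    return 0;
}
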
Deleted patch

Cc: qemu-arm@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/helper-a64.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);

/*
* High and low need to be switched here because this is not actually a
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
newv = int128_make128(new_lo, new_hi);
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
newv = int128_make128(new_lo, new_hi);
--
2.25.1

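The old and new operands request the same 16-byte alignment; only the new
one records the true 128-bit access size. A sketch of why, assuming the
get_alignment_bits() and MemOp definitions in QEMU's include/exec/memop.h:

#include "exec/memop.h"
#include <assert.h>

static void memop_equivalence_check(void)
{
    /* MO_ALIGN asks for natural alignment, which for a MO_128
     * (16-byte) access is 2^4 = 16 bytes... */
    assert(get_alignment_bits(MO_LE | MO_128 | MO_ALIGN) == 4);
    /* ...exactly what the old MO_ALIGN_16 spelled out explicitly,
     * while MO_LEQ under-reported the access size as 8 bytes. */
    assert(get_alignment_bits(MO_LEQ | MO_ALIGN_16) == 4);
}
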
Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/i386/tcg/mem_helper.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/mem_helper.c
+++ b/target/i386/tcg/mem_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);

if (int128_eq(oldv, cmpv)) {
--
2.25.1

Deleted patch

Cc: qemu-ppc@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/ppc/translate.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_std(DisasContext *ctx)
if (HAVE_ATOMIC128) {
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128,
+ ctx->mem_idx));
gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
} else {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128,
+ ctx->mem_idx));
gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
}
tcg_temp_free_i32(oi);
@@ -XXX,XX +XXX,XX @@ static void gen_lqarx(DisasContext *ctx)
if (HAVE_ATOMIC128) {
TCGv_i32 oi = tcg_temp_new_i32();
if (ctx->le_mode) {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
ctx->mem_idx));
gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
} else {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
+ tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
ctx->mem_idx));
gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
}
@@ -XXX,XX +XXX,XX @@ static void gen_stqcx_(DisasContext *ctx)

if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
if (HAVE_CMPXCHG128) {
- TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
+ TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
if (ctx->le_mode) {
gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
EA, lo, hi, oi);
--
2.25.1

Deleted patch

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/tcg/mem_helper.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
assert(HAVE_CMPXCHG128);

mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
fail = !int128_eq(oldv, cmpv);

@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
} else if (HAVE_CMPXCHG128) {
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
} else {
--
2.25.1

Deleted patch

The function is trivial for user-only, but still must be present.

Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/cpu.h | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
#endif
}

+static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+#error System mode not supported on Hexagon yet
+#endif
+}
+
typedef struct CPUHexagonState CPUArchState;
typedef HexagonCPU ArchCPU;

--
2.25.1

Deleted patch

These functions are much closer to the softmmu helper
functions, in that they take the complete MemOpIdx,
and from that they may enforce required alignment.

The previous cpu_ldst.h functions did not have alignment info,
and so did not enforce it. Retain this by adding MO_UNALN to
the MemOp that we create in calling the new functions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
docs/devel/loads-stores.rst | 52 ++++-
include/exec/cpu_ldst.h | 245 ++++++++--------------
accel/tcg/cputlb.c | 392 ++++++++++++------------------
accel/tcg/user-exec.c | 390 +++++++++++++++--------------
accel/tcg/ldst_common.c.inc | 307 ++++++++++++++++++++++++++++
5 files changed, 722 insertions(+), 664 deletions(-)
create mode 100644 accel/tcg/ldst_common.c.inc

diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/loads-stores.rst
+++ b/docs/devel/loads-stores.rst
@@ -XXX,XX +XXX,XX @@ Regexes for git grep
- ``\<ldn_\([hbl]e\)?_p\>``
- ``\<stn_\([hbl]e\)?_p\>``

-``cpu_{ld,st}*_mmuidx_ra``
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+``cpu_{ld,st}*_mmu``
+~~~~~~~~~~~~~~~~~~~~

-These functions operate on a guest virtual address plus a context,
-known as a "mmu index" or ``mmuidx``, which controls how that virtual
-address is translated. The meaning of the indexes are target specific,
-but specifying a particular index might be necessary if, for instance,
-the helper requires an "always as non-privileged" access rather that
-the default access for the current state of the guest CPU.
+These functions operate on a guest virtual address, plus a context
+known as a "mmu index" which controls how that virtual address is
+translated, plus a ``MemOp`` which contains alignment requirements
+among other things. The ``MemOp`` and mmu index are combined into
+a single argument of type ``MemOpIdx``.
+
+The meaning of the indexes are target specific, but specifying a
+particular index might be necessary if, for instance, the helper
+requires a "always as non-privileged" access rather than the
+default access for the current state of the guest CPU.

These functions may cause a guest CPU exception to be taken
(e.g. for an alignment fault or MMU fault) which will result in
@@ -XXX,XX +XXX,XX @@ function, which is a return address into the generated code [#gpc]_.

Function names follow the pattern:

+load: ``cpu_ld{size}{end}_mmu(env, ptr, oi, retaddr)``
+
+store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)``
+
+``size``
+ - ``b`` : 8 bits
+ - ``w`` : 16 bits
+ - ``l`` : 32 bits
+ - ``q`` : 64 bits
+
+``end``
+ - (empty) : for target endian, or 8 bit sizes
+ - ``_be`` : big endian
+ - ``_le`` : little endian
+
+Regexes for git grep:
+ - ``\<cpu_ld[bwlq](_[bl]e)\?_mmu\>``
+ - ``\<cpu_st[bwlq](_[bl]e)\?_mmu\>``
+
+
+``cpu_{ld,st}*_mmuidx_ra``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These functions work like the ``cpu_{ld,st}_mmu`` functions except
+that the ``mmuidx`` parameter is not combined with a ``MemOp``,
+and therefore there is no required alignment supplied or enforced.
+
+Function names follow the pattern:
+
load: ``cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmuidx, retaddr)``

store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
@@ -XXX,XX +XXX,XX @@ of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.

These are generally the preferred way to do accesses by guest
virtual address from helper functions, unless the access should
-be performed with a context other than the default.
+be performed with a context other than the default, or alignment
+should be enforced for the access.

Function names follow the pattern:

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@
* load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
* cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
* cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
+ * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
*
* store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
* cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
* cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
+ * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
*
* sign is:
* (empty): for 32 and 64 bit sizes
@@ -XXX,XX +XXX,XX @@
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
* the index to use; the "data" and "code" suffixes take the index from
* cpu_mmu_index().
+ *
+ * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
+ * MemOp including alignment requirements. The alignment will be enforced.
*/
#ifndef CPU_LDST_H
#define CPU_LDST_H

+#include "exec/memopidx.h"
+
#if defined(CONFIG_USER_ONLY)
/* sparc32plus has 64bit long but 32bit space address
* this can make bad result with g2h() and h2g()
@@ -XXX,XX +XXX,XX @@ typedef target_ulong abi_ptr;

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
-
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
-
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
-
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
-
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
-
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
-
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t ra);
-
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t ra);

+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
+ int mmu_idx, uintptr_t ra);
+
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ int mmu_idx, uintptr_t ra);
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ int mmu_idx, uintptr_t ra);
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr,
+ MemOpIdx oi, uintptr_t ra);
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra);
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra);
+
#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
helper_retaddr = 0;
}

-/*
- * Provide the same *_mmuidx_ra interface as for softmmu.
- * The mmu_idx argument is ignored.
- */
-
-static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldub_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsb_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_lduw_be_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsw_be_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldl_be_data_ra(env, addr, ra);
-}
-
-static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldq_be_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_lduw_le_data_ra(env, addr, ra);
-}
-
-static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldsw_le_data_ra(env, addr, ra);
-}
-
-static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldl_le_data_ra(env, addr, ra);
-}
-
-static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_ldq_le_data_ra(env, addr, ra);
-}
-
-static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx, uintptr_t ra)
-{
- cpu_stb_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stw_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stl_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stq_be_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stw_le_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint32_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stl_le_data_ra(env, addr, val, ra);
-}
-
-static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- uint64_t val, int mmu_idx,
- uintptr_t ra)
-{
- cpu_stq_le_data_ra(env, addr, val, ra);
-}
-
#else

/* Needed for TCG_OVERSIZED_GUEST */
@@ -XXX,XX +XXX,XX @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}

-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-
-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-
-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra);
-
-void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-
-void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
-void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr);
-void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr);
-
#endif /* defined(CONFIG_USER_ONLY) */

#ifdef TARGET_WORDS_BIGENDIAN
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra
+# define cpu_ldw_mmu cpu_ldw_be_mmu
+# define cpu_ldl_mmu cpu_ldl_be_mmu
+# define cpu_ldq_mmu cpu_ldq_be_mmu
# define cpu_stw_data cpu_stw_be_data
# define cpu_stl_data cpu_stl_be_data
# define cpu_stq_data cpu_stq_be_data
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra
+# define cpu_stw_mmu cpu_stw_be_mmu
+# define cpu_stl_mmu cpu_stl_be_mmu
+# define cpu_stq_mmu cpu_stq_be_mmu
#else
# define cpu_lduw_data cpu_lduw_le_data
# define cpu_ldsw_data cpu_ldsw_le_data
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra
+# define cpu_ldw_mmu cpu_ldw_le_mmu
+# define cpu_ldl_mmu cpu_ldl_le_mmu
+# define cpu_ldq_mmu cpu_ldq_le_mmu
# define cpu_stw_data cpu_stw_le_data
# define cpu_stl_data cpu_stl_le_data
# define cpu_stq_data cpu_stq_le_data
@@ -XXX,XX +XXX,XX @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
# define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra
+# define cpu_stw_mmu cpu_stw_le_mmu
+# define cpu_stl_mmu cpu_stl_le_mmu
+# define cpu_stq_mmu cpu_stq_le_mmu
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}

+/*
+ * Verify that we have passed the correct MemOp to the correct function.
+ *
+ * In the case of the helper_*_mmu functions, we will have done this by
+ * using the MemOp to look up the helper during code generation.
+ *
+ * In the case of the cpu_*_mmu functions, this is up to the caller.
+ * We could present one function to target code, and dispatch based on
+ * the MemOp, but so far we have worked hard to avoid an indirect function
+ * call along the memory path.
+ */
+static void validate_memop(MemOpIdx oi, MemOp expected)
+{
+#ifdef CONFIG_DEBUG_TCG
+ MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
+ assert(have == expected);
+#endif
+}
+
/*
* Load Helpers
*
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_UB);
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_LEUW);
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
full_le_lduw_mmu);
}
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_BEUW);
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
full_be_lduw_mmu);
}
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_LEUL);
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
full_le_ldul_mmu);
}
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_BEUL);
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
full_be_ldul_mmu);
}
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_LEQ);
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
helper_le_ldq_mmu);
}
@@ -XXX,XX +XXX,XX @@ uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_BEQ);
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
helper_be_ldq_mmu);
}
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
*/

static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t retaddr,
- MemOp op, FullLoadHelper *full_load)
+ MemOpIdx oi, uintptr_t retaddr,
+ FullLoadHelper *full_load)
{
- MemOpIdx oi = make_memop_idx(op, mmu_idx);
uint64_t ret;

trace_guest_ld_before_exec(env_cpu(env), addr, oi);
-
ret = full_load(env, addr, oi, retaddr);
-
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-
return ret;
}

-uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
+ return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
}

-int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
+ return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
}

-uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
+ return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
}

-int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
+ return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu);
}

-uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
+ return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
}

-uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
+ return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
}

-uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
-}
-
-int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
-}
-
-uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
-}
-
-uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
- int mmu_idx, uintptr_t ra)
-{
- return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
-}
-
-uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
-{
- return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
-{
- return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
-{
- return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
- uintptr_t retaddr)
-{
- return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
-}
-
-uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldub_data_ra(env, ptr, 0);
-}
-
-int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldsb_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_lduw_be_data_ra(env, ptr, 0);
-}
-
-int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldsw_be_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldl_be_data_ra(env, ptr, 0);
-}
-
-uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldq_be_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_lduw_le_data_ra(env, ptr, 0);
-}
-
-int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldsw_le_data_ra(env, ptr, 0);
-}
-
-uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldl_le_data_ra(env, ptr, 0);
-}
-
-uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
-{
- return cpu_ldq_le_data_ra(env, ptr, 0);
+ return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
}

/*
@@ -XXX,XX +XXX,XX @@ store_memop(void *haddr, uint64_t val, MemOp op)
}
}

+static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr);
+
static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
for (i = 0; i < size; ++i) {
/* Big-endian extract. */
uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+ full_stb_mmu(env, addr + i, val8, oi, retaddr);
}
} else {
for (i = 0; i < size; ++i) {
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
- helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
+ full_stb_mmu(env, addr + i, val8, oi, retaddr);
}
}
}
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
store_memop(haddr, val, op);
}

-void __attribute__((noinline))
-helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- MemOpIdx oi, uintptr_t retaddr)
+static void __attribute__((noinline))
+full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_UB);
store_helper(env, addr, val, oi, retaddr, MO_UB);
}

+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ full_stb_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ validate_memop(oi, MO_LEUW);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUW);
+}
+
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEUW);
+ full_le_stw_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ validate_memop(oi, MO_BEUW);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEUW);
+ full_be_stw_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ validate_memop(oi, MO_LEUL);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_LEUL);
+ full_le_stl_mmu(env, addr, val, oi, retaddr);
+}
+
+static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ validate_memop(oi, MO_BEUL);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- store_helper(env, addr, val, oi, retaddr, MO_BEUL);
+ full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_LEQ);
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
+ validate_memop(oi, MO_BEQ);
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}

@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
* Store Helpers for cpu_ldst.h
*/

-static inline void QEMU_ALWAYS_INLINE
-cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr, MemOp op)
+typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
+ uint64_t val, MemOpIdx oi, uintptr_t retaddr);
+
+static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
+ uint64_t val, MemOpIdx oi, uintptr_t ra,
+ FullStoreHelper *full_store)
{
- MemOpIdx oi = make_memop_idx(op, mmu_idx);
-
trace_guest_st_before_exec(env_cpu(env), addr, oi);
-
- store_helper(env, addr, val, oi, retaddr, op);
-
+ full_store(env, addr, val, oi, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

-void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
+ cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

-void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
+ cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

-void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
+ cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

-void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
+ cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

-void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
+ cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

-void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
+ cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

-void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
- int mmu_idx, uintptr_t retaddr)
+void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
+ cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

-void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
- uint32_t val, uintptr_t retaddr)
-{
- cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
- uint32_t val, uintptr_t retaddr)
-{
- cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
- uint32_t val, uintptr_t retaddr)
-{
- cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
- uint64_t val, uintptr_t retaddr)
-{
- cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
- uint32_t val, uintptr_t retaddr)
-{
- cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
- uint32_t val, uintptr_t retaddr)
-{
- cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
- uint64_t val, uintptr_t retaddr)
-{
- cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
-}
-
-void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
- cpu_stb_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
- cpu_stw_be_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
- cpu_stl_be_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
-{
- cpu_stq_be_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
- cpu_stw_le_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
-{
- cpu_stl_le_data_ra(env, ptr, val, 0);
-}
-
-void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
-{
- cpu_stq_le_data_ra(env, ptr, val, 0);
-}
+#include "ldst_common.c.inc"

/*
* First set of functions passes in OI and RETADDR.
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,

/* The softmmu versions of these helpers are in cputlb.c. */

-uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
+/*
+ * Verify that we have passed the correct MemOp to the correct function.
+ *
+ * We could present one function to target code, and dispatch based on
+ * the MemOp, but so far we have worked hard to avoid an indirect function
+ * call along the memory path.
+ */
+static void validate_memop(MemOpIdx oi, MemOp expected)
{
- MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
- uint32_t ret;
1060
+#ifdef CONFIG_DEBUG_TCG
1061
+ MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1062
+ assert(have == expected);
1063
+#endif
1064
+}
1065
1066
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1067
- ret = ldub_p(g2h(env_cpu(env), ptr));
1068
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1069
+static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
1070
+ MemOpIdx oi, uintptr_t ra, MMUAccessType type)
1071
+{
1072
+ MemOp mop = get_memop(oi);
1073
+ int a_bits = get_alignment_bits(mop);
1074
+ void *ret;
1075
+
1076
+ /* Enforce guest required alignment. */
1077
+ if (unlikely(addr & ((1 << a_bits) - 1))) {
1078
+ cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
1079
+ }
1080
+
1081
+ ret = g2h(env_cpu(env), addr);
1082
+ set_helper_retaddr(ra);
1083
return ret;
1084
}
1085
1086
-int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
1087
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
1088
+ MemOpIdx oi, uintptr_t ra)
1089
{
1090
- return (int8_t)cpu_ldub_data(env, ptr);
1091
-}
1092
+ void *haddr;
1093
+ uint8_t ret;
1094
1095
-uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
1096
-{
1097
- MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
1098
- uint32_t ret;
1099
-
1100
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1101
- ret = lduw_be_p(g2h(env_cpu(env), ptr));
1102
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1103
+ validate_memop(oi, MO_UB);
1104
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1105
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1106
+ ret = ldub_p(haddr);
1107
+ clear_helper_retaddr();
1108
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1109
return ret;
1110
}
1111
1112
-int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
1113
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
1114
+ MemOpIdx oi, uintptr_t ra)
1115
{
1116
- return (int16_t)cpu_lduw_be_data(env, ptr);
1117
-}
1118
+ void *haddr;
1119
+ uint16_t ret;
1120
1121
-uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
1122
-{
1123
- MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
1124
- uint32_t ret;
1125
-
1126
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1127
- ret = ldl_be_p(g2h(env_cpu(env), ptr));
1128
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1129
+ validate_memop(oi, MO_BEUW);
1130
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1131
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1132
+ ret = lduw_be_p(haddr);
1133
+ clear_helper_retaddr();
1134
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1135
return ret;
1136
}
1137
1138
-uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
1139
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
1140
+ MemOpIdx oi, uintptr_t ra)
1141
{
1142
- MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
1143
+ void *haddr;
1144
+ uint32_t ret;
1145
+
1146
+ validate_memop(oi, MO_BEUL);
1147
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1148
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1149
+ ret = ldl_be_p(haddr);
1150
+ clear_helper_retaddr();
1151
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1152
+ return ret;
1153
+}
1154
+
1155
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
1156
+ MemOpIdx oi, uintptr_t ra)
1157
+{
1158
+ void *haddr;
1159
uint64_t ret;
1160
1161
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1162
- ret = ldq_be_p(g2h(env_cpu(env), ptr));
1163
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1164
+ validate_memop(oi, MO_BEQ);
1165
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1166
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1167
+ ret = ldq_be_p(haddr);
1168
+ clear_helper_retaddr();
1169
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1170
return ret;
1171
}
1172
1173
-uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
1174
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
1175
+ MemOpIdx oi, uintptr_t ra)
1176
{
1177
- MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
1178
+ void *haddr;
1179
+ uint16_t ret;
1180
+
1181
+ validate_memop(oi, MO_LEUW);
1182
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1183
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1184
+ ret = lduw_le_p(haddr);
1185
+ clear_helper_retaddr();
1186
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1187
+ return ret;
1188
+}
1189
+
1190
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
1191
+ MemOpIdx oi, uintptr_t ra)
1192
+{
1193
+ void *haddr;
1194
uint32_t ret;
1195
1196
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1197
- ret = lduw_le_p(g2h(env_cpu(env), ptr));
1198
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1199
+ validate_memop(oi, MO_LEUL);
1200
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1201
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1202
+ ret = ldl_le_p(haddr);
1203
+ clear_helper_retaddr();
1204
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1205
return ret;
1206
}
1207
1208
-int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
1209
+uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
1210
+ MemOpIdx oi, uintptr_t ra)
1211
{
1212
- return (int16_t)cpu_lduw_le_data(env, ptr);
1213
-}
1214
-
1215
-uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
1216
-{
1217
- MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
1218
- uint32_t ret;
1219
-
1220
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1221
- ret = ldl_le_p(g2h(env_cpu(env), ptr));
1222
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1223
- return ret;
1224
-}
1225
-
1226
-uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
1227
-{
1228
- MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
1229
+ void *haddr;
1230
uint64_t ret;
1231
1232
- trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
1233
- ret = ldq_le_p(g2h(env_cpu(env), ptr));
1234
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
1235
+ validate_memop(oi, MO_LEQ);
1236
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
1237
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
1238
+ ret = ldq_le_p(haddr);
1239
+ clear_helper_retaddr();
1240
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1241
return ret;
1242
}
1243
1244
-uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1245
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
1246
+ MemOpIdx oi, uintptr_t ra)
1247
{
1248
- uint32_t ret;
1249
+ void *haddr;
1250
1251
- set_helper_retaddr(retaddr);
1252
- ret = cpu_ldub_data(env, ptr);
1253
+ validate_memop(oi, MO_UB);
1254
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1255
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1256
+ stb_p(haddr, val);
1257
clear_helper_retaddr();
1258
- return ret;
1259
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1260
}
1261
1262
-int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1263
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1264
+ MemOpIdx oi, uintptr_t ra)
1265
{
1266
- return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
1267
-}
1268
+ void *haddr;
1269
1270
-uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1271
-{
1272
- uint32_t ret;
1273
-
1274
- set_helper_retaddr(retaddr);
1275
- ret = cpu_lduw_be_data(env, ptr);
1276
+ validate_memop(oi, MO_BEUW);
1277
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1278
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1279
+ stw_be_p(haddr, val);
1280
clear_helper_retaddr();
1281
- return ret;
1282
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1283
}
1284
1285
-int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1286
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1287
+ MemOpIdx oi, uintptr_t ra)
1288
{
1289
- return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
1290
-}
1291
+ void *haddr;
1292
1293
-uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1294
-{
1295
- uint32_t ret;
1296
-
1297
- set_helper_retaddr(retaddr);
1298
- ret = cpu_ldl_be_data(env, ptr);
1299
+ validate_memop(oi, MO_BEUL);
1300
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1301
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1302
+ stl_be_p(haddr, val);
1303
clear_helper_retaddr();
1304
- return ret;
1305
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1306
}
1307
1308
-uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1309
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1310
+ MemOpIdx oi, uintptr_t ra)
1311
{
1312
- uint64_t ret;
1313
+ void *haddr;
1314
1315
- set_helper_retaddr(retaddr);
1316
- ret = cpu_ldq_be_data(env, ptr);
1317
+ validate_memop(oi, MO_BEQ);
1318
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1319
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1320
+ stq_be_p(haddr, val);
1321
clear_helper_retaddr();
1322
- return ret;
1323
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1324
}
1325
1326
-uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1327
+void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1328
+ MemOpIdx oi, uintptr_t ra)
1329
{
1330
- uint32_t ret;
1331
+ void *haddr;
1332
1333
- set_helper_retaddr(retaddr);
1334
- ret = cpu_lduw_le_data(env, ptr);
1335
+ validate_memop(oi, MO_LEUW);
1336
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1337
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1338
+ stw_le_p(haddr, val);
1339
clear_helper_retaddr();
1340
- return ret;
1341
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1342
}
1343
1344
-int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1345
+void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1346
+ MemOpIdx oi, uintptr_t ra)
1347
{
1348
- return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
1349
-}
1350
+ void *haddr;
1351
1352
-uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1353
-{
1354
- uint32_t ret;
1355
-
1356
- set_helper_retaddr(retaddr);
1357
- ret = cpu_ldl_le_data(env, ptr);
1358
+ validate_memop(oi, MO_LEUL);
1359
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1360
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1361
+ stl_le_p(haddr, val);
1362
clear_helper_retaddr();
1363
- return ret;
1364
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1365
}
1366
1367
-uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
1368
+void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1369
+ MemOpIdx oi, uintptr_t ra)
1370
{
1371
- uint64_t ret;
1372
+ void *haddr;
1373
1374
- set_helper_retaddr(retaddr);
1375
- ret = cpu_ldq_le_data(env, ptr);
1376
- clear_helper_retaddr();
1377
- return ret;
1378
-}
1379
-
1380
-void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1381
-{
1382
- MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
1383
-
1384
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1385
- stb_p(g2h(env_cpu(env), ptr), val);
1386
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1387
-}
1388
-
1389
-void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1390
-{
1391
- MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
1392
-
1393
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1394
- stw_be_p(g2h(env_cpu(env), ptr), val);
1395
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1396
-}
1397
-
1398
-void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1399
-{
1400
- MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
1401
-
1402
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1403
- stl_be_p(g2h(env_cpu(env), ptr), val);
1404
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1405
-}
1406
-
1407
-void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1408
-{
1409
- MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
1410
-
1411
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1412
- stq_be_p(g2h(env_cpu(env), ptr), val);
1413
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1414
-}
1415
-
1416
-void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1417
-{
1418
- MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
1419
-
1420
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1421
- stw_le_p(g2h(env_cpu(env), ptr), val);
1422
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1423
-}
1424
-
1425
-void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
1426
-{
1427
- MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
1428
-
1429
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1430
- stl_le_p(g2h(env_cpu(env), ptr), val);
1431
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1432
-}
1433
-
1434
-void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
1435
-{
1436
- MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
1437
-
1438
- trace_guest_st_before_exec(env_cpu(env), ptr, oi);
1439
- stq_le_p(g2h(env_cpu(env), ptr), val);
1440
- qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
1441
-}
1442
-
1443
-void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
1444
- uint32_t val, uintptr_t retaddr)
1445
-{
1446
- set_helper_retaddr(retaddr);
1447
- cpu_stb_data(env, ptr, val);
1448
- clear_helper_retaddr();
1449
-}
1450
-
1451
-void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
1452
- uint32_t val, uintptr_t retaddr)
1453
-{
1454
- set_helper_retaddr(retaddr);
1455
- cpu_stw_be_data(env, ptr, val);
1456
- clear_helper_retaddr();
1457
-}
1458
-
1459
-void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
1460
- uint32_t val, uintptr_t retaddr)
1461
-{
1462
- set_helper_retaddr(retaddr);
1463
- cpu_stl_be_data(env, ptr, val);
1464
- clear_helper_retaddr();
1465
-}
1466
-
1467
-void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
1468
- uint64_t val, uintptr_t retaddr)
1469
-{
1470
- set_helper_retaddr(retaddr);
1471
- cpu_stq_be_data(env, ptr, val);
1472
- clear_helper_retaddr();
1473
-}
1474
-
1475
-void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
1476
- uint32_t val, uintptr_t retaddr)
1477
-{
1478
- set_helper_retaddr(retaddr);
1479
- cpu_stw_le_data(env, ptr, val);
1480
- clear_helper_retaddr();
1481
-}
1482
-
1483
-void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
1484
- uint32_t val, uintptr_t retaddr)
1485
-{
1486
- set_helper_retaddr(retaddr);
1487
- cpu_stl_le_data(env, ptr, val);
1488
- clear_helper_retaddr();
1489
-}
1490
-
1491
-void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
1492
- uint64_t val, uintptr_t retaddr)
1493
-{
1494
- set_helper_retaddr(retaddr);
1495
- cpu_stq_le_data(env, ptr, val);
1496
+ validate_memop(oi, MO_LEQ);
1497
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
1498
+ haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1499
+ stq_le_p(haddr, val);
1500
clear_helper_retaddr();
1501
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1502
}
1503
1504
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
1505
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
1506
return ret;
1507
}
1508
1509
+#include "ldst_common.c.inc"
1510
+
1511
/*
1512
* Do not allow unaligned operations to proceed. Return the host address.
1513
*
1514
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
1515
new file mode 100644
1516
index XXXXXXX..XXXXXXX
1517
--- /dev/null
1518
+++ b/accel/tcg/ldst_common.c.inc
1519
@@ -XXX,XX +XXX,XX @@
1520
+/*
1521
+ * Routines common to user and system emulation of load/store.
1522
+ *
1523
+ * Copyright (c) 2003 Fabrice Bellard
1524
+ *
1525
+ * SPDX-License-Identifier: GPL-2.0-or-later
1526
+ *
1527
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
1528
+ * See the COPYING file in the top-level directory.
1529
+ */
1530
+
1531
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1532
+ int mmu_idx, uintptr_t ra)
1533
+{
1534
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
1535
+ return cpu_ldb_mmu(env, addr, oi, ra);
1536
+}
1537
+
1538
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1539
+ int mmu_idx, uintptr_t ra)
1540
+{
1541
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
1542
+}
1543
+
1544
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1545
+ int mmu_idx, uintptr_t ra)
1546
+{
1547
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
1548
+ return cpu_ldw_be_mmu(env, addr, oi, ra);
1549
+}
1550
+
1551
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1552
+ int mmu_idx, uintptr_t ra)
1553
+{
1554
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
1555
+}
1556
+
1557
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1558
+ int mmu_idx, uintptr_t ra)
1559
+{
1560
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
1561
+ return cpu_ldl_be_mmu(env, addr, oi, ra);
1562
+}
1563
+
1564
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1565
+ int mmu_idx, uintptr_t ra)
1566
+{
1567
+ MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
1568
+ return cpu_ldq_be_mmu(env, addr, oi, ra);
1569
+}
1570
+
1571
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1572
+ int mmu_idx, uintptr_t ra)
1573
+{
1574
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
1575
+ return cpu_ldw_le_mmu(env, addr, oi, ra);
1576
+}
1577
+
1578
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1579
+ int mmu_idx, uintptr_t ra)
1580
+{
1581
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
1582
+}
1583
+
1584
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1585
+ int mmu_idx, uintptr_t ra)
1586
+{
1587
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
1588
+ return cpu_ldl_le_mmu(env, addr, oi, ra);
1589
+}
1590
+
1591
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1592
+ int mmu_idx, uintptr_t ra)
1593
+{
1594
+ MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
1595
+ return cpu_ldq_le_mmu(env, addr, oi, ra);
1596
+}
1597
+
1598
+void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1599
+ int mmu_idx, uintptr_t ra)
1600
+{
1601
+ MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
1602
+ cpu_stb_mmu(env, addr, val, oi, ra);
1603
+}
1604
+
1605
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1606
+ int mmu_idx, uintptr_t ra)
1607
+{
1608
+ MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
1609
+ cpu_stw_be_mmu(env, addr, val, oi, ra);
1610
+}
1611
+
1612
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1613
+ int mmu_idx, uintptr_t ra)
1614
+{
1615
+ MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
1616
+ cpu_stl_be_mmu(env, addr, val, oi, ra);
1617
+}
1618
+
1619
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
1620
+ int mmu_idx, uintptr_t ra)
1621
+{
1622
+ MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
1623
+ cpu_stq_be_mmu(env, addr, val, oi, ra);
1624
+}
1625
+
1626
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1627
+ int mmu_idx, uintptr_t ra)
1628
+{
1629
+ MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
1630
+ cpu_stw_le_mmu(env, addr, val, oi, ra);
1631
+}
1632
+
1633
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
1634
+ int mmu_idx, uintptr_t ra)
1635
+{
1636
+ MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
1637
+ cpu_stl_le_mmu(env, addr, val, oi, ra);
1638
+}
1639
+
1640
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
1641
+ int mmu_idx, uintptr_t ra)
1642
+{
1643
+ MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
1644
+ cpu_stq_le_mmu(env, addr, val, oi, ra);
1645
+}
1646
+
1647
+/*--------------------------*/
1648
+
1649
+uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1650
+{
1651
+ return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1652
+}
1653
+
1654
+int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1655
+{
1656
+ return (int8_t)cpu_ldub_data_ra(env, addr, ra);
1657
+}
1658
+
1659
+uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1660
+{
1661
+ return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1662
+}
1663
+
1664
+int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1665
+{
1666
+ return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
1667
+}
1668
+
1669
+uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1670
+{
1671
+ return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1672
+}
1673
+
1674
+uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1675
+{
1676
+ return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1677
+}
1678
+
1679
+uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1680
+{
1681
+ return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1682
+}
1683
+
1684
+int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1685
+{
1686
+ return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
1687
+}
1688
+
1689
+uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1690
+{
1691
+ return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1692
+}
1693
+
1694
+uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
1695
+{
1696
+ return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
1697
+}
1698
+
1699
+void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
1700
+ uint32_t val, uintptr_t ra)
1701
+{
1702
+ cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1703
+}
1704
+
1705
+void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
1706
+ uint32_t val, uintptr_t ra)
1707
+{
1708
+ cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1709
+}
1710
+
1711
+void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
1712
+ uint32_t val, uintptr_t ra)
1713
+{
1714
+ cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1715
+}
1716
+
1717
+void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
1718
+ uint64_t val, uintptr_t ra)
1719
+{
1720
+ cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1721
+}
1722
+
1723
+void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
1724
+ uint32_t val, uintptr_t ra)
1725
+{
1726
+ cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1727
+}
1728
+
1729
+void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
1730
+ uint32_t val, uintptr_t ra)
1731
+{
1732
+ cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1733
+}
1734
+
1735
+void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
1736
+ uint64_t val, uintptr_t ra)
1737
+{
1738
+ cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
1739
+}
1740
+
1741
+/*--------------------------*/
1742
+
1743
+uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
1744
+{
1745
+ return cpu_ldub_data_ra(env, addr, 0);
1746
+}
1747
+
1748
+int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
1749
+{
1750
+ return (int8_t)cpu_ldub_data(env, addr);
1751
+}
1752
+
1753
+uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
1754
+{
1755
+ return cpu_lduw_be_data_ra(env, addr, 0);
1756
+}
1757
+
1758
+int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
1759
+{
1760
+ return (int16_t)cpu_lduw_be_data(env, addr);
1761
+}
1762
+
1763
+uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
1764
+{
1765
+ return cpu_ldl_be_data_ra(env, addr, 0);
1766
+}
1767
+
1768
+uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
1769
+{
1770
+ return cpu_ldq_be_data_ra(env, addr, 0);
1771
+}
1772
+
1773
+uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
1774
+{
1775
+ return cpu_lduw_le_data_ra(env, addr, 0);
1776
+}
1777
+
1778
+int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
1779
+{
1780
+ return (int16_t)cpu_lduw_le_data(env, addr);
1781
+}
1782
+
1783
+uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
1784
+{
1785
+ return cpu_ldl_le_data_ra(env, addr, 0);
1786
+}
1787
+
1788
+uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
1789
+{
1790
+ return cpu_ldq_le_data_ra(env, addr, 0);
1791
+}
1792
+
1793
+void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1794
+{
1795
+ cpu_stb_data_ra(env, addr, val, 0);
1796
+}
1797
+
1798
+void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1799
+{
1800
+ cpu_stw_be_data_ra(env, addr, val, 0);
1801
+}
1802
+
1803
+void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1804
+{
1805
+ cpu_stl_be_data_ra(env, addr, val, 0);
1806
+}
1807
+
1808
+void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
1809
+{
1810
+ cpu_stq_be_data_ra(env, addr, val, 0);
1811
+}
1812
+
1813
+void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1814
+{
1815
+ cpu_stw_le_data_ra(env, addr, val, 0);
1816
+}
1817
+
1818
+void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
1819
+{
1820
+ cpu_stl_le_data_ra(env, addr, val, 0);
1821
+}
1822
+
1823
+void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
1824
+{
1825
+ cpu_stq_le_data_ra(env, addr, val, 0);
1826
+}
1827
--
1828
2.25.1
1829
1830
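For context, here is a minimal sketch (not part of the patch; the helper
name is hypothetical) of how a target helper can use the new cpu_*_mmu
interface. The MemOpIdx packs the memop and mmu index together, so a
MO_ALIGN request is enforced even in user-only emulation, and GETPC()
lets a faulting access unwind back to the guest instruction:

    uint32_t helper_example_ldl(CPUArchState *env, target_ulong addr)
    {
        /* Request a little-endian 4-byte load with natural alignment. */
        MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN,
                                     cpu_mmu_index(env, false));
        return cpu_ldl_le_mmu(env, addr, oi, GETPC());
    }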
Deleted patch
The previous placement in tcg/tcg.h was not logical.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h       | 87 +++++++++++++++++++++++++++++++++++
 include/tcg/tcg.h             | 87 -----------------------------------
 target/arm/helper-a64.c       |  1 -
 target/m68k/op_helper.c       |  1 -
 target/ppc/mem_helper.c       |  1 -
 target/s390x/tcg/mem_helper.c |  1 -
 6 files changed, 87 insertions(+), 91 deletions(-)

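To illustrate the effect of the move (hypothetical example, not part of
the patch): a target helper now reaches the cpu_atomic_* entry points
through exec/cpu_ldst.h alone, without pulling in tcg/tcg.h:

    #include "exec/cpu_ldst.h"

    uint32_t helper_example_xchg(CPUArchState *env, target_ulong addr,
                                 uint32_t val)
    {
        /* Atomics require alignment; MO_ALIGN makes that explicit. */
        MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN,
                                     cpu_mmu_index(env, false));
        return cpu_atomic_xchgl_le_mmu(env, addr, val, oi, GETPC());
    }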
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@
 #define CPU_LDST_H
 
 #include "exec/memopidx.h"
+#include "qemu/int128.h"
 
 #if defined(CONFIG_USER_ONLY)
 /* sparc32plus has 64bit long but 32bit space address
@@ -XXX,XX +XXX,XX @@ void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
 void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                     MemOpIdx oi, uintptr_t ra);
 
+uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
+                                 uint32_t cmpv, uint32_t newv,
+                                 MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
+                                    uint64_t cmpv, uint64_t newv,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint32_t cmpv, uint32_t newv,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
+                                    uint64_t cmpv, uint64_t newv,
+                                    MemOpIdx oi, uintptr_t retaddr);
+
+#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
+TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu            \
+    (CPUArchState *env, target_ulong addr, TYPE val,  \
+     MemOpIdx oi, uintptr_t retaddr);
+
+#ifdef CONFIG_ATOMIC64
+#define GEN_ATOMIC_HELPER_ALL(NAME)          \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
+    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
+    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
+#else
+#define GEN_ATOMIC_HELPER_ALL(NAME)          \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
+    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
+#endif
+
+GEN_ATOMIC_HELPER_ALL(fetch_add)
+GEN_ATOMIC_HELPER_ALL(fetch_sub)
+GEN_ATOMIC_HELPER_ALL(fetch_and)
+GEN_ATOMIC_HELPER_ALL(fetch_or)
+GEN_ATOMIC_HELPER_ALL(fetch_xor)
+GEN_ATOMIC_HELPER_ALL(fetch_smin)
+GEN_ATOMIC_HELPER_ALL(fetch_umin)
+GEN_ATOMIC_HELPER_ALL(fetch_smax)
+GEN_ATOMIC_HELPER_ALL(fetch_umax)
+
+GEN_ATOMIC_HELPER_ALL(add_fetch)
+GEN_ATOMIC_HELPER_ALL(sub_fetch)
+GEN_ATOMIC_HELPER_ALL(and_fetch)
+GEN_ATOMIC_HELPER_ALL(or_fetch)
+GEN_ATOMIC_HELPER_ALL(xor_fetch)
+GEN_ATOMIC_HELPER_ALL(smin_fetch)
+GEN_ATOMIC_HELPER_ALL(umin_fetch)
+GEN_ATOMIC_HELPER_ALL(smax_fetch)
+GEN_ATOMIC_HELPER_ALL(umax_fetch)
+
+GEN_ATOMIC_HELPER_ALL(xchg)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
+Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
+                                  Int128 cmpv, Int128 newv,
+                                  MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
+                                  Int128 cmpv, Int128 newv,
+                                  MemOpIdx oi, uintptr_t retaddr);
+
+Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
+                             MemOpIdx oi, uintptr_t retaddr);
+Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
+                             MemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                           MemOpIdx oi, uintptr_t retaddr);
+void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+                           MemOpIdx oi, uintptr_t retaddr);
+
 #if defined(CONFIG_USER_ONLY)
 
 extern __thread uintptr_t helper_retaddr;
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@
 #include "qemu/queue.h"
 #include "tcg/tcg-mo.h"
 #include "tcg-target.h"
-#include "qemu/int128.h"
 #include "tcg/tcg-cond.h"
 
 /* XXX: make safe guess about sizes */
@@ -XXX,XX +XXX,XX @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 #endif
 #endif /* CONFIG_SOFTMMU */
 
-uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
-                                 uint32_t cmpv, uint32_t newv,
-                                 MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
-                                    uint32_t cmpv, uint32_t newv,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
-                                    uint32_t cmpv, uint32_t newv,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
-                                    uint64_t cmpv, uint64_t newv,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
-                                    uint32_t cmpv, uint32_t newv,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
-                                    uint32_t cmpv, uint32_t newv,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
-                                    uint64_t cmpv, uint64_t newv,
-                                    MemOpIdx oi, uintptr_t retaddr);
-
-#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
-TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu            \
-    (CPUArchState *env, target_ulong addr, TYPE val,  \
-     MemOpIdx oi, uintptr_t retaddr);
-
-#ifdef CONFIG_ATOMIC64
-#define GEN_ATOMIC_HELPER_ALL(NAME)          \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
-    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
-    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
-#else
-#define GEN_ATOMIC_HELPER_ALL(NAME)          \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
-    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
-#endif
-
-GEN_ATOMIC_HELPER_ALL(fetch_add)
-GEN_ATOMIC_HELPER_ALL(fetch_sub)
-GEN_ATOMIC_HELPER_ALL(fetch_and)
-GEN_ATOMIC_HELPER_ALL(fetch_or)
-GEN_ATOMIC_HELPER_ALL(fetch_xor)
-GEN_ATOMIC_HELPER_ALL(fetch_smin)
-GEN_ATOMIC_HELPER_ALL(fetch_umin)
-GEN_ATOMIC_HELPER_ALL(fetch_smax)
-GEN_ATOMIC_HELPER_ALL(fetch_umax)
-
-GEN_ATOMIC_HELPER_ALL(add_fetch)
-GEN_ATOMIC_HELPER_ALL(sub_fetch)
-GEN_ATOMIC_HELPER_ALL(and_fetch)
-GEN_ATOMIC_HELPER_ALL(or_fetch)
-GEN_ATOMIC_HELPER_ALL(xor_fetch)
-GEN_ATOMIC_HELPER_ALL(smin_fetch)
-GEN_ATOMIC_HELPER_ALL(umin_fetch)
-GEN_ATOMIC_HELPER_ALL(smax_fetch)
-GEN_ATOMIC_HELPER_ALL(umax_fetch)
-
-GEN_ATOMIC_HELPER_ALL(xchg)
-
-#undef GEN_ATOMIC_HELPER_ALL
-#undef GEN_ATOMIC_HELPER
-
-Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
-                                  Int128 cmpv, Int128 newv,
-                                  MemOpIdx oi, uintptr_t retaddr);
-Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
-                                  Int128 cmpv, Int128 newv,
-                                  MemOpIdx oi, uintptr_t retaddr);
-
-Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
-                             MemOpIdx oi, uintptr_t retaddr);
-Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
-                             MemOpIdx oi, uintptr_t retaddr);
-void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                           MemOpIdx oi, uintptr_t retaddr);
-void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                           MemOpIdx oi, uintptr_t retaddr);
-
 #ifdef CONFIG_DEBUG_TCG
 void tcg_assert_listed_vecop(TCGOpcode);
 #else
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "qemu/int128.h"
 #include "qemu/atomic128.h"
-#include "tcg/tcg.h"
 #include "fpu/softfloat.h"
 #include <zlib.h> /* For crc32 */
 
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
 #include "semihosting/semihost.h"
-#include "tcg/tcg.h"
 
 #if !defined(CONFIG_USER_ONLY)
 
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/helper-proto.h"
 #include "helper_regs.h"
 #include "exec/cpu_ldst.h"
-#include "tcg/tcg.h"
 #include "internal.h"
 #include "qemu/atomic128.h"
 
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "qemu/int128.h"
 #include "qemu/atomic128.h"
-#include "tcg/tcg.h"
 #include "trace.h"
 
 #if !defined(CONFIG_USER_ONLY)
-- 
2.25.1

Deleted patch
We should not have been using the helper_ret_* set of
functions, as they are supposed to be private to tcg.
Nor should we have been using the plain cpu_*_data set
of functions, as they do not handle unwinding properly.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/msa_helper.c | 420 +++++++++++------------------------
 1 file changed, 135 insertions(+), 285 deletions(-)

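The pattern being applied is: capture the return address once at helper
entry and pass it to the cpu_*_data_ra accessors, so a faulting access
can unwind to the guest instruction. A simplified sketch (illustrative
only, mirroring the helper_msa_ld_d conversion below):

    void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd, target_ulong addr)
    {
        wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
        uintptr_t ra = GETPC();   /* must be taken in the outermost helper */

        pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
        pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
    }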
diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
                      target_ulong addr)
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
-    MEMOP_IDX(DF_BYTE)
-#if !defined(CONFIG_USER_ONLY)
+    uintptr_t ra = GETPC();
+
 #if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
-    pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
-    pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
-    pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
-    pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
-    pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
-    pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
-    pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
-    pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
-    pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
-    pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
-    pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
-    pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
-    pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
-    pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
-    pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
+    pwd->b[0] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
+    pwd->b[1] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
+    pwd->b[2] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
+    pwd->b[3] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
+    pwd->b[4] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
+    pwd->b[5] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
+    pwd->b[6] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
+    pwd->b[7] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
+    pwd->b[8] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
+    pwd->b[9] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
+    pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
+    pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
+    pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
+    pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
+    pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
+    pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
 #else
-    pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
-    pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
-    pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
-    pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
-    pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
-    pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
-    pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
-    pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
-    pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
-    pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
-    pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
-    pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
-    pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
-    pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
-    pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
-    pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
-#endif
-#else
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
-    pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
-    pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
-    pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
-    pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
-    pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
-    pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
-    pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
-    pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
-    pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
-    pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
-    pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
-    pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
-    pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
-    pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
-    pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
-#else
-    pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
-    pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
-    pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
-    pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
-    pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
-    pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
-    pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
-    pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
-    pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
-    pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
-    pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
-    pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
-    pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
-    pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
-    pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
-    pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
-#endif
+    pwd->b[0] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
+    pwd->b[1] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
+    pwd->b[2] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
+    pwd->b[3] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
+    pwd->b[4] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
+    pwd->b[5] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
+    pwd->b[6] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
+    pwd->b[7] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
+    pwd->b[8] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
+    pwd->b[9] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
+    pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
+    pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
+    pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
+    pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
+    pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
+    pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
                      target_ulong addr)
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
-    MEMOP_IDX(DF_HALF)
-#if !defined(CONFIG_USER_ONLY)
+    uintptr_t ra = GETPC();
+
 #if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
-    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
-    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
-    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
-    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
-    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
-    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
-    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
+    pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
+    pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
+    pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
+    pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
+    pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
+    pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
+    pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
+    pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
 #else
-    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
-    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
-    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
-    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
-    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
-    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
-    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
-    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
-#endif
-#else
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
-    pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
-    pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
-    pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
-    pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
-    pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
-    pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
-    pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
-#else
-    pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
-    pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
-    pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
-    pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
-    pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
-    pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
-    pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
-    pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
-#endif
+    pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
+    pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
+    pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
+    pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
+    pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
+    pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
+    pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
+    pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
                      target_ulong addr)
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
-    MEMOP_IDX(DF_WORD)
-#if !defined(CONFIG_USER_ONLY)
+    uintptr_t ra = GETPC();
+
 #if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
-    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
-    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
-    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
+    pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
+    pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
+    pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
+    pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
 #else
-    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
-    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
-    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
-    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
-#endif
-#else
-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
-    pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
-    pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
-    pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
-#else
-    pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
-    pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
-    pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
-    pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
-#endif
+    pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
+    pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
+    pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
+    pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
                      target_ulong addr)
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
-    MEMOP_IDX(DF_DOUBLE)
-#if !defined(CONFIG_USER_ONLY)
-    pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
-    pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
-#else
-    pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
-    pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
-#endif
+    uintptr_t ra = GETPC();
+
+    pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
+    pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
 }
 
 #define MSA_PAGESPAN(x) \
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     int mmu_idx = cpu_mmu_index(env, false);
+    uintptr_t ra = GETPC();
+
+    ensure_writable_pages(env, addr, mmu_idx, ra);
 
-    MEMOP_IDX(DF_BYTE)
-    ensure_writable_pages(env, addr, mmu_idx, GETPC());
-#if !defined(CONFIG_USER_ONLY)
 #if !defined(HOST_WORDS_BIGENDIAN)
-    helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
+    cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[0], ra);
+    cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[1], ra);
+    cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[2], ra);
+    cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[3], ra);
+    cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[4], ra);
+    cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[5], ra);
+    cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[6], ra);
+    cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[7], ra);
+    cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[8], ra);
+    cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[9], ra);
+    cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
+    cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
+    cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
+    cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
+    cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
+    cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
 #else
-    helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC());
-    helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC());
-#endif
-#else
-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]);
-    cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]);
-    cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]);
-    cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]);
-    cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]);
-    cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]);
-    cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]);
-    cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]);
-    cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]);
-    cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]);
-    cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
-    cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
-    cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
-    cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
-    cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
-    cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
-#else
-    cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]);
-    cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]);
-    cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]);
-    cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]);
-    cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]);
-    cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]);
-    cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]);
-    cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]);
-    cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
-    cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
-    cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
-    cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
-    cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
-    cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
-    cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]);
-    cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]);
-#endif
+    cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[0], ra);
+    cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[1], ra);
+    cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[2], ra);
+    cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[3], ra);
359
+ cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[4], ra);
360
+ cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[5], ra);
361
+ cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[6], ra);
362
+ cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[7], ra);
363
+ cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
364
+ cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
365
+ cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
366
+ cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
367
+ cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
368
+ cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
369
+ cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[14], ra);
370
+ cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[15], ra);
371
#endif
372
}
373
374
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
375
{
376
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
377
int mmu_idx = cpu_mmu_index(env, false);
378
+ uintptr_t ra = GETPC();
379
+
380
+ ensure_writable_pages(env, addr, mmu_idx, ra);
381
382
- MEMOP_IDX(DF_HALF)
383
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
384
-#if !defined(CONFIG_USER_ONLY)
385
#if !defined(HOST_WORDS_BIGENDIAN)
386
- helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
387
- helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
388
- helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
389
- helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
390
- helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
391
- helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
392
- helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
393
- helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
394
+ cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
395
+ cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
396
+ cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
397
+ cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
398
+ cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
399
+ cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
400
+ cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
401
+ cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
402
#else
403
- helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
404
- helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
405
- helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
406
- helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
407
- helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
408
- helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
409
- helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
410
- helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
411
-#endif
412
-#else
413
-#if !defined(HOST_WORDS_BIGENDIAN)
414
- cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
415
- cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
416
- cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
417
- cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
418
- cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
419
- cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
420
- cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
421
- cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
422
-#else
423
- cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
424
- cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
425
- cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
426
- cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
427
- cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
428
- cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
429
- cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
430
- cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
431
-#endif
432
+ cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
433
+ cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
434
+ cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
435
+ cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
436
+ cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
437
+ cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
438
+ cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
439
+ cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
440
#endif
441
}
442
443
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
444
{
445
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
446
int mmu_idx = cpu_mmu_index(env, false);
447
+ uintptr_t ra = GETPC();
448
+
449
+ ensure_writable_pages(env, addr, mmu_idx, ra);
450
451
- MEMOP_IDX(DF_WORD)
452
- ensure_writable_pages(env, addr, mmu_idx, GETPC());
453
-#if !defined(CONFIG_USER_ONLY)
454
#if !defined(HOST_WORDS_BIGENDIAN)
455
- helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
456
- helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
457
- helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
458
- helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
459
+ cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
460
+ cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
461
+ cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
462
+ cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
463
#else
464
- helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
465
- helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
466
- helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
467
- helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
468
-#endif
469
-#else
470
-#if !defined(HOST_WORDS_BIGENDIAN)
471
- cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
472
- cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
473
- cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
474
- cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
475
-#else
476
- cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
477
- cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
478
- cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
479
- cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
480
-#endif
481
+ cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
482
+ cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
483
+ cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
484
+ cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
485
#endif
486
}
487
488
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
489
{
490
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
491
int mmu_idx = cpu_mmu_index(env, false);
492
+ uintptr_t ra = GETPC();
493
494
- MEMOP_IDX(DF_DOUBLE)
495
ensure_writable_pages(env, addr, mmu_idx, GETPC());
496
-#if !defined(CONFIG_USER_ONLY)
497
- helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
498
- helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
499
-#else
500
- cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
501
- cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
502
-#endif
503
+
504
+ cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
505
+ cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
506
}
507
--
508
2.25.1
509
510
Deleted patch
Rather than use 4-16 separate operations, use 2 operations
plus some byte reordering as necessary.

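To make the reordering concrete, here is a standalone sketch of the two
swap helpers introduced by the diff below, with a worked value. The bodies
mirror bswap16x4()/bswap32x2() from the patch; __builtin_bswap64() (a
GCC/Clang builtin) stands in for QEMU's bswap64()/ror64() so the sketch
compiles outside the tree:

    #include <stdint.h>
    #include <stdio.h>

    /* Swap the two bytes within each of the four 16-bit lanes. */
    static uint64_t bswap16x4(uint64_t x)
    {
        uint64_t m = 0x00ff00ff00ff00ffull;
        return ((x & m) << 8) | ((x >> 8) & m);
    }

    /* Swap the four bytes within each of the two 32-bit lanes. */
    static uint64_t bswap32x2(uint64_t x)
    {
        x = __builtin_bswap64(x);       /* full 8-byte swap... */
        return (x << 32) | (x >> 32);   /* ...then rotate by 32 */
    }

    int main(void)
    {
        uint64_t v = 0x0001020304050607ull;
        printf("%016llx\n", (unsigned long long)bswap16x4(v)); /* 0100030205040706 */
        printf("%016llx\n", (unsigned long long)bswap32x2(v)); /* 0302010007060504 */
        return 0;
    }
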
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/msa_helper.c | 201 +++++++++++++----------------------
 1 file changed, 71 insertions(+), 130 deletions(-)

diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/msa_helper.c
+++ b/target/mips/tcg/msa_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
 #define MEMOP_IDX(DF)
 #endif

+#ifdef TARGET_WORDS_BIGENDIAN
+static inline uint64_t bswap16x4(uint64_t x)
+{
+    uint64_t m = 0x00ff00ff00ff00ffull;
+    return ((x & m) << 8) | ((x >> 8) & m);
+}
+
+static inline uint64_t bswap32x2(uint64_t x)
+{
+    return ror64(bswap64(x), 32);
+}
+#endif
+
 void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
                      target_ulong addr)
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;

-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->b[0] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
-    pwd->b[1] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
-    pwd->b[2] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
-    pwd->b[3] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
-    pwd->b[4] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
-    pwd->b[5] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
-    pwd->b[6] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
-    pwd->b[7] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
-    pwd->b[8] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
-    pwd->b[9] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
-    pwd->b[10] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
-    pwd->b[11] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
-    pwd->b[12] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
-    pwd->b[13] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
-    pwd->b[14] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
-    pwd->b[15] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
-#else
-    pwd->b[0] = cpu_ldub_data_ra(env, addr + (7 << DF_BYTE), ra);
-    pwd->b[1] = cpu_ldub_data_ra(env, addr + (6 << DF_BYTE), ra);
-    pwd->b[2] = cpu_ldub_data_ra(env, addr + (5 << DF_BYTE), ra);
-    pwd->b[3] = cpu_ldub_data_ra(env, addr + (4 << DF_BYTE), ra);
-    pwd->b[4] = cpu_ldub_data_ra(env, addr + (3 << DF_BYTE), ra);
-    pwd->b[5] = cpu_ldub_data_ra(env, addr + (2 << DF_BYTE), ra);
-    pwd->b[6] = cpu_ldub_data_ra(env, addr + (1 << DF_BYTE), ra);
-    pwd->b[7] = cpu_ldub_data_ra(env, addr + (0 << DF_BYTE), ra);
-    pwd->b[8] = cpu_ldub_data_ra(env, addr + (15 << DF_BYTE), ra);
-    pwd->b[9] = cpu_ldub_data_ra(env, addr + (14 << DF_BYTE), ra);
-    pwd->b[10] = cpu_ldub_data_ra(env, addr + (13 << DF_BYTE), ra);
-    pwd->b[11] = cpu_ldub_data_ra(env, addr + (12 << DF_BYTE), ra);
-    pwd->b[12] = cpu_ldub_data_ra(env, addr + (11 << DF_BYTE), ra);
-    pwd->b[13] = cpu_ldub_data_ra(env, addr + (10 << DF_BYTE), ra);
-    pwd->b[14] = cpu_ldub_data_ra(env, addr + (9 << DF_BYTE), ra);
-    pwd->b[15] = cpu_ldub_data_ra(env, addr + (8 << DF_BYTE), ra);
-#endif
+    /* Load 8 bytes at a time.  Vector element ordering makes this LE. */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }

 void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;

-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->h[0] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
-    pwd->h[1] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
-    pwd->h[2] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
-    pwd->h[3] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
-    pwd->h[4] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
-    pwd->h[5] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
-    pwd->h[6] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
-    pwd->h[7] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
-#else
-    pwd->h[0] = cpu_lduw_data_ra(env, addr + (3 << DF_HALF), ra);
-    pwd->h[1] = cpu_lduw_data_ra(env, addr + (2 << DF_HALF), ra);
-    pwd->h[2] = cpu_lduw_data_ra(env, addr + (1 << DF_HALF), ra);
-    pwd->h[3] = cpu_lduw_data_ra(env, addr + (0 << DF_HALF), ra);
-    pwd->h[4] = cpu_lduw_data_ra(env, addr + (7 << DF_HALF), ra);
-    pwd->h[5] = cpu_lduw_data_ra(env, addr + (6 << DF_HALF), ra);
-    pwd->h[6] = cpu_lduw_data_ra(env, addr + (5 << DF_HALF), ra);
-    pwd->h[7] = cpu_lduw_data_ra(env, addr + (4 << DF_HALF), ra);
+    /*
+     * Load 8 bytes at a time.  Use little-endian load, then for
+     * big-endian target, we must then swap the four halfwords.
+     */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap16x4(d0);
+    d1 = bswap16x4(d1);
 #endif
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }

 void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;

-#if !defined(HOST_WORDS_BIGENDIAN)
-    pwd->w[0] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
-    pwd->w[1] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
-    pwd->w[2] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
-    pwd->w[3] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
-#else
-    pwd->w[0] = cpu_ldl_data_ra(env, addr + (1 << DF_WORD), ra);
-    pwd->w[1] = cpu_ldl_data_ra(env, addr + (0 << DF_WORD), ra);
-    pwd->w[2] = cpu_ldl_data_ra(env, addr + (3 << DF_WORD), ra);
-    pwd->w[3] = cpu_ldl_data_ra(env, addr + (2 << DF_WORD), ra);
+    /*
+     * Load 8 bytes at a time.  Use little-endian load, then for
+     * big-endian target, we must then bswap the two words.
+     */
+    d0 = cpu_ldq_le_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_le_data_ra(env, addr + 8, ra);
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap32x2(d0);
+    d1 = bswap32x2(d1);
 #endif
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }

 void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
@@ -XXX,XX +XXX,XX @@ void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
 {
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;

-    pwd->d[0] = cpu_ldq_data_ra(env, addr + (0 << DF_DOUBLE), ra);
-    pwd->d[1] = cpu_ldq_data_ra(env, addr + (1 << DF_DOUBLE), ra);
+    d0 = cpu_ldq_data_ra(env, addr + 0, ra);
+    d1 = cpu_ldq_data_ra(env, addr + 8, ra);
+    pwd->d[0] = d0;
+    pwd->d[1] = d1;
 }

 #define MSA_PAGESPAN(x) \
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,

     ensure_writable_pages(env, addr, mmu_idx, ra);

-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[0], ra);
-    cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[1], ra);
-    cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[2], ra);
-    cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[3], ra);
-    cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[4], ra);
-    cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[5], ra);
-    cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[6], ra);
-    cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[7], ra);
-    cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[8], ra);
-    cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[9], ra);
-    cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[10], ra);
-    cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[11], ra);
-    cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[12], ra);
-    cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[13], ra);
-    cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[14], ra);
-    cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[15], ra);
-#else
-    cpu_stb_data_ra(env, addr + (7 << DF_BYTE), pwd->b[0], ra);
-    cpu_stb_data_ra(env, addr + (6 << DF_BYTE), pwd->b[1], ra);
-    cpu_stb_data_ra(env, addr + (5 << DF_BYTE), pwd->b[2], ra);
-    cpu_stb_data_ra(env, addr + (4 << DF_BYTE), pwd->b[3], ra);
-    cpu_stb_data_ra(env, addr + (3 << DF_BYTE), pwd->b[4], ra);
-    cpu_stb_data_ra(env, addr + (2 << DF_BYTE), pwd->b[5], ra);
-    cpu_stb_data_ra(env, addr + (1 << DF_BYTE), pwd->b[6], ra);
-    cpu_stb_data_ra(env, addr + (0 << DF_BYTE), pwd->b[7], ra);
-    cpu_stb_data_ra(env, addr + (15 << DF_BYTE), pwd->b[8], ra);
-    cpu_stb_data_ra(env, addr + (14 << DF_BYTE), pwd->b[9], ra);
-    cpu_stb_data_ra(env, addr + (13 << DF_BYTE), pwd->b[10], ra);
-    cpu_stb_data_ra(env, addr + (12 << DF_BYTE), pwd->b[11], ra);
-    cpu_stb_data_ra(env, addr + (11 << DF_BYTE), pwd->b[12], ra);
-    cpu_stb_data_ra(env, addr + (10 << DF_BYTE), pwd->b[13], ra);
-    cpu_stb_data_ra(env, addr + (9 << DF_BYTE), pwd->b[14], ra);
-    cpu_stb_data_ra(env, addr + (8 << DF_BYTE), pwd->b[15], ra);
-#endif
+    /* Store 8 bytes at a time.  Vector element ordering makes this LE. */
+    cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra);
+    cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra);
 }

 void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     int mmu_idx = cpu_mmu_index(env, false);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;

     ensure_writable_pages(env, addr, mmu_idx, ra);

-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[0], ra);
-    cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[1], ra);
-    cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[2], ra);
-    cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[3], ra);
-    cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[4], ra);
-    cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[5], ra);
-    cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[6], ra);
-    cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[7], ra);
-#else
-    cpu_stw_data_ra(env, addr + (3 << DF_HALF), pwd->h[0], ra);
-    cpu_stw_data_ra(env, addr + (2 << DF_HALF), pwd->h[1], ra);
-    cpu_stw_data_ra(env, addr + (1 << DF_HALF), pwd->h[2], ra);
-    cpu_stw_data_ra(env, addr + (0 << DF_HALF), pwd->h[3], ra);
-    cpu_stw_data_ra(env, addr + (7 << DF_HALF), pwd->h[4], ra);
-    cpu_stw_data_ra(env, addr + (6 << DF_HALF), pwd->h[5], ra);
-    cpu_stw_data_ra(env, addr + (5 << DF_HALF), pwd->h[6], ra);
-    cpu_stw_data_ra(env, addr + (4 << DF_HALF), pwd->h[7], ra);
+    /* Store 8 bytes at a time.  See helper_msa_ld_h. */
+    d0 = pwd->d[0];
+    d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap16x4(d0);
+    d1 = bswap16x4(d1);
 #endif
+    cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+    cpu_stq_le_data_ra(env, addr + 8, d1, ra);
 }

 void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
     int mmu_idx = cpu_mmu_index(env, false);
     uintptr_t ra = GETPC();
+    uint64_t d0, d1;

     ensure_writable_pages(env, addr, mmu_idx, ra);

-#if !defined(HOST_WORDS_BIGENDIAN)
-    cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[0], ra);
-    cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[1], ra);
-    cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[2], ra);
-    cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[3], ra);
-#else
-    cpu_stl_data_ra(env, addr + (1 << DF_WORD), pwd->w[0], ra);
-    cpu_stl_data_ra(env, addr + (0 << DF_WORD), pwd->w[1], ra);
-    cpu_stl_data_ra(env, addr + (3 << DF_WORD), pwd->w[2], ra);
-    cpu_stl_data_ra(env, addr + (2 << DF_WORD), pwd->w[3], ra);
+    /* Store 8 bytes at a time.  See helper_msa_ld_w. */
+    d0 = pwd->d[0];
+    d1 = pwd->d[1];
+#ifdef TARGET_WORDS_BIGENDIAN
+    d0 = bswap32x2(d0);
+    d1 = bswap32x2(d1);
 #endif
+    cpu_stq_le_data_ra(env, addr + 0, d0, ra);
+    cpu_stq_le_data_ra(env, addr + 8, d1, ra);
 }

 void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
@@ -XXX,XX +XXX,XX @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,

     ensure_writable_pages(env, addr, mmu_idx, GETPC());

-    cpu_stq_data_ra(env, addr + (0 << DF_DOUBLE), pwd->d[0], ra);
-    cpu_stq_data_ra(env, addr + (1 << DF_DOUBLE), pwd->d[1], ra);
+    cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra);
+    cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra);
 }
--
2.25.1

Deleted patch
The helper_*_mmu functions were the only thing available
when this code was written. This could have been adjusted
when we added cpu_*_mmuidx_ra, but now we can most easily
use the newest set of interfaces.

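For orientation, the change at each call site is purely mechanical; a
sketch of the before/after shape, reusing the MemOpIdx oi and return
address ra that the function already has in hand:

    /* before: TCG-internal entry point */
    byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);

    /* after: public cpu_*_mmu interface, same arguments */
    byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);

where oi packs the access size and mmu index, e.g.
make_memop_idx(MO_UB, mmu_idx).
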
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/tcg/mem_helper.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -XXX,XX +XXX,XX @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
      * page. This is especially relevant to speed up TLB_NOTDIRTY.
      */
     g_assert(size > 0);
-    helper_ret_stb_mmu(env, vaddr, byte, oi, ra);
+    cpu_stb_mmu(env, vaddr, byte, oi, ra);
     haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
     if (likely(haddr)) {
         memset(haddr + 1, byte, size - 1);
     } else {
         for (i = 1; i < size; i++) {
-            helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra);
+            cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
      * Do a single access and test if we can then get access to the
      * page. This is especially relevant to speed up TLB_NOTDIRTY.
      */
-    byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
+    byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
     *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
     return byte;
 #endif
@@ -XXX,XX +XXX,XX @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
      * Do a single access and test if we can then get access to the
      * page. This is especially relevant to speed up TLB_NOTDIRTY.
      */
-    helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
+    cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
     *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
 #endif
 }
--
2.25.1

Deleted patch
The helper_*_mmu functions were the only thing available
when this code was written. This could have been adjusted
when we added cpu_*_mmuidx_ra, but now we can most easily
use the newest set of interfaces.

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/ldst_helper.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
         oi = make_memop_idx(memop, idx);
         switch (size) {
         case 1:
-            ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
+            ret = cpu_ldb_mmu(env, addr, oi, GETPC());
             break;
         case 2:
             if (asi & 8) {
-                ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
+                ret = cpu_ldw_le_mmu(env, addr, oi, GETPC());
             } else {
-                ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
+                ret = cpu_ldw_be_mmu(env, addr, oi, GETPC());
             }
             break;
         case 4:
             if (asi & 8) {
-                ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
+                ret = cpu_ldl_le_mmu(env, addr, oi, GETPC());
             } else {
-                ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
+                ret = cpu_ldl_be_mmu(env, addr, oi, GETPC());
             }
             break;
         case 8:
             if (asi & 8) {
-                ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
+                ret = cpu_ldq_le_mmu(env, addr, oi, GETPC());
             } else {
-                ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
+                ret = cpu_ldq_be_mmu(env, addr, oi, GETPC());
             }
             break;
         default:
--
2.25.1

Deleted patch
The helper_*_mmu functions were the only thing available
when this code was written. This could have been adjusted
when we added cpu_*_mmuidx_ra, but now we can most easily
use the newest set of interfaces.

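One point worth calling out: the deleted CONFIG_USER_ONLY path carried a
"??? Enforce alignment" comment. Routing both configurations through
cpu_ldq_*_mmu lets the alignment requirement travel in the MemOpIdx
instead, as in the hunk below (comments added here for illustration):

    /* the first 8-byte access carries the full 16-byte alignment check */
    MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
    /* the second access at addr + 8 is covered by the first check */
    MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);

so only the first load of each pair needs the MO_ALIGN_16 annotation.
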
Cc: qemu-arm@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper-a64.c | 52 +++++++----------------------------------
 target/arm/m_helper.c   |  6 ++---
 2 files changed, 11 insertions(+), 47 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
     uintptr_t ra = GETPC();
     uint64_t o0, o1;
     bool success;
-
-#ifdef CONFIG_USER_ONLY
-    /* ??? Enforce alignment.  */
-    uint64_t *haddr = g2h(env_cpu(env), addr);
-
-    set_helper_retaddr(ra);
-    o0 = ldq_le_p(haddr + 0);
-    o1 = ldq_le_p(haddr + 1);
-    oldv = int128_make128(o0, o1);
-
-    success = int128_eq(oldv, cmpv);
-    if (success) {
-        stq_le_p(haddr + 0, int128_getlo(newv));
-        stq_le_p(haddr + 1, int128_gethi(newv));
-    }
-    clear_helper_retaddr();
-#else
     int mem_idx = cpu_mmu_index(env, false);
     MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
     MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);

-    o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
-    o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
+    o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
+    o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
     oldv = int128_make128(o0, o1);

     success = int128_eq(oldv, cmpv);
     if (success) {
-        helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
-        helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
+        cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
+        cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
     }
-#endif

     return !success;
 }
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
     uintptr_t ra = GETPC();
     uint64_t o0, o1;
     bool success;
-
-#ifdef CONFIG_USER_ONLY
-    /* ??? Enforce alignment.  */
-    uint64_t *haddr = g2h(env_cpu(env), addr);
-
-    set_helper_retaddr(ra);
-    o1 = ldq_be_p(haddr + 0);
-    o0 = ldq_be_p(haddr + 1);
-    oldv = int128_make128(o0, o1);
-
-    success = int128_eq(oldv, cmpv);
-    if (success) {
-        stq_be_p(haddr + 0, int128_gethi(newv));
-        stq_be_p(haddr + 1, int128_getlo(newv));
-    }
-    clear_helper_retaddr();
-#else
     int mem_idx = cpu_mmu_index(env, false);
     MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
     MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);

-    o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
-    o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
+    o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
+    o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
     oldv = int128_make128(o0, o1);

     success = int128_eq(oldv, cmpv);
     if (success) {
-        helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
-        helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
+        cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
+        cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
     }
-#endif

     return !success;
 }
diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/m_helper.c
+++ b/target/arm/m_helper.c
@@ -XXX,XX +XXX,XX @@ static bool do_v7m_function_return(ARMCPU *cpu)
          * do them as secure, so work out what MMU index that is.
          */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
-        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
-        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+        oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
+        newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
+        newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);

         /* Consistency checks on new IPSR */
         newpsr_exc = newpsr & XPSR_EXCP;
--
2.25.1

Deleted patch
These functions have been replaced by cpu_*_mmu as the
most proper interface to use from target code.

Hide these declarations from code that should not use them.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-ldst.h | 74 ++++++++++++++++++++++++++++++++++++++++++
 include/tcg/tcg.h      | 71 ----------------------------------------
 accel/tcg/cputlb.c     |  1 +
 tcg/tcg.c              |  1 +
 tcg/tci.c              |  1 +
 5 files changed, 77 insertions(+), 71 deletions(-)
 create mode 100644 include/tcg/tcg-ldst.h

diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/tcg/tcg-ldst.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Memory helpers that will be used by TCG generated code.
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef TCG_LDST_H
+#define TCG_LDST_H 1
+
+#ifdef CONFIG_SOFTMMU
+
+/* Value zero-extended to tcg register size.  */
+tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
+                                     MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+                           MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+                           MemOpIdx oi, uintptr_t retaddr);
+
+/* Value sign-extended to tcg register size.  */
+tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
+                                     MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+                                    MemOpIdx oi, uintptr_t retaddr);
+
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+                        MemOpIdx oi, uintptr_t retaddr);
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                       MemOpIdx oi, uintptr_t retaddr);
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                       MemOpIdx oi, uintptr_t retaddr);
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                       MemOpIdx oi, uintptr_t retaddr);
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                       MemOpIdx oi, uintptr_t retaddr);
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                       MemOpIdx oi, uintptr_t retaddr);
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                       MemOpIdx oi, uintptr_t retaddr);
+
+#endif /* CONFIG_SOFTMMU */
+#endif /* TCG_LDST_H */
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
        : (target_long)dup_const(VECE, C))
 #endif

-/*
- * Memory helpers that will be used by TCG generated code.
- */
-#ifdef CONFIG_SOFTMMU
-/* Value zero-extended to tcg register size.  */
-tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
-                                     MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           MemOpIdx oi, uintptr_t retaddr);
-
-/* Value sign-extended to tcg register size.  */
-tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
-                                     MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    MemOpIdx oi, uintptr_t retaddr);
-
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
-                        MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       MemOpIdx oi, uintptr_t retaddr);
-
-/* Temporary aliases until backends are converted.  */
-#ifdef TARGET_WORDS_BIGENDIAN
-# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
-# define helper_ret_lduw_mmu  helper_be_lduw_mmu
-# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
-# define helper_ret_ldul_mmu  helper_be_ldul_mmu
-# define helper_ret_ldl_mmu   helper_be_ldul_mmu
-# define helper_ret_ldq_mmu   helper_be_ldq_mmu
-# define helper_ret_stw_mmu   helper_be_stw_mmu
-# define helper_ret_stl_mmu   helper_be_stl_mmu
-# define helper_ret_stq_mmu   helper_be_stq_mmu
-#else
-# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
-# define helper_ret_lduw_mmu  helper_le_lduw_mmu
-# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
-# define helper_ret_ldul_mmu  helper_le_ldul_mmu
-# define helper_ret_ldl_mmu   helper_le_ldul_mmu
-# define helper_ret_ldq_mmu   helper_le_ldq_mmu
-# define helper_ret_stw_mmu   helper_le_stw_mmu
-# define helper_ret_stl_mmu   helper_le_stl_mmu
-# define helper_ret_stq_mmu   helper_le_stq_mmu
-#endif
-#endif /* CONFIG_SOFTMMU */
-
 #ifdef CONFIG_DEBUG_TCG
 void tcg_assert_listed_vecop(TCGOpcode);
 #else
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
 #ifdef CONFIG_PLUGIN
 #include "qemu/plugin-memory.h"
 #endif
+#include "tcg/tcg-ldst.h"

 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@

 #include "elf.h"
 #include "exec/log.h"
+#include "tcg/tcg-ldst.h"
 #include "tcg-internal.h"

 #ifdef CONFIG_TCG_INTERPRETER
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
 #include "exec/cpu_ldst.h"
 #include "tcg/tcg-op.h"
+#include "tcg/tcg-ldst.h"
 #include "qemu/compiler.h"
 #include <ffi.h>

--
2.25.1

Deleted patch
To be called from tcg generated code on hosts that support
unaligned accesses natively, in response to an access that
is supposed to be aligned.

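As a purely hypothetical guest-side illustration (not part of the patch,
and assuming a guest ISA whose 4-byte atomics require natural alignment),
the observable effect is that an access the target declares as
aligned-only now ends in SIGBUS under qemu-user instead of silently
succeeding because the host tolerates misalignment:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void on_sigbus(int sig)
    {
        printf("SIGBUS delivered\n");
        _exit(0);
    }

    int main(void)
    {
        static char buf[16] __attribute__((aligned(8)));
        int *p = (int *)(buf + 1);              /* deliberately misaligned */

        signal(SIGBUS, on_sigbus);
        __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);
        printf("no fault (alignment not enforced for this guest)\n");
        return 0;
    }
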
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-ldst.h |  5 +++++
 accel/tcg/user-exec.c  | 11 +++++++++++
 2 files changed, 16 insertions(+)

diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-ldst.h
+++ b/include/tcg/tcg-ldst.h
@@ -XXX,XX +XXX,XX @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        MemOpIdx oi, uintptr_t retaddr);

+#else
+
+void QEMU_NORETURN helper_unaligned_ld(CPUArchState *env, target_ulong addr);
+void QEMU_NORETURN helper_unaligned_st(CPUArchState *env, target_ulong addr);
+
 #endif /* CONFIG_SOFTMMU */
 #endif /* TCG_LDST_H */
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/helper-proto.h"
 #include "qemu/atomic128.h"
 #include "trace/trace-root.h"
+#include "tcg/tcg-ldst.h"
 #include "internal.h"

 __thread uintptr_t helper_retaddr;
@@ -XXX,XX +XXX,XX @@ static void validate_memop(MemOpIdx oi, MemOp expected)
 #endif
 }

+void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
+{
+    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
+}
+
+void helper_unaligned_st(CPUArchState *env, target_ulong addr)
+{
+    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
+}
+
 static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                             MemOpIdx oi, uintptr_t ra, MMUAccessType type)
 {
--
2.25.1

Deleted patch
Since the prctl constants are supposed to be generic, supply
any that are not provided by the host.

Split out subroutines for PR_GET_FP_MODE, PR_SET_FP_MODE,
PR_GET_VL, PR_SET_VL, PR_RESET_KEYS, PR_SET_TAGGED_ADDR_CTRL,
PR_GET_TAGGED_ADDR_CTRL. Return EINVAL for guests that do
not support these options rather than pass them on to the host.

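The dispatch pattern, sketched: each target_prctl.h defines the
subroutines it supports and then defines a same-named macro as a feature
flag; unsupported options fall back to a stub. The syscall.c hunk is not
shown in this excerpt, but the intended shape is roughly:

    static abi_long do_prctl_inval0(CPUArchState *env)
    {
        return -TARGET_EINVAL;
    }

    #ifndef do_prctl_get_fp_mode
    #define do_prctl_get_fp_mode do_prctl_inval0
    #endif

    /* ... in the prctl switch ... */
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);

so a target that never defines do_prctl_get_fp_mode gets EINVAL instead
of having the option forwarded to the host prctl().
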
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/aarch64/target_prctl.h    | 160 ++++++++++
 linux-user/aarch64/target_syscall.h  |  23 --
 linux-user/alpha/target_prctl.h      |   1 +
 linux-user/arm/target_prctl.h        |   1 +
 linux-user/cris/target_prctl.h       |   1 +
 linux-user/hexagon/target_prctl.h    |   1 +
 linux-user/hppa/target_prctl.h       |   1 +
 linux-user/i386/target_prctl.h       |   1 +
 linux-user/m68k/target_prctl.h       |   1 +
 linux-user/microblaze/target_prctl.h |   1 +
 linux-user/mips/target_prctl.h       |  88 ++++++
 linux-user/mips/target_syscall.h     |   6 -
 linux-user/mips64/target_prctl.h     |   1 +
 linux-user/mips64/target_syscall.h   |   6 -
 linux-user/nios2/target_prctl.h      |   1 +
 linux-user/openrisc/target_prctl.h   |   1 +
 linux-user/ppc/target_prctl.h        |   1 +
 linux-user/riscv/target_prctl.h      |   1 +
 linux-user/s390x/target_prctl.h      |   1 +
 linux-user/sh4/target_prctl.h        |   1 +
 linux-user/sparc/target_prctl.h      |   1 +
 linux-user/x86_64/target_prctl.h     |   1 +
 linux-user/xtensa/target_prctl.h     |   1 +
 linux-user/syscall.c                 | 433 +++++++++------------------
 24 files changed, 414 insertions(+), 320 deletions(-)
 create mode 100644 linux-user/aarch64/target_prctl.h
 create mode 100644 linux-user/alpha/target_prctl.h
 create mode 100644 linux-user/arm/target_prctl.h
 create mode 100644 linux-user/cris/target_prctl.h
 create mode 100644 linux-user/hexagon/target_prctl.h
 create mode 100644 linux-user/hppa/target_prctl.h
 create mode 100644 linux-user/i386/target_prctl.h
 create mode 100644 linux-user/m68k/target_prctl.h
 create mode 100644 linux-user/microblaze/target_prctl.h
 create mode 100644 linux-user/mips/target_prctl.h
 create mode 100644 linux-user/mips64/target_prctl.h
 create mode 100644 linux-user/nios2/target_prctl.h
 create mode 100644 linux-user/openrisc/target_prctl.h
 create mode 100644 linux-user/ppc/target_prctl.h
 create mode 100644 linux-user/riscv/target_prctl.h
 create mode 100644 linux-user/s390x/target_prctl.h
 create mode 100644 linux-user/sh4/target_prctl.h
 create mode 100644 linux-user/sparc/target_prctl.h
 create mode 100644 linux-user/x86_64/target_prctl.h
 create mode 100644 linux-user/xtensa/target_prctl.h

diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/aarch64/target_prctl.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * AArch64 specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef AARCH64_TARGET_PRCTL_H
+#define AARCH64_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_vl(CPUArchState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    if (cpu_isar_feature(aa64_sve, cpu)) {
+        return ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
+    }
+    return -TARGET_EINVAL;
+}
+#define do_prctl_get_vl do_prctl_get_vl
+
+static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
+{
+    /*
+     * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
+     * Note the kernel definition of sve_vl_valid allows for VQ=512,
+     * i.e. VL=8192, even though the current architectural maximum is VQ=16.
+     */
+    if (cpu_isar_feature(aa64_sve, env_archcpu(env))
+        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+        ARMCPU *cpu = env_archcpu(env);
+        uint32_t vq, old_vq;
+
+        old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
+        vq = MAX(arg2 / 16, 1);
+        vq = MIN(vq, cpu->sve_max_vq);
+
+        if (vq < old_vq) {
+            aarch64_sve_narrow_vq(env, vq);
+        }
+        env->vfp.zcr_el[1] = vq - 1;
+        arm_rebuild_hflags(env);
+        return vq * 16;
+    }
+    return -TARGET_EINVAL;
+}
+#define do_prctl_set_vl do_prctl_set_vl
+
+static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
+{
+    ARMCPU *cpu = env_archcpu(env);
+
+    if (cpu_isar_feature(aa64_pauth, cpu)) {
+        int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+                   PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
+        int ret = 0;
+        Error *err = NULL;
+
+        if (arg2 == 0) {
+            arg2 = all;
+        } else if (arg2 & ~all) {
+            return -TARGET_EINVAL;
+        }
+        if (arg2 & PR_PAC_APIAKEY) {
+            ret |= qemu_guest_getrandom(&env->keys.apia,
+                                        sizeof(ARMPACKey), &err);
+        }
+        if (arg2 & PR_PAC_APIBKEY) {
+            ret |= qemu_guest_getrandom(&env->keys.apib,
+                                        sizeof(ARMPACKey), &err);
+        }
+        if (arg2 & PR_PAC_APDAKEY) {
+            ret |= qemu_guest_getrandom(&env->keys.apda,
+                                        sizeof(ARMPACKey), &err);
+        }
+        if (arg2 & PR_PAC_APDBKEY) {
+            ret |= qemu_guest_getrandom(&env->keys.apdb,
+                                        sizeof(ARMPACKey), &err);
+        }
+        if (arg2 & PR_PAC_APGAKEY) {
+            ret |= qemu_guest_getrandom(&env->keys.apga,
+                                        sizeof(ARMPACKey), &err);
+        }
+        if (ret != 0) {
+            /*
+             * Some unknown failure in the crypto.  The best
+             * we can do is log it and fail the syscall.
+             * The real syscall cannot fail this way.
+             */
+            qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
+                          error_get_pretty(err));
+            error_free(err);
+            return -TARGET_EIO;
+        }
+        return 0;
+    }
+    return -TARGET_EINVAL;
+}
+#define do_prctl_reset_keys do_prctl_reset_keys
+
+static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
+{
+    abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
+    ARMCPU *cpu = env_archcpu(env);
+
+    if (cpu_isar_feature(aa64_mte, cpu)) {
+        valid_mask |= PR_MTE_TCF_MASK;
+        valid_mask |= PR_MTE_TAG_MASK;
+    }
+
+    if (arg2 & ~valid_mask) {
+        return -TARGET_EINVAL;
+    }
+    env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;
+
+    if (cpu_isar_feature(aa64_mte, cpu)) {
+        switch (arg2 & PR_MTE_TCF_MASK) {
+        case PR_MTE_TCF_NONE:
+        case PR_MTE_TCF_SYNC:
+        case PR_MTE_TCF_ASYNC:
+            break;
+        default:
+            return -EINVAL;
+        }
+
+        /*
+         * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
+         * Note that the syscall values are consistent with hw.
+         */
+        env->cp15.sctlr_el[1] =
+            deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT);
+
+        /*
+         * Write PR_MTE_TAG to GCR_EL1[Exclude].
+         * Note that the syscall uses an include mask,
+         * and hardware uses an exclude mask -- invert.
+         */
+        env->cp15.gcr_el1 =
+            deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
+        arm_rebuild_hflags(env);
+    }
+    return 0;
+}
+#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
+
+static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    abi_long ret = 0;
+
+    if (env->tagged_addr_enable) {
+        ret |= PR_TAGGED_ADDR_ENABLE;
+    }
+    if (cpu_isar_feature(aa64_mte, cpu)) {
+        /* See do_prctl_set_tagged_addr_ctrl. */
+        ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
+        ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
+    }
+    return ret;
+}
+#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
+
+#endif /* AARCH64_TARGET_PRCTL_H */
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_syscall.h
+++ b/linux-user/aarch64/target_syscall.h
@@ -XXX,XX +XXX,XX @@ struct target_pt_regs {
 #define TARGET_MCL_FUTURE 2
 #define TARGET_MCL_ONFAULT 4

-#define TARGET_PR_SVE_SET_VL 50
-#define TARGET_PR_SVE_GET_VL 51
-
-#define TARGET_PR_PAC_RESET_KEYS 54
-# define TARGET_PR_PAC_APIAKEY (1 << 0)
-# define TARGET_PR_PAC_APIBKEY (1 << 1)
-# define TARGET_PR_PAC_APDAKEY (1 << 2)
-# define TARGET_PR_PAC_APDBKEY (1 << 3)
-# define TARGET_PR_PAC_APGAKEY (1 << 4)
-
-#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
-#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
-# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
-/* MTE tag check fault modes */
-# define TARGET_PR_MTE_TCF_SHIFT 1
-# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT)
-/* MTE tag inclusion mask */
-# define TARGET_PR_MTE_TAG_SHIFT 3
-# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT)
-
 #endif /* AARCH64_TARGET_SYSCALL_H */
diff --git a/linux-user/alpha/target_prctl.h b/linux-user/alpha/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/alpha/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/arm/target_prctl.h b/linux-user/arm/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/arm/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/cris/target_prctl.h b/linux-user/cris/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/cris/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/hexagon/target_prctl.h b/linux-user/hexagon/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/hexagon/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/hppa/target_prctl.h b/linux-user/hppa/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/hppa/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/i386/target_prctl.h b/linux-user/i386/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/i386/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/m68k/target_prctl.h b/linux-user/m68k/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/m68k/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/microblaze/target_prctl.h b/linux-user/microblaze/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/microblaze/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/mips/target_prctl.h b/linux-user/mips/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/mips/target_prctl.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * MIPS specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef MIPS_TARGET_PRCTL_H
+#define MIPS_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_fp_mode(CPUArchState *env)
+{
+    abi_long ret = 0;
+
+    if (env->CP0_Status & (1 << CP0St_FR)) {
+        ret |= PR_FP_MODE_FR;
+    }
+    if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+        ret |= PR_FP_MODE_FRE;
+    }
+    return ret;
+}
+#define do_prctl_get_fp_mode do_prctl_get_fp_mode
+
+static abi_long do_prctl_set_fp_mode(CPUArchState *env, abi_long arg2)
+{
+    bool old_fr = env->CP0_Status & (1 << CP0St_FR);
+    bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
+    bool new_fr = arg2 & PR_FP_MODE_FR;
+    bool new_fre = arg2 & PR_FP_MODE_FRE;
+    const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+
+    /* If nothing to change, return right away, successfully. */
+    if (old_fr == new_fr && old_fre == new_fre) {
+        return 0;
+    }
+    /* Check the value is valid */
+    if (arg2 & ~known_bits) {
+        return -TARGET_EOPNOTSUPP;
+    }
+    /* Setting FRE without FR is not supported. */
+    if (new_fre && !new_fr) {
+        return -TARGET_EOPNOTSUPP;
+    }
+    if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+        /* FR1 is not supported */
+        return -TARGET_EOPNOTSUPP;
+    }
+    if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
+        && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
+        /* cannot set FR=0 */
+        return -TARGET_EOPNOTSUPP;
+    }
+    if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
+        /* Cannot set FRE=1 */
+        return -TARGET_EOPNOTSUPP;
+    }
+
+    int i;
+    fpr_t *fpr = env->active_fpu.fpr;
+    for (i = 0; i < 32 ; i += 2) {
+        if (!old_fr && new_fr) {
+            fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
+        } else if (old_fr && !new_fr) {
+            fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
+        }
+    }
+
+    if (new_fr) {
+        env->CP0_Status |= (1 << CP0St_FR);
+        env->hflags |= MIPS_HFLAG_F64;
+    } else {
+        env->CP0_Status &= ~(1 << CP0St_FR);
+        env->hflags &= ~MIPS_HFLAG_F64;
+    }
+    if (new_fre) {
+        env->CP0_Config5 |= (1 << CP0C5_FRE);
+        if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+            env->hflags |= MIPS_HFLAG_FRE;
+        }
+    } else {
+        env->CP0_Config5 &= ~(1 << CP0C5_FRE);
+        env->hflags &= ~MIPS_HFLAG_FRE;
+    }
+
+    return 0;
+}
+#define do_prctl_set_fp_mode do_prctl_set_fp_mode
+
+#endif /* MIPS_TARGET_PRCTL_H */
diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mips/target_syscall.h
+++ b/linux-user/mips/target_syscall.h
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong target_shmlba(CPUMIPSState *env)
     return 0x40000;
 }

-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
 #endif /* MIPS_TARGET_SYSCALL_H */
diff --git a/linux-user/mips64/target_prctl.h b/linux-user/mips64/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/mips64/target_prctl.h
@@ -0,0 +1 @@
+#include "../mips/target_prctl.h"
diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mips64/target_syscall.h
+++ b/linux-user/mips64/target_syscall.h
@@ -XXX,XX +XXX,XX @@ static inline abi_ulong target_shmlba(CPUMIPSState *env)
     return 0x40000;
 }

-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
 #endif /* MIPS64_TARGET_SYSCALL_H */
diff --git a/linux-user/nios2/target_prctl.h b/linux-user/nios2/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/nios2/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/openrisc/target_prctl.h b/linux-user/openrisc/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/openrisc/target_prctl.h
@@ -0,0 +1 @@
+/* No special prctl support required. */
diff --git a/linux-user/ppc/target_prctl.h b/linux-user/ppc/target_prctl.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
461
@@ -0,0 +1 @@
462
+/* No special prctl support required. */
463
diff --git a/linux-user/riscv/target_prctl.h b/linux-user/riscv/target_prctl.h
464
new file mode 100644
465
index XXXXXXX..XXXXXXX
466
--- /dev/null
467
+++ b/linux-user/riscv/target_prctl.h
468
@@ -0,0 +1 @@
469
+/* No special prctl support required. */
470
diff --git a/linux-user/s390x/target_prctl.h b/linux-user/s390x/target_prctl.h
471
new file mode 100644
472
index XXXXXXX..XXXXXXX
473
--- /dev/null
474
+++ b/linux-user/s390x/target_prctl.h
475
@@ -0,0 +1 @@
476
+/* No special prctl support required. */
477
diff --git a/linux-user/sh4/target_prctl.h b/linux-user/sh4/target_prctl.h
478
new file mode 100644
479
index XXXXXXX..XXXXXXX
480
--- /dev/null
481
+++ b/linux-user/sh4/target_prctl.h
482
@@ -0,0 +1 @@
483
+/* No special prctl support required. */
484
diff --git a/linux-user/sparc/target_prctl.h b/linux-user/sparc/target_prctl.h
485
new file mode 100644
486
index XXXXXXX..XXXXXXX
487
--- /dev/null
488
+++ b/linux-user/sparc/target_prctl.h
489
@@ -0,0 +1 @@
490
+/* No special prctl support required. */
491
diff --git a/linux-user/x86_64/target_prctl.h b/linux-user/x86_64/target_prctl.h
492
new file mode 100644
493
index XXXXXXX..XXXXXXX
494
--- /dev/null
495
+++ b/linux-user/x86_64/target_prctl.h
496
@@ -0,0 +1 @@
497
+/* No special prctl support required. */
498
diff --git a/linux-user/xtensa/target_prctl.h b/linux-user/xtensa/target_prctl.h
499
new file mode 100644
500
index XXXXXXX..XXXXXXX
501
--- /dev/null
502
+++ b/linux-user/xtensa/target_prctl.h
503
@@ -0,0 +1 @@
504
+/* No special prctl support required. */
505
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
506
index XXXXXXX..XXXXXXX 100644
507
--- a/linux-user/syscall.c
508
+++ b/linux-user/syscall.c
509
@@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
510
return ret;
511
}
512
#endif /* defined(TARGET_ABI32 */
513
-
514
#endif /* defined(TARGET_I386) */
515
516
+/*
517
+ * These constants are generic. Supply any that are missing from the host.
518
+ */
519
+#ifndef PR_SET_NAME
520
+# define PR_SET_NAME 15
521
+# define PR_GET_NAME 16
522
+#endif
523
+#ifndef PR_SET_FP_MODE
524
+# define PR_SET_FP_MODE 45
525
+# define PR_GET_FP_MODE 46
526
+# define PR_FP_MODE_FR (1 << 0)
527
+# define PR_FP_MODE_FRE (1 << 1)
528
+#endif
529
+#ifndef PR_SVE_SET_VL
530
+# define PR_SVE_SET_VL 50
531
+# define PR_SVE_GET_VL 51
532
+# define PR_SVE_VL_LEN_MASK 0xffff
533
+# define PR_SVE_VL_INHERIT (1 << 17)
534
+#endif
535
+#ifndef PR_PAC_RESET_KEYS
536
+# define PR_PAC_RESET_KEYS 54
537
+# define PR_PAC_APIAKEY (1 << 0)
538
+# define PR_PAC_APIBKEY (1 << 1)
539
+# define PR_PAC_APDAKEY (1 << 2)
540
+# define PR_PAC_APDBKEY (1 << 3)
541
+# define PR_PAC_APGAKEY (1 << 4)
542
+#endif
543
+#ifndef PR_SET_TAGGED_ADDR_CTRL
544
+# define PR_SET_TAGGED_ADDR_CTRL 55
545
+# define PR_GET_TAGGED_ADDR_CTRL 56
546
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
547
+#endif
548
+#ifndef PR_MTE_TCF_SHIFT
549
+# define PR_MTE_TCF_SHIFT 1
550
+# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
551
+# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
552
+# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
553
+# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
554
+# define PR_MTE_TAG_SHIFT 3
555
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
556
+#endif
557
+
558
+#include "target_prctl.h"
559
+
560
+static abi_long do_prctl_inval0(CPUArchState *env)
561
+{
562
+ return -TARGET_EINVAL;
563
+}
564
+
565
+static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
566
+{
567
+ return -TARGET_EINVAL;
568
+}
569
+
570
+#ifndef do_prctl_get_fp_mode
571
+#define do_prctl_get_fp_mode do_prctl_inval0
572
+#endif
573
+#ifndef do_prctl_set_fp_mode
574
+#define do_prctl_set_fp_mode do_prctl_inval1
575
+#endif
576
+#ifndef do_prctl_get_vl
577
+#define do_prctl_get_vl do_prctl_inval0
578
+#endif
579
+#ifndef do_prctl_set_vl
580
+#define do_prctl_set_vl do_prctl_inval1
581
+#endif
582
+#ifndef do_prctl_reset_keys
583
+#define do_prctl_reset_keys do_prctl_inval1
584
+#endif
585
+#ifndef do_prctl_set_tagged_addr_ctrl
586
+#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
587
+#endif
588
+#ifndef do_prctl_get_tagged_addr_ctrl
589
+#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
590
+#endif
591
+
592
+static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
593
+ abi_long arg3, abi_long arg4, abi_long arg5)
594
+{
595
+ abi_long ret;
596
+
597
+ switch (option) {
598
+ case PR_GET_PDEATHSIG:
599
+ {
600
+ int deathsig;
601
+ ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
602
+ arg3, arg4, arg5));
603
+ if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
604
+ return -TARGET_EFAULT;
605
+ }
606
+ return ret;
607
+ }
608
+ case PR_GET_NAME:
609
+ {
610
+ void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
611
+ if (!name) {
612
+ return -TARGET_EFAULT;
613
+ }
614
+ ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
615
+ arg3, arg4, arg5));
616
+ unlock_user(name, arg2, 16);
617
+ return ret;
618
+ }
619
+ case PR_SET_NAME:
620
+ {
621
+ void *name = lock_user(VERIFY_READ, arg2, 16, 1);
622
+ if (!name) {
623
+ return -TARGET_EFAULT;
624
+ }
625
+ ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
626
+ arg3, arg4, arg5));
627
+ unlock_user(name, arg2, 0);
628
+ return ret;
629
+ }
630
+ case PR_GET_FP_MODE:
631
+ return do_prctl_get_fp_mode(env);
632
+ case PR_SET_FP_MODE:
633
+ return do_prctl_set_fp_mode(env, arg2);
634
+ case PR_SVE_GET_VL:
635
+ return do_prctl_get_vl(env);
636
+ case PR_SVE_SET_VL:
637
+ return do_prctl_set_vl(env, arg2);
638
+ case PR_PAC_RESET_KEYS:
639
+ if (arg3 || arg4 || arg5) {
640
+ return -TARGET_EINVAL;
641
+ }
642
+ return do_prctl_reset_keys(env, arg2);
643
+ case PR_SET_TAGGED_ADDR_CTRL:
644
+ if (arg3 || arg4 || arg5) {
645
+ return -TARGET_EINVAL;
646
+ }
647
+ return do_prctl_set_tagged_addr_ctrl(env, arg2);
648
+ case PR_GET_TAGGED_ADDR_CTRL:
649
+ if (arg2 || arg3 || arg4 || arg5) {
650
+ return -TARGET_EINVAL;
651
+ }
652
+ return do_prctl_get_tagged_addr_ctrl(env);
653
+ case PR_GET_SECCOMP:
654
+ case PR_SET_SECCOMP:
655
+ /* Disable seccomp to prevent the target disabling syscalls we need. */
656
+ return -TARGET_EINVAL;
657
+ default:
658
+ /* Most prctl options have no pointer arguments */
659
+ return get_errno(prctl(option, arg2, arg3, arg4, arg5));
660
+ }
661
+}
662
+
663
#define NEW_STACK_SIZE 0x40000
664
665
666
@@ -XXX,XX +XXX,XX @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
667
return ret;
668
#endif
669
case TARGET_NR_prctl:
670
- switch (arg1) {
671
- case PR_GET_PDEATHSIG:
672
- {
673
- int deathsig;
674
- ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
675
- if (!is_error(ret) && arg2
676
- && put_user_s32(deathsig, arg2)) {
677
- return -TARGET_EFAULT;
678
- }
679
- return ret;
680
- }
681
-#ifdef PR_GET_NAME
682
- case PR_GET_NAME:
683
- {
684
- void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
685
- if (!name) {
686
- return -TARGET_EFAULT;
687
- }
688
- ret = get_errno(prctl(arg1, (unsigned long)name,
689
- arg3, arg4, arg5));
690
- unlock_user(name, arg2, 16);
691
- return ret;
692
- }
693
- case PR_SET_NAME:
694
- {
695
- void *name = lock_user(VERIFY_READ, arg2, 16, 1);
696
- if (!name) {
697
- return -TARGET_EFAULT;
698
- }
699
- ret = get_errno(prctl(arg1, (unsigned long)name,
700
- arg3, arg4, arg5));
701
- unlock_user(name, arg2, 0);
702
- return ret;
703
- }
704
-#endif
705
-#ifdef TARGET_MIPS
706
- case TARGET_PR_GET_FP_MODE:
707
- {
708
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
709
- ret = 0;
710
- if (env->CP0_Status & (1 << CP0St_FR)) {
711
- ret |= TARGET_PR_FP_MODE_FR;
712
- }
713
- if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
714
- ret |= TARGET_PR_FP_MODE_FRE;
715
- }
716
- return ret;
717
- }
718
- case TARGET_PR_SET_FP_MODE:
719
- {
720
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
721
- bool old_fr = env->CP0_Status & (1 << CP0St_FR);
722
- bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
723
- bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
724
- bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
725
-
726
- const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
727
- TARGET_PR_FP_MODE_FRE;
728
-
729
- /* If nothing to change, return right away, successfully. */
730
- if (old_fr == new_fr && old_fre == new_fre) {
731
- return 0;
732
- }
733
- /* Check the value is valid */
734
- if (arg2 & ~known_bits) {
735
- return -TARGET_EOPNOTSUPP;
736
- }
737
- /* Setting FRE without FR is not supported. */
738
- if (new_fre && !new_fr) {
739
- return -TARGET_EOPNOTSUPP;
740
- }
741
- if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
742
- /* FR1 is not supported */
743
- return -TARGET_EOPNOTSUPP;
744
- }
745
- if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
746
- && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
747
- /* cannot set FR=0 */
748
- return -TARGET_EOPNOTSUPP;
749
- }
750
- if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
751
- /* Cannot set FRE=1 */
752
- return -TARGET_EOPNOTSUPP;
753
- }
754
-
755
- int i;
756
- fpr_t *fpr = env->active_fpu.fpr;
757
- for (i = 0; i < 32 ; i += 2) {
758
- if (!old_fr && new_fr) {
759
- fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
760
- } else if (old_fr && !new_fr) {
761
- fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
762
- }
763
- }
764
-
765
- if (new_fr) {
766
- env->CP0_Status |= (1 << CP0St_FR);
767
- env->hflags |= MIPS_HFLAG_F64;
768
- } else {
769
- env->CP0_Status &= ~(1 << CP0St_FR);
770
- env->hflags &= ~MIPS_HFLAG_F64;
771
- }
772
- if (new_fre) {
773
- env->CP0_Config5 |= (1 << CP0C5_FRE);
774
- if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
775
- env->hflags |= MIPS_HFLAG_FRE;
776
- }
777
- } else {
778
- env->CP0_Config5 &= ~(1 << CP0C5_FRE);
779
- env->hflags &= ~MIPS_HFLAG_FRE;
780
- }
781
-
782
- return 0;
783
- }
784
-#endif /* MIPS */
785
-#ifdef TARGET_AARCH64
786
- case TARGET_PR_SVE_SET_VL:
787
- /*
788
- * We cannot support either PR_SVE_SET_VL_ONEXEC or
789
- * PR_SVE_VL_INHERIT. Note the kernel definition
790
- * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
791
- * even though the current architectural maximum is VQ=16.
792
- */
793
- ret = -TARGET_EINVAL;
794
- if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
795
- && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
796
- CPUARMState *env = cpu_env;
797
- ARMCPU *cpu = env_archcpu(env);
798
- uint32_t vq, old_vq;
799
-
800
- old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
801
- vq = MAX(arg2 / 16, 1);
802
- vq = MIN(vq, cpu->sve_max_vq);
803
-
804
- if (vq < old_vq) {
805
- aarch64_sve_narrow_vq(env, vq);
806
- }
807
- env->vfp.zcr_el[1] = vq - 1;
808
- arm_rebuild_hflags(env);
809
- ret = vq * 16;
810
- }
811
- return ret;
812
- case TARGET_PR_SVE_GET_VL:
813
- ret = -TARGET_EINVAL;
814
- {
815
- ARMCPU *cpu = env_archcpu(cpu_env);
816
- if (cpu_isar_feature(aa64_sve, cpu)) {
817
- ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
818
- }
819
- }
820
- return ret;
821
- case TARGET_PR_PAC_RESET_KEYS:
822
- {
823
- CPUARMState *env = cpu_env;
824
- ARMCPU *cpu = env_archcpu(env);
825
-
826
- if (arg3 || arg4 || arg5) {
827
- return -TARGET_EINVAL;
828
- }
829
- if (cpu_isar_feature(aa64_pauth, cpu)) {
830
- int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
831
- TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
832
- TARGET_PR_PAC_APGAKEY);
833
- int ret = 0;
834
- Error *err = NULL;
835
-
836
- if (arg2 == 0) {
837
- arg2 = all;
838
- } else if (arg2 & ~all) {
839
- return -TARGET_EINVAL;
840
- }
841
- if (arg2 & TARGET_PR_PAC_APIAKEY) {
842
- ret |= qemu_guest_getrandom(&env->keys.apia,
843
- sizeof(ARMPACKey), &err);
844
- }
845
- if (arg2 & TARGET_PR_PAC_APIBKEY) {
846
- ret |= qemu_guest_getrandom(&env->keys.apib,
847
- sizeof(ARMPACKey), &err);
848
- }
849
- if (arg2 & TARGET_PR_PAC_APDAKEY) {
850
- ret |= qemu_guest_getrandom(&env->keys.apda,
851
- sizeof(ARMPACKey), &err);
852
- }
853
- if (arg2 & TARGET_PR_PAC_APDBKEY) {
854
- ret |= qemu_guest_getrandom(&env->keys.apdb,
855
- sizeof(ARMPACKey), &err);
856
- }
857
- if (arg2 & TARGET_PR_PAC_APGAKEY) {
858
- ret |= qemu_guest_getrandom(&env->keys.apga,
859
- sizeof(ARMPACKey), &err);
860
- }
861
- if (ret != 0) {
862
- /*
863
- * Some unknown failure in the crypto. The best
864
- * we can do is log it and fail the syscall.
865
- * The real syscall cannot fail this way.
866
- */
867
- qemu_log_mask(LOG_UNIMP,
868
- "PR_PAC_RESET_KEYS: Crypto failure: %s",
869
- error_get_pretty(err));
870
- error_free(err);
871
- return -TARGET_EIO;
872
- }
873
- return 0;
874
- }
875
- }
876
- return -TARGET_EINVAL;
877
- case TARGET_PR_SET_TAGGED_ADDR_CTRL:
878
- {
879
- abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
880
- CPUARMState *env = cpu_env;
881
- ARMCPU *cpu = env_archcpu(env);
882
-
883
- if (cpu_isar_feature(aa64_mte, cpu)) {
884
- valid_mask |= TARGET_PR_MTE_TCF_MASK;
885
- valid_mask |= TARGET_PR_MTE_TAG_MASK;
886
- }
887
-
888
- if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
889
- return -TARGET_EINVAL;
890
- }
891
- env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
892
-
893
- if (cpu_isar_feature(aa64_mte, cpu)) {
894
- switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
895
- case TARGET_PR_MTE_TCF_NONE:
896
- case TARGET_PR_MTE_TCF_SYNC:
897
- case TARGET_PR_MTE_TCF_ASYNC:
898
- break;
899
- default:
900
- return -EINVAL;
901
- }
902
-
903
- /*
904
- * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
905
- * Note that the syscall values are consistent with hw.
906
- */
907
- env->cp15.sctlr_el[1] =
908
- deposit64(env->cp15.sctlr_el[1], 38, 2,
909
- arg2 >> TARGET_PR_MTE_TCF_SHIFT);
910
-
911
- /*
912
- * Write PR_MTE_TAG to GCR_EL1[Exclude].
913
- * Note that the syscall uses an include mask,
914
- * and hardware uses an exclude mask -- invert.
915
- */
916
- env->cp15.gcr_el1 =
917
- deposit64(env->cp15.gcr_el1, 0, 16,
918
- ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
919
- arm_rebuild_hflags(env);
920
- }
921
- return 0;
922
- }
923
- case TARGET_PR_GET_TAGGED_ADDR_CTRL:
924
- {
925
- abi_long ret = 0;
926
- CPUARMState *env = cpu_env;
927
- ARMCPU *cpu = env_archcpu(env);
928
-
929
- if (arg2 || arg3 || arg4 || arg5) {
930
- return -TARGET_EINVAL;
931
- }
932
- if (env->tagged_addr_enable) {
933
- ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
934
- }
935
- if (cpu_isar_feature(aa64_mte, cpu)) {
936
- /* See above. */
937
- ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
938
- << TARGET_PR_MTE_TCF_SHIFT);
939
- ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
940
- ~env->cp15.gcr_el1);
941
- }
942
- return ret;
943
- }
944
-#endif /* AARCH64 */
945
- case PR_GET_SECCOMP:
946
- case PR_SET_SECCOMP:
947
- /* Disable seccomp to prevent the target disabling syscalls we
948
- * need. */
949
- return -TARGET_EINVAL;
950
- default:
951
- /* Most prctl options have no pointer arguments */
952
- return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
953
- }
954
+ return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
955
break;
956
#ifdef TARGET_NR_arch_prctl
957
case TARGET_NR_arch_prctl:
958
--
959
2.25.1
Deleted patch
Create a list of subcodes that we want to pass on, a list of
subcodes that should not be passed on because they would affect
the running qemu itself, and a list that probably could be
implemented but require extra work. Do not pass on unknown subcodes.
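For illustration, a minimal guest-side test of the intended behaviour
might look as follows. This sketch is not part of the patch and assumes
a Linux guest with <sys/prctl.h>; PR_GET_DUMPABLE is on the pass-on
list, while PR_GET_SECCOMP is explicitly blocked:

    #include <stdio.h>
    #include <errno.h>
    #include <sys/prctl.h>

    int main(void)
    {
        /* Forwarded to the host kernel by the pass-on list. */
        printf("PR_GET_DUMPABLE: %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));

        /* Guest seccomp would constrain qemu's own syscalls,
           so qemu rejects it with EINVAL instead of passing it on. */
        if (prctl(PR_GET_SECCOMP, 0, 0, 0, 0) < 0 && errno == EINVAL) {
            printf("PR_GET_SECCOMP rejected, as expected\n");
        }
        return 0;
    }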
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/syscall.c | 56 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 52 insertions(+), 4 deletions(-)

diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
 # define PR_MTE_TAG_SHIFT 3
 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
 #endif
+#ifndef PR_SET_IO_FLUSHER
+# define PR_SET_IO_FLUSHER 57
+# define PR_GET_IO_FLUSHER 58
+#endif
+#ifndef PR_SET_SYSCALL_USER_DISPATCH
+# define PR_SET_SYSCALL_USER_DISPATCH 59
+#endif

 #include "target_prctl.h"

@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
 return -TARGET_EINVAL;
 }
 return do_prctl_get_tagged_addr_ctrl(env);
+
+ case PR_GET_DUMPABLE:
+ case PR_SET_DUMPABLE:
+ case PR_GET_KEEPCAPS:
+ case PR_SET_KEEPCAPS:
+ case PR_GET_TIMING:
+ case PR_SET_TIMING:
+ case PR_GET_TIMERSLACK:
+ case PR_SET_TIMERSLACK:
+ case PR_MCE_KILL:
+ case PR_MCE_KILL_GET:
+ case PR_GET_NO_NEW_PRIVS:
+ case PR_SET_NO_NEW_PRIVS:
+ case PR_GET_IO_FLUSHER:
+ case PR_SET_IO_FLUSHER:
+ /* Some prctl options have no pointer arguments and we can pass on. */
+ return get_errno(prctl(option, arg2, arg3, arg4, arg5));
+
+ case PR_GET_CHILD_SUBREAPER:
+ case PR_SET_CHILD_SUBREAPER:
+ case PR_GET_SPECULATION_CTRL:
+ case PR_SET_SPECULATION_CTRL:
+ case PR_GET_TID_ADDRESS:
+ /* TODO */
+ return -TARGET_EINVAL;
+
+ case PR_GET_FPEXC:
+ case PR_SET_FPEXC:
+ /* Was used for SPE on PowerPC. */
+ return -TARGET_EINVAL;
+
+ case PR_GET_ENDIAN:
+ case PR_SET_ENDIAN:
+ case PR_GET_FPEMU:
+ case PR_SET_FPEMU:
+ case PR_SET_MM:
 case PR_GET_SECCOMP:
 case PR_SET_SECCOMP:
- /* Disable seccomp to prevent the target disabling syscalls we need. */
- return -TARGET_EINVAL;
+ case PR_SET_SYSCALL_USER_DISPATCH:
+ case PR_GET_THP_DISABLE:
+ case PR_SET_THP_DISABLE:
+ case PR_GET_TSC:
+ case PR_SET_TSC:
+ case PR_GET_UNALIGN:
+ case PR_SET_UNALIGN:
 default:
- /* Most prctl options have no pointer arguments */
- return get_errno(prctl(option, arg2, arg3, arg4, arg5));
+ /* Disable to prevent the target disabling stuff we need. */
+ return -TARGET_EINVAL;
 }
 }

--
2.25.1
Deleted patch
This reverts commit 1b36e4f5a5de585210ea95f2257839c2312be28f.

Despite a comment saying why cpu_common_props cannot be placed in
a file that is compiled once, it was moved anyway. Revert that.

Since then, Property is not defined in hw/core/cpu.h, so it is now
easier to declare a function to install the properties rather than
the Property array itself.

Cc: Eduardo Habkost <ehabkost@redhat.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h | 1 +
 cpu.c | 21 +++++++++++++++++++++
 hw/core/cpu-common.c | 17 +----------------
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
 GCC_FMT_ATTR(2, 3);

 /* $(top_srcdir)/cpu.c */
+void cpu_class_init_props(DeviceClass *dc);
 void cpu_exec_initfn(CPUState *cpu);
 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
 void cpu_exec_unrealizefn(CPUState *cpu);
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
 cpu_list_remove(cpu);
 }

+static Property cpu_common_props[] = {
+#ifndef CONFIG_USER_ONLY
+ /*
+ * Create a memory property for softmmu CPU object,
+ * so users can wire up its memory. (This can't go in hw/core/cpu.c
+ * because that file is compiled only once for both user-mode
+ * and system builds.) The default if no link is set up is to use
+ * the system address space.
+ */
+ DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+ MemoryRegion *),
+#endif
+ DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+ device_class_set_props(dc, cpu_common_props);
+}
+
 void cpu_exec_initfn(CPUState *cpu)
 {
 cpu->as = NULL;
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
 return cpu->cpu_index;
 }

-static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
- /* Create a memory property for softmmu CPU object,
- * so users can wire up its memory. (This can't go in hw/core/cpu.c
- * because that file is compiled only once for both user-mode
- * and system builds.) The default if no link is set up is to use
- * the system address space.
- */
- DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
- MemoryRegion *),
-#endif
- DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
- DEFINE_PROP_END_OF_LIST(),
-};
-
 static void cpu_class_init(ObjectClass *klass, void *data)
 {
 DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
 dc->realize = cpu_common_realizefn;
 dc->unrealize = cpu_common_unrealizefn;
 dc->reset = cpu_common_reset;
- device_class_set_props(dc, cpu_common_props);
+ cpu_class_init_props(dc);
 /*
 * Reason: CPUs still need special care by board code: wiring up
 * IRQs, adding reset handlers, halting non-first CPUs, ...
--
2.25.1
Deleted patch
This requires extra work for each target, but adds the
common syscall code, and the necessary flag in CPUState.
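To make the guest-visible interface concrete, a sketch of a program
exercising the new options (a hypothetical test, not part of the patch;
assumes a Linux guest where <sys/prctl.h> defines PR_SET_UNALIGN):

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        unsigned int mode;

        /* Request SIGBUS on unaligned accesses instead of fixups. */
        prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);

        /* PR_GET_UNALIGN stores the current mode through arg2. */
        if (prctl(PR_GET_UNALIGN, (unsigned long)&mode) == 0) {
            printf("unalign mode: %s\n",
                   mode & PR_UNALIGN_SIGBUS ? "sigbus" : "fixup");
        }
        return 0;
    }

The initial state can also be seeded from the command line via the new
prctl-unalign-sigbus CPU property added below.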
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h | 3 +++
 linux-user/generic/target_prctl_unalign.h | 27 +++++++++++++++++++++++
 cpu.c | 20 ++++++++++++-----
 linux-user/syscall.c | 13 +++++++++--
 4 files changed, 56 insertions(+), 7 deletions(-)
 create mode 100644 linux-user/generic/target_prctl_unalign.h

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct CPUState {

 bool ignore_memory_transaction_failures;

+ /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
+ bool prctl_unalign_sigbus;
+
 struct hax_vcpu_state *hax_vcpu;

 struct hvf_vcpu_state *hvf;
diff --git a/linux-user/generic/target_prctl_unalign.h b/linux-user/generic/target_prctl_unalign.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/linux-user/generic/target_prctl_unalign.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Generic prctl unalign functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef GENERIC_TARGET_PRCTL_UNALIGN_H
+#define GENERIC_TARGET_PRCTL_UNALIGN_H
+
+static abi_long do_prctl_get_unalign(CPUArchState *env, target_long arg2)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t res = PR_UNALIGN_NOPRINT;
+ if (cs->prctl_unalign_sigbus) {
+ res |= PR_UNALIGN_SIGBUS;
+ }
+ return put_user_u32(res, arg2);
+}
+#define do_prctl_get_unalign do_prctl_get_unalign
+
+static abi_long do_prctl_set_unalign(CPUArchState *env, target_long arg2)
+{
+ env_cpu(env)->prctl_unalign_sigbus = arg2 & PR_UNALIGN_SIGBUS;
+ return 0;
+}
+#define do_prctl_set_unalign do_prctl_set_unalign
+
+#endif /* GENERIC_TARGET_PRCTL_UNALIGN_H */
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
 cpu_list_remove(cpu);
 }

+/*
+ * This can't go in hw/core/cpu.c because that file is compiled only
+ * once for both user-mode and system builds.
+ */
 static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
 /*
- * Create a memory property for softmmu CPU object,
- * so users can wire up its memory. (This can't go in hw/core/cpu.c
- * because that file is compiled only once for both user-mode
- * and system builds.) The default if no link is set up is to use
+ * Create a property for the user-only object, so users can
+ * adjust prctl(PR_SET_UNALIGN) from the command-line.
+ * Has no effect if the target does not support the feature.
+ */
+ DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
+ prctl_unalign_sigbus, false),
+#else
+ /*
+ * Create a memory property for softmmu CPU object, so users can
+ * wire up its memory. The default if no link is set up is to use
 * the system address space.
 */
 DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
 #ifndef do_prctl_get_tagged_addr_ctrl
 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
 #endif
+#ifndef do_prctl_get_unalign
+#define do_prctl_get_unalign do_prctl_inval1
+#endif
+#ifndef do_prctl_set_unalign
+#define do_prctl_set_unalign do_prctl_inval1
+#endif

 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
 abi_long arg3, abi_long arg4, abi_long arg5)
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
 }
 return do_prctl_get_tagged_addr_ctrl(env);

+ case PR_GET_UNALIGN:
+ return do_prctl_get_unalign(env, arg2);
+ case PR_SET_UNALIGN:
+ return do_prctl_set_unalign(env, arg2);
+
 case PR_GET_DUMPABLE:
 case PR_SET_DUMPABLE:
 case PR_GET_KEEPCAPS:
@@ -XXX,XX +XXX,XX @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
 case PR_SET_THP_DISABLE:
 case PR_GET_TSC:
 case PR_SET_TSC:
- case PR_GET_UNALIGN:
- case PR_SET_UNALIGN:
 default:
 /* Disable to prevent the target disabling stuff we need. */
 return -TARGET_EINVAL;
--
2.25.1
Deleted patch
Pass in the context to each mini-helper, instead of an
incorrectly named "flags". Separate gen_load_fp and
gen_store_fp, away from the integer helpers.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/translate.c | 83 +++++++++++++++++++++++++++-------------
 1 file changed, 57 insertions(+), 26 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static inline DisasJumpType gen_invalid(DisasContext *ctx)
 return gen_excp(ctx, EXCP_OPCDEC, 0);
 }

-static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
+static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
- gen_helper_memory_to_f(t0, tmp32);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ gen_helper_memory_to_f(dest, tmp32);
 tcg_temp_free_i32(tmp32);
 }

-static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
+static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
 {
 TCGv tmp = tcg_temp_new();
- tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
- gen_helper_memory_to_g(t0, tmp);
+ tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ gen_helper_memory_to_g(dest, tmp);
 tcg_temp_free(tmp);
 }

-static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
+static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
- gen_helper_memory_to_s(t0, tmp32);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ gen_helper_memory_to_s(dest, tmp32);
 tcg_temp_free_i32(tmp32);
 }

+static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
+{
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
+}
+
+static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ void (*func)(DisasContext *, TCGv, TCGv))
+{
+ /* Loads to $f31 are prefetches, which we can treat as nops. */
+ if (likely(ra != 31)) {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ func(ctx, cpu_fir[ra], addr);
+ tcg_temp_free(addr);
+ }
+}
+
 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
 {
 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
@@ -XXX,XX +XXX,XX @@ static inline void gen_load_mem(DisasContext *ctx,
 tcg_temp_free(tmp);
 }

-static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
+static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
- gen_helper_f_to_memory(tmp32, t0);
- tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
+ gen_helper_f_to_memory(tmp32, addr);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
 tcg_temp_free_i32(tmp32);
 }

-static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
+static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
 {
 TCGv tmp = tcg_temp_new();
- gen_helper_g_to_memory(tmp, t0);
- tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
+ gen_helper_g_to_memory(tmp, src);
+ tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
 tcg_temp_free(tmp);
 }

-static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
+static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
- gen_helper_s_to_memory(tmp32, t0);
- tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
+ gen_helper_s_to_memory(tmp32, src);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
 tcg_temp_free_i32(tmp32);
 }

+static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
+{
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
+}
+
+static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ void (*func)(DisasContext *, TCGv, TCGv))
+{
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ func(ctx, load_fpr(ctx, ra), addr);
+ tcg_temp_free(addr);
+}
+
 static inline void gen_store_mem(DisasContext *ctx,
 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
 int flags),
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
 case 0x20:
 /* LDF */
 REQUIRE_FEN;
- gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
+ gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
 break;
 case 0x21:
 /* LDG */
 REQUIRE_FEN;
- gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
+ gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
 break;
 case 0x22:
 /* LDS */
 REQUIRE_FEN;
- gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
+ gen_load_fp(ctx, ra, rb, disp16, gen_lds);
 break;
 case 0x23:
 /* LDT */
 REQUIRE_FEN;
- gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
+ gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
 break;
 case 0x24:
 /* STF */
 REQUIRE_FEN;
- gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
+ gen_store_fp(ctx, ra, rb, disp16, gen_stf);
 break;
 case 0x25:
 /* STG */
 REQUIRE_FEN;
- gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
+ gen_store_fp(ctx, ra, rb, disp16, gen_stg);
 break;
 case 0x26:
 /* STS */
 REQUIRE_FEN;
- gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
+ gen_store_fp(ctx, ra, rb, disp16, gen_sts);
 break;
 case 0x27:
 /* STT */
 REQUIRE_FEN;
- gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
+ gen_store_fp(ctx, ra, rb, disp16, gen_stt);
 break;
 case 0x28:
 /* LDL */
--
2.25.1
Deleted patch
Pass in the MemOp instead of a callback.
Drop the fp argument; add a locked argument.
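The shape of the change at a call site, using the LDL_L case from the
hunks below, is:

    /* before: the behaviour is encoded in the callback */
    gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
    /* after: the MemOp carries size and sign, 'locked' handles LL/SC */
    gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);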
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/translate.c | 104 +++++++++++++++------------------------
 1 file changed, 40 insertions(+), 64 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
 }
 }

-static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
+static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ MemOp op, bool clear, bool locked)
 {
- tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
- tcg_gen_mov_i64(cpu_lock_addr, t1);
- tcg_gen_mov_i64(cpu_lock_value, t0);
-}
-
-static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
-{
- tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
- tcg_gen_mov_i64(cpu_lock_addr, t1);
- tcg_gen_mov_i64(cpu_lock_value, t0);
-}
-
-static inline void gen_load_mem(DisasContext *ctx,
- void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
- int flags),
- int ra, int rb, int32_t disp16, bool fp,
- bool clear)
-{
- TCGv tmp, addr, va;
+ TCGv addr, dest;

 /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
 prefetches, which we can treat as nops. No worries about
@@ -XXX,XX +XXX,XX @@ static inline void gen_load_mem(DisasContext *ctx,
 return;
 }

- tmp = tcg_temp_new();
- addr = load_gpr(ctx, rb);
-
- if (disp16) {
- tcg_gen_addi_i64(tmp, addr, disp16);
- addr = tmp;
- }
+ addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
 if (clear) {
- tcg_gen_andi_i64(tmp, addr, ~0x7);
- addr = tmp;
+ tcg_gen_andi_i64(addr, addr, ~0x7);
 }

- va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
- tcg_gen_qemu_load(va, addr, ctx->mem_idx);
+ dest = ctx->ir[ra];
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

- tcg_temp_free(tmp);
+ if (locked) {
+ tcg_gen_mov_i64(cpu_lock_addr, addr);
+ tcg_gen_mov_i64(cpu_lock_value, dest);
+ }
+ tcg_temp_free(addr);
 }

 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
@@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
 tcg_temp_free(addr);
 }

-static inline void gen_store_mem(DisasContext *ctx,
- void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
- int flags),
- int ra, int rb, int32_t disp16, bool fp,
- bool clear)
+static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
+ MemOp op, bool clear)
 {
- TCGv tmp, addr, va;
+ TCGv addr, src;

- tmp = tcg_temp_new();
- addr = load_gpr(ctx, rb);
-
- if (disp16) {
- tcg_gen_addi_i64(tmp, addr, disp16);
- addr = tmp;
- }
+ addr = tcg_temp_new();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
 if (clear) {
- tcg_gen_andi_i64(tmp, addr, ~0x7);
- addr = tmp;
+ tcg_gen_andi_i64(addr, addr, ~0x7);
 }

- va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
- tcg_gen_qemu_store(va, addr, ctx->mem_idx);
+ src = load_gpr(ctx, ra);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);

- tcg_temp_free(tmp);
+ tcg_temp_free(addr);
 }

 static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
 case 0x0A:
 /* LDBU */
 REQUIRE_AMASK(BWX);
- gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
 break;
 case 0x0B:
 /* LDQ_U */
- gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0);
 break;
 case 0x0C:
 /* LDWU */
 REQUIRE_AMASK(BWX);
- gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
 break;
 case 0x0D:
 /* STW */
 REQUIRE_AMASK(BWX);
- gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
 break;
 case 0x0E:
 /* STB */
 REQUIRE_AMASK(BWX);
- gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
+ gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
 break;
 case 0x0F:
 /* STQ_U */
- gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1);
 break;

 case 0x10:
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
 break;
 case 0x2:
 /* Longword physical access with lock (hw_ldl_l/p) */
- gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_mov_i64(cpu_lock_addr, addr);
+ tcg_gen_mov_i64(cpu_lock_value, va);
 break;
 case 0x3:
 /* Quadword physical access with lock (hw_ldq_l/p) */
- gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_mov_i64(cpu_lock_addr, addr);
+ tcg_gen_mov_i64(cpu_lock_value, va);
 break;
 case 0x4:
 /* Longword virtual PTE fetch (hw_ldl/v) */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
 break;
 case 0x28:
 /* LDL */
- gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
 break;
 case 0x29:
 /* LDQ */
- gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0);
 break;
 case 0x2A:
 /* LDL_L */
- gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
 break;
 case 0x2B:
 /* LDQ_L */
- gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1);
 break;
 case 0x2C:
 /* STL */
- gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
 break;
 case 0x2D:
 /* STQ */
- gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0);
 break;
 case 0x2E:
 /* STL_C */
--
2.25.1
Deleted patch
Leave TARGET_ALIGNED_ONLY set, but use the new CPUState
flag to set MO_UNALN for the instructions that the kernel
handles in the unaligned trap.
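For reference, a sketch of a guest test that the new mode affects
(hypothetical, not part of the patch; the misaligned dereference is
undefined behaviour in ISO C but is the usual way to reach this path,
assuming the compiler emits a plain 8-byte load):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/prctl.h>

    static char buf[16] __attribute__((aligned(8)));

    static void on_sigbus(int sig)
    {
        write(STDOUT_FILENO, "SIGBUS\n", 7);
        _exit(0);
    }

    int main(void)
    {
        signal(SIGBUS, on_sigbus);
        prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);

        /* Without the prctl, the kernel (and now qemu-user)
           silently fixes up this misaligned quadword load. */
        volatile long *p = (volatile long *)(buf + 1);
        long v = *p;

        printf("no trap, loaded %ld\n", v);
        return 0;
    }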
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/alpha/target_prctl.h | 2 +-
 target/alpha/cpu.h | 5 +++++
 target/alpha/translate.c | 31 ++++++++++++++++++++++---------
 3 files changed, 28 insertions(+), 10 deletions(-)

diff --git a/linux-user/alpha/target_prctl.h b/linux-user/alpha/target_prctl.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/alpha/target_prctl.h
+++ b/linux-user/alpha/target_prctl.h
@@ -1 +1 @@
-/* No special prctl support required. */
+#include "../generic/target_prctl_unalign.h"
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
 #define ENV_FLAG_TB_MASK \
 (ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN)

+#define TB_FLAG_UNALIGN (1u << 1)
+
 static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
 {
 int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
 *pc = env->pc;
 *cs_base = 0;
 *pflags = env->flags & ENV_FLAG_TB_MASK;
+#ifdef CONFIG_USER_ONLY
+ *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
+#endif
 }

 #ifdef CONFIG_USER_ONLY
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext DisasContext;
 struct DisasContext {
 DisasContextBase base;

-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ MemOp unalign;
+#else
 uint64_t palbr;
 #endif
 uint32_t tbflags;
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
 TCGv sink;
 };

+#ifdef CONFIG_USER_ONLY
+#define UNALIGN(C) (C)->unalign
+#else
+#define UNALIGN(C) 0
+#endif
+
 /* Target-specific return values from translate_one, indicating the
 state of the TB. Note that DISAS_NEXT indicates that we are not
 exiting the TB. */
@@ -XXX,XX +XXX,XX @@ static inline DisasJumpType gen_invalid(DisasContext *ctx)
 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
 gen_helper_memory_to_f(dest, tmp32);
 tcg_temp_free_i32(tmp32);
 }
@@ -XXX,XX +XXX,XX @@ static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
 {
 TCGv tmp = tcg_temp_new();
- tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx));
 gen_helper_memory_to_g(dest, tmp);
 tcg_temp_free(tmp);
 }
@@ -XXX,XX +XXX,XX @@ static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
 gen_helper_memory_to_s(dest, tmp32);
 tcg_temp_free_i32(tmp32);
 }

 static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
 {
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx));
 }

 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
@@ -XXX,XX +XXX,XX @@ static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
 if (clear) {
 tcg_gen_andi_i64(addr, addr, ~0x7);
+ } else if (!locked) {
+ op |= UNALIGN(ctx);
 }

 dest = ctx->ir[ra];
@@ -XXX,XX +XXX,XX @@ static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
 gen_helper_f_to_memory(tmp32, addr);
- tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
 tcg_temp_free_i32(tmp32);
 }

@@ -XXX,XX +XXX,XX @@ static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
 {
 TCGv tmp = tcg_temp_new();
 gen_helper_g_to_memory(tmp, src);
- tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx));
 tcg_temp_free(tmp);
 }

@@ -XXX,XX +XXX,XX @@ static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
 {
 TCGv_i32 tmp32 = tcg_temp_new_i32();
 gen_helper_s_to_memory(tmp32, src);
- tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
 tcg_temp_free_i32(tmp32);
 }

 static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
 {
- tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ | UNALIGN(ctx));
 }

 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
@@ -XXX,XX +XXX,XX @@ static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
 if (clear) {
 tcg_gen_andi_i64(addr, addr, ~0x7);
+ } else {
+ op |= UNALIGN(ctx);
 }

 src = load_gpr(ctx, ra);
@@ -XXX,XX +XXX,XX @@ static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)

 #ifdef CONFIG_USER_ONLY
 ctx->ir = cpu_std_ir;
+ ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
 #else
 ctx->palbr = env->palbr;
 ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
--
2.25.1
Deleted patch
Leave TARGET_ALIGNED_ONLY set, but use the new CPUState
flag to set MO_UNALN for the instructions that the kernel
handles in the unaligned trap.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/hppa/target_prctl.h | 2 +-
 target/hppa/cpu.h | 5 ++++-
 target/hppa/translate.c | 19 +++++++++++++++----
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/linux-user/hppa/target_prctl.h b/linux-user/hppa/target_prctl.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/hppa/target_prctl.h
+++ b/linux-user/hppa/target_prctl.h
@@ -1 +1 @@
-/* No special prctl support required. */
+#include "../generic/target_prctl_unalign.h"
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
 return hppa_form_gva_psw(env->psw, spc, off);
 }

-/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
+/*
+ * Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
 * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
 * same value.
 */
 #define TB_FLAG_SR_SAME PSW_I
 #define TB_FLAG_PRIV_SHIFT 8
+#define TB_FLAG_UNALIGN 0x400

 static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
 target_ulong *cs_base,
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
 #ifdef CONFIG_USER_ONLY
 *pc = env->iaoq_f & -4;
 *cs_base = env->iaoq_b & -4;
+ flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
 #else
 /* ??? E, T, H, L, B, P bits need to be here, when implemented. */
 flags |= env->psw & (PSW_W | PSW_C | PSW_D);
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
 int mmu_idx;
 int privilege;
 bool psw_n_nonzero;
+
+#ifdef CONFIG_USER_ONLY
+ MemOp unalign;
+#endif
 } DisasContext;

+#ifdef CONFIG_USER_ONLY
+#define UNALIGN(C) (C)->unalign
+#else
+#define UNALIGN(C) 0
+#endif
+
 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
 static int expand_sm_imm(DisasContext *ctx, int val)
 {
@@ -XXX,XX +XXX,XX @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,

 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
 ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
 if (modify) {
 save_gpr(ctx, rb, ofs);
 }
@@ -XXX,XX +XXX,XX @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,

 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
 ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
 if (modify) {
 save_gpr(ctx, rb, ofs);
 }
@@ -XXX,XX +XXX,XX @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,

 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
 ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
 if (modify) {
 save_gpr(ctx, rb, ofs);
 }
@@ -XXX,XX +XXX,XX @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,

 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
 ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
 if (modify) {
 save_gpr(ctx, rb, ofs);
 }
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 ctx->mmu_idx = MMU_USER_IDX;
 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
+ ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
 #else
 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
--
2.25.1
Deleted patch
1
Leave TARGET_ALIGNED_ONLY set, but use the new CPUState
2
flag to set MO_UNALN for the instructions that the kernel
3
handles in the unaligned trap.
4
1
5
The Linux kernel does not handle all memory operations: no
6
floating-point and no MAC.
7
8

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/sh4/target_prctl.h | 2 +-
target/sh4/cpu.h | 4 +++
target/sh4/translate.c | 50 ++++++++++++++++++++++++-----------
3 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/linux-user/sh4/target_prctl.h b/linux-user/sh4/target_prctl.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/sh4/target_prctl.h
+++ b/linux-user/sh4/target_prctl.h
@@ -1 +1 @@
-/* No special prctl support required. */
+#include "../generic/target_prctl_unalign.h"
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@
#define DELAY_SLOT_RTE (1 << 2)

#define TB_FLAG_PENDING_MOVCA (1 << 3)
+#define TB_FLAG_UNALIGN (1 << 4)

#define GUSA_SHIFT 4
#ifdef CONFIG_USER_ONLY
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
| (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
| (env->sr & (1u << SR_FD)) /* Bit 15 */
| (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
+#ifdef CONFIG_USER_ONLY
+ *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
+#endif
}

#endif /* SH4_CPU_H */
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
+#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : 0)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
+#define UNALIGN(C) 0
#endif

/* Target-specific values for ctx->base.is_jmp. */
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
     tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
    return;
case 0x2001:        /* mov.w Rm,@Rn */
- tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
    return;
case 0x2002:        /* mov.l Rm,@Rn */
- tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
    return;
case 0x6000:        /* mov.b @Rm,Rn */
     tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
    return;
case 0x6001:        /* mov.w @Rm,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
    return;
case 0x6002:        /* mov.l @Rm,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
    return;
case 0x2004:        /* mov.b Rm,@-Rn */
    {
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_subi_i32(addr, REG(B11_8), 2);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
     tcg_gen_mov_i32(REG(B11_8), addr);
     tcg_temp_free(addr);
    }
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
     tcg_gen_mov_i32(REG(B11_8), addr);
     tcg_temp_free(addr);
    }
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
        tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
    return;
case 0x6005:        /* mov.w @Rm+,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
    if ( B11_8 != B7_4 )
        tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
    return;
case 0x6006:        /* mov.l @Rm+,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
    if ( B11_8 != B7_4 )
        tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
    {
     TCGv addr = tcg_temp_new();
     tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
     tcg_temp_free(addr);
    }
    return;
--
2.25.1

Handle BUS_ADRALN via cpu_loop_exit_sigbus, but allow other SIGBUS
si_codes to continue into the host-to-guest signal conversion code.
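
What the guest observes after this change, as a minimal sketch (the
multiarch test added later in this series exercises the same path): a
misaligned access raises a synchronous SIGBUS whose si_code lets a
handler tell alignment faults apart from other bus errors.

    #include <signal.h>
    #include <unistd.h>

    static void on_sigbus(int sig, siginfo_t *info, void *uc)
    {
        /* Alignment faults arrive as BUS_ADRALN at the faulting
           guest address (info->si_addr). */
        if (info->si_code == BUS_ADRALN) {
            write(STDOUT_FILENO, "BUS_ADRALN\n", 11);
        }
        _exit(0);
    }

    int main(void)
    {
        struct sigaction sa = {
            .sa_sigaction = on_sigbus,
            .sa_flags = SA_SIGINFO,
        };
        sigaction(SIGBUS, &sa, NULL);
        /* ... perform a misaligned access here ... */
        return 0;
    }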

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/signal.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/linux-user/signal.c b/linux-user/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
} else {
sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+ if (info->si_code == BUS_ADRALN) {
+ cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
+ }
}

sync_sig = true;
--
2.25.1

Having observed e.g. al8+leq in dumps, canonicalize to al+leq.
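
A toy model of the rule (the MO_* values below are simplified
stand-ins, not QEMU's actual memop.h encodings, and this
get_alignment_bits is likewise a stand-in): when the requested
alignment equals the access size, re-encode the explicit MO_ALIGN_XX
as the generic MO_ALIGN, so dumps print "al+leq" rather than "al8+leq".

    #include <assert.h>

    enum {
        MO_SIZE    = 0x03,           /* log2(bytes); MO_64 == 3 */
        MO_64      = 3,
        MO_ASHIFT  = 4,
        MO_AMASK   = 7 << MO_ASHIFT,
        MO_ALIGN   = MO_AMASK,       /* "align to the access size" */
        MO_ALIGN_8 = 3 << MO_ASHIFT, /* explicit 8-byte alignment */
    };

    static unsigned get_alignment_bits(int op)
    {
        int a = op & MO_AMASK;
        return a == MO_ALIGN ? (op & MO_SIZE) : a >> MO_ASHIFT;
    }

    int main(void)
    {
        int op = MO_64 | MO_ALIGN_8;              /* "al8+leq" */
        if (get_alignment_bits(op) == (op & MO_SIZE)) {
            op = (op & ~MO_AMASK) | MO_ALIGN;     /* now "al+leq" */
        }
        assert((op & MO_AMASK) == MO_ALIGN);
        return 0;
    }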

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void)
static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
{
/* Trigger the asserts within as early as possible. */
- (void)get_alignment_bits(op);
+ unsigned a_bits = get_alignment_bits(op);
+
+ /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
+ if (a_bits == (op & MO_SIZE)) {
+ op = (op & ~MO_AMASK) | MO_ALIGN;
+ }

switch (op & MO_SIZE) {
case MO_8:
--
2.25.1

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.h | 2 -
tcg/i386/tcg-target.c.inc | 103 ++++++++++++++++++++++++++++++++++++--
2 files changed, 98 insertions(+), 7 deletions(-)
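
The user-only fast path emitted below reduces to one mask test of the
low address bits. A C restatement of that check (a sketch, not the
generated code; the real output is a testb/testl plus a jne into an
out-of-line stub that tail-calls helper_unaligned_ld/st):

    #include <stdint.h>
    #include <stdio.h>

    /* a_bits comes from get_alignment_bits(opc); zero means no check. */
    static int misaligned(uintptr_t addr, unsigned a_bits)
    {
        uintptr_t a_mask = ((uintptr_t)1 << a_bits) - 1;
        return (addr & a_mask) != 0;   /* testb $a_mask, %addr; jne */
    }

    int main(void)
    {
        printf("%d\n", misaligned(0x1002, 2)); /* 4-byte op, off by 2: 1 */
        printf("%d\n", misaligned(0x1000, 2)); /* aligned: 0 */
        return 0;
    }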

diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,

#define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe

-#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
-#endif
#define TCG_TARGET_NEED_POOL_LABELS

#endif
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
* THE SOFTWARE.
*/

+#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32    (0x90)

-#define OPC_GRP3_Ev    (0xf7)
-#define OPC_GRP5    (0xff)
+#define OPC_GRP3_Eb (0xf6)
+#define OPC_GRP3_Ev (0xf7)
+#define OPC_GRP5 (0xff)
#define OPC_GRP14 (0x73 | P_EXT | P_DATA16)

/* Group 1 opcode extensions for 0x80-0x83.
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
+#define EXT3_TESTi 0
#define EXT3_NOT 2
#define EXT3_NEG 3
#define EXT3_MUL 4
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
}

#if defined(CONFIG_SOFTMMU)
-#include "../tcg-ldst.c.inc"
-
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
*/
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
return true;
}
-#elif TCG_TARGET_REG_BITS == 32
+#else
+
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
+ TCGReg addrhi, unsigned a_bits)
+{
+ unsigned a_mask = (1 << a_bits) - 1;
+ TCGLabelQemuLdst *label;
+
+ /*
+ * We are expecting a_bits to max out at 7, so we can usually use testb.
+ * For i686, we have to use testl for %esi/%edi.
+ */
+ if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
+ tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
+ tcg_out8(s, a_mask);
+ } else {
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
+ tcg_out32(s, a_mask);
+ }
+
+ /* jne slow_path */
+ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+
+ label = new_ldst_label(s);
+ label->is_ld = is_ld;
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = addrhi;
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4);
+ label->label_ptr[0] = s->code_ptr;
+
+ s->code_ptr += 4;
+}
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ /* resolve label address */
+ tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ int ofs = 0;
+
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+ ofs += 4;
+
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ if (TARGET_LONG_BITS == 64) {
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+ ofs += 4;
+ }
+
+ tcg_out_pushi(s, (uintptr_t)l->raddr);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
+ l->addrlo_reg);
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
+ tcg_out_push(s, TCG_REG_RAX);
+ }
+
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
+ : helper_unaligned_st));
+ return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+#if TCG_TARGET_REG_BITS == 32
# define x86_guest_base_seg 0
# define x86_guest_base_index -1
# define x86_guest_base_offset guest_base
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
return 0;
}
# endif
+#endif
#endif /* SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
#if defined(CONFIG_SOFTMMU)
int mem_index;
tcg_insn_unit *label_ptr[2];
+#else
+ unsigned a_bits;
#endif

datalo = *args++;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
s->code_ptr, label_ptr);
#else
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
+ }
+
tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg,
is64, opc);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
#if defined(CONFIG_SOFTMMU)
int mem_index;
tcg_insn_unit *label_ptr[2];
+#else
+ unsigned a_bits;
#endif

datalo = *args++;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
s->code_ptr, label_ptr);
#else
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
+ }
+
tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg, opc);
#endif
--
2.25.1

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target.h | 2 -
tcg/ppc/tcg-target.c.inc | 98 ++++++++++++++++++++++++++++++++++++----
2 files changed, 90 insertions(+), 10 deletions(-)
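
One subtle piece of the slow path below is marshalling a 64-bit guest
address held in a register pair into the argument registers without
clobbering either half. A toy model of that three-case shuffle (plain
ints stand in for registers; the register names are illustrative only):

    #include <assert.h>

    enum { R0, R3, R4, R5, NREGS };   /* scratch + argument regs (toy) */

    static int reg[NREGS];

    static void mov(int dst, int src) { reg[dst] = reg[src]; }

    /* Place {hi, lo} into {arg, arg+1}, clobbering only R0. */
    static void marshal_pair(int arg, int hi, int lo)
    {
        if (lo != arg) {
            mov(arg, hi);             /* writing arg cannot hit lo */
            mov(arg + 1, lo);
        } else if (hi != arg + 1) {
            mov(arg + 1, lo);         /* move lo out of arg first */
            mov(arg, hi);
        } else {
            mov(R0, arg);             /* full overlap: swap via R0 */
            mov(arg, arg + 1);
            mov(arg + 1, R0);
        }
    }

    int main(void)
    {
        reg[R4] = 20;                 /* lo half, already in arg */
        reg[R5] = 10;                 /* hi half, already in arg+1 */
        marshal_pair(R4, R5, R4);     /* worst case: swapped in place */
        assert(reg[R4] == 10 && reg[R5] == 20);
        return 0;
    }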

diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1

-#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
-#endif
#define TCG_TARGET_NEED_POOL_LABELS

#endif
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@

#include "elf.h"
#include "../tcg-pool.c.inc"
+#include "../tcg-ldst.c.inc"

/*
* Standardize on the _CALL_FOO symbols used by GCC:
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
}
}

-static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
+static void tcg_out_call_int(TCGContext *s, int lk,
+ const tcg_insn_unit *target)
{
#ifdef _CALL_AIX
/* Look through the descriptor. If the branch is in range, and we
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)

if (in_range_b(diff) && toc == (uint32_t)toc) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
- tcg_out_b(s, LK, tgt);
+ tcg_out_b(s, lk, tgt);
} else {
/* Fold the low bits of the constant into the addresses below. */
intptr_t arg = (intptr_t)target;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
+ tcg_out32(s, BCCTR | BO_ALWAYS | lk);
}
#elif defined(_CALL_ELF) && _CALL_ELF == 2
intptr_t diff;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)

diff = tcg_pcrel_diff(s, target);
if (in_range_b(diff)) {
- tcg_out_b(s, LK, target);
+ tcg_out_b(s, lk, target);
} else {
tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
+ tcg_out32(s, BCCTR | BO_ALWAYS | lk);
}
#else
- tcg_out_b(s, LK, target);
+ tcg_out_b(s, lk, target);
#endif
}

+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
+{
+ tcg_out_call_int(s, LK, target);
+}
+
static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
[MO_UB] = LBZX,
[MO_UW] = LHZX,
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = {
};

#if defined (CONFIG_SOFTMMU)
-#include "../tcg-ldst.c.inc"
-
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
*/
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_b(s, 0, lb->raddr);
return true;
}
+#else
+
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
+ TCGReg addrhi, unsigned a_bits)
+{
+ unsigned a_mask = (1 << a_bits) - 1;
+ TCGLabelQemuLdst *label = new_ldst_label(s);
+
+ label->is_ld = is_ld;
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = addrhi;
+
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
+ tcg_debug_assert(a_bits < 16);
+ tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));
+
+ label->label_ptr[0] = s->code_ptr;
+ tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
+
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr);
+}
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+ return false;
+ }
+
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
+ TCGReg arg = TCG_REG_R4;
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
+ arg |= 1;
+#endif
+ if (l->addrlo_reg != arg) {
+ tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
+ } else if (l->addrhi_reg != arg + 1) {
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
+ tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
+ tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
+ }
+ } else {
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
+ }
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
+
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
+ : helper_unaligned_st));
+ return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
#endif /* SOFTMMU */

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
#ifdef CONFIG_SOFTMMU
int mem_index;
tcg_insn_unit *label_ptr;
+#else
+ unsigned a_bits;
#endif

datalo = *args++;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)

rbase = TCG_REG_R3;
#else /* !CONFIG_SOFTMMU */
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
+ }
rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
#ifdef CONFIG_SOFTMMU
int mem_index;
tcg_insn_unit *label_ptr;
+#else
+ unsigned a_bits;
#endif

datalo = *args++;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)

rbase = TCG_REG_R3;
#else /* !CONFIG_SOFTMMU */
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
+ }
rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
--
2.25.1

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target.h | 2 --
tcg/s390x/tcg-target.c.inc | 59 ++++++++++++++++++++++++++++++++++++--
2 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
/* no need to flush icache explicitly */
}

-#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
-#endif
#define TCG_TARGET_NEED_POOL_LABELS

#endif
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
#error "unsupported code generation mode"
#endif

+#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "elf.h"

@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
RI_OIHL = 0xa509,
RI_OILH = 0xa50a,
RI_OILL = 0xa50b,
+ RI_TMLL = 0xa701,

RIE_CGIJ = 0xec7c,
RIE_CGRJ = 0xec64,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
}

#if defined(CONFIG_SOFTMMU)
-#include "../tcg-ldst.c.inc"
-
/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
return true;
}
#else
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld,
+ TCGReg addrlo, unsigned a_bits)
+{
+ unsigned a_mask = (1 << a_bits) - 1;
+ TCGLabelQemuLdst *l = new_ldst_label(s);
+
+ l->is_ld = is_ld;
+ l->addrlo_reg = addrlo;
+
+ /* We are expecting a_bits to max out at 7, much lower than TMLL. */
+ tcg_debug_assert(a_bits < 16);
+ tcg_out_insn(s, RI, TMLL, addrlo, a_mask);
+
+ tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
+ l->label_ptr[0] = s->code_ptr;
+ s->code_ptr += 1;
+
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
+}
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL,
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
+ return false;
+ }
+
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
+
+ /* "Tail call" to the helper, with the return address back inline. */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr);
+ tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld
+ : helper_unaligned_st));
+ return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
TCGReg *index_reg, tcg_target_long *disp)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
#else
TCGReg index_reg;
tcg_target_long disp;
+ unsigned a_bits = get_alignment_bits(opc);

+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
+ }
tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
#else
TCGReg index_reg;
tcg_target_long disp;
+ unsigned a_bits = get_alignment_bits(opc);

+ if (a_bits) {
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
+ }
tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
--
2.25.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
MemOpIdx oi, const void *tb_ptr)
{
- MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
+ MemOp mop = get_memop(oi);
uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
- switch (mop) {
+ switch (mop & (MO_BSWAP | MO_SSIZE)) {
case MO_UB:
return helper_ret_ldub_mmu(env, taddr, oi, ra);
case MO_SB:
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
}
#else
void *haddr = g2h(env_cpu(env), taddr);
+ unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
uint64_t ret;

set_helper_retaddr(ra);
- switch (mop) {
+ if (taddr & a_mask) {
+ helper_unaligned_ld(env, taddr);
+ }
+ switch (mop & (MO_BSWAP | MO_SSIZE)) {
case MO_UB:
ret = ldub_p(haddr);
break;
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
MemOpIdx oi, const void *tb_ptr)
{
- MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
+ MemOp mop = get_memop(oi);
uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
- switch (mop) {
+ switch (mop & (MO_BSWAP | MO_SIZE)) {
case MO_UB:
helper_ret_stb_mmu(env, taddr, val, oi, ra);
break;
@@ -XXX,XX +XXX,XX @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
}
#else
void *haddr = g2h(env_cpu(env), taddr);
+ unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

set_helper_retaddr(ra);
- switch (mop) {
+ if (taddr & a_mask) {
+ helper_unaligned_st(env, taddr);
+ }
+ switch (mop & (MO_BSWAP | MO_SIZE)) {
case MO_UB:
stb_p(haddr, val);
break;
--
2.25.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/riscv/tcg-target.h | 2 --
tcg/riscv/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++++++++++--
2 files changed, 61 insertions(+), 4 deletions(-)

diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);

#define TCG_TARGET_DEFAULT_MO (0)

-#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
-#endif
#define TCG_TARGET_NEED_POOL_LABELS

#define TCG_TARGET_HAS_MEMORY_BSWAP 0
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
* THE SOFTWARE.
*/

+#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
*/

#if defined(CONFIG_SOFTMMU)
-#include "../tcg-ldst.c.inc"
-
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* MemOpIdx oi, uintptr_t ra)
*/
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_goto(s, l->raddr);
return true;
}
+#else
+
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
+ unsigned a_bits)
+{
+ unsigned a_mask = (1 << a_bits) - 1;
+ TCGLabelQemuLdst *l = new_ldst_label(s);
+
+ l->is_ld = is_ld;
+ l->addrlo_reg = addr_reg;
+
+ /* We are expecting a_bits to max out at 7, so we can always use andi. */
+ tcg_debug_assert(a_bits < 12);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+
+ l->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
+}
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ /* resolve label address */
+ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+ return false;
+ }
+
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
+
+ /* tail call, with the return address back inline. */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
+ tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
+ : helper_unaligned_st), true);
+ return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[1];
+#else
+ unsigned a_bits;
#endif
TCGReg base = TCG_REG_TMP0;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
}
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addr_regl, a_bits);
+ }
if (guest_base != 0) {
tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[1];
+#else
+ unsigned a_bits;
#endif
TCGReg base = TCG_REG_TMP0;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
}
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, false, addr_regl, a_bits);
+ }
if (guest_base != 0) {
tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
}
--
2.25.1

A mostly generic test for unaligned access raising SIGBUS.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tests/tcg/multiarch/sigbus.c | 68 ++++++++++++++++++++++++++++++++++++
1 file changed, 68 insertions(+)
create mode 100644 tests/tcg/multiarch/sigbus.c

diff --git a/tests/tcg/multiarch/sigbus.c b/tests/tcg/multiarch/sigbus.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/multiarch/sigbus.c
@@ -XXX,XX +XXX,XX @@
+#define _GNU_SOURCE 1
+
+#include <assert.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <endian.h>
+
+
+unsigned long long x = 0x8877665544332211ull;
+void * volatile p = (void *)&x + 1;
+
+void sigbus(int sig, siginfo_t *info, void *uc)
+{
+    assert(sig == SIGBUS);
+    assert(info->si_signo == SIGBUS);
+#ifdef BUS_ADRALN
+    assert(info->si_code == BUS_ADRALN);
+#endif
+    assert(info->si_addr == p);
+    exit(EXIT_SUCCESS);
+}
+
+int main()
+{
+    struct sigaction sa = {
+        .sa_sigaction = sigbus,
+        .sa_flags = SA_SIGINFO
+    };
+    int allow_fail = 0;
+    int tmp;
+
+    tmp = sigaction(SIGBUS, &sa, NULL);
+    assert(tmp == 0);
+
+    /*
+     * Select an operation that's likely to enforce alignment.
+     * On many guests that support unaligned accesses by default,
+     * this is often an atomic operation.
+     */
+#if defined(__aarch64__)
+    asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__alpha__)
+    asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__arm__)
+    asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__powerpc__)
+    asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__riscv_atomic)
+    asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
+#else
+    /* No insn known to fault unaligned -- try for a straight load. */
+    allow_fail = 1;
+    tmp = *(volatile int *)p;
+#endif
+
+    assert(allow_fail);
+
+    /*
+     * We didn't see a signal.
+     * We might as well validate the unaligned load worked.
+     */
+    if (BYTE_ORDER == LITTLE_ENDIAN) {
+        assert(tmp == 0x55443322);
+    } else {
+        assert(tmp == 0x77665544);
+    }
+    return EXIT_SUCCESS;
+}
--
2.25.1