The following changes since commit a9fe9e191b4305b88c356a1ed9ac3baf89eb18aa:

  Merge tag 'pull-riscv-to-apply-20230505-1' of https://github.com/alistair23/qemu into staging (2023-05-05 09:25:13 +0100)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230505

for you to fetch changes up to 35a0bd63b458f30389b6bc6b7471c1665fe7b9d8:

  tcg: Widen helper_*_st[bw]_mmu val arguments (2023-05-05 17:21:03 +0100)

----------------------------------------------------------------
softfloat: Fix the incorrect computation in float32_exp2
tcg: Remove compatability helpers for qemu ld/st
target/alpha: Remove TARGET_ALIGNED_ONLY
target/hppa: Remove TARGET_ALIGNED_ONLY
target/sparc: Remove TARGET_ALIGNED_ONLY
tcg: Cleanups preparing to unify calls to qemu_ld/st helpers

----------------------------------------------------------------
Richard Henderson (41):
      target/avr: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/cris: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/Hexagon: Finish conversion to tcg_gen_qemu_{ld, st}_*
      target/m68k: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/mips: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/s390x: Finish conversion to tcg_gen_qemu_{ld, st}_*
      target/sparc: Finish conversion to tcg_gen_qemu_{ld, st}_*
      target/xtensa: Finish conversion to tcg_gen_qemu_{ld, st}_*
      tcg: Remove compatability helpers for qemu ld/st
      target/alpha: Use MO_ALIGN for system UNALIGN()
      target/alpha: Use MO_ALIGN where required
      target/alpha: Remove TARGET_ALIGNED_ONLY
      target/hppa: Use MO_ALIGN for system UNALIGN()
      target/hppa: Remove TARGET_ALIGNED_ONLY
      target/sparc: Use MO_ALIGN where required
      target/sparc: Use cpu_ld*_code_mmu
      target/sparc: Remove TARGET_ALIGNED_ONLY
      tcg/i386: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/i386: Generalize multi-part load overlap test
      tcg/i386: Introduce HostAddress
      tcg/i386: Drop r0+r1 local variables from tcg_out_tlb_load
      tcg/i386: Introduce tcg_out_testi
      tcg/aarch64: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/aarch64: Introduce HostAddress
      tcg/arm: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/arm: Introduce HostAddress
      tcg/loongarch64: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/loongarch64: Introduce HostAddress
      tcg/mips: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/ppc: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/ppc: Introduce HostAddress
      tcg/riscv: Require TCG_TARGET_REG_BITS == 64
      tcg/riscv: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/s390x: Pass TCGType to tcg_out_qemu_{ld,st}
      tcg/s390x: Introduce HostAddress
      tcg/sparc64: Drop is_64 test from tcg_out_qemu_ld data return
      tcg/sparc64: Pass TCGType to tcg_out_qemu_{ld,st}
      tcg: Move TCGLabelQemuLdst to tcg.c
      tcg: Replace REG_P with arg_loc_reg_p
      tcg: Introduce arg_slot_stk_ofs
      tcg: Widen helper_*_st[bw]_mmu val arguments

Shivaprasad G Bhat (1):
      softfloat: Fix the incorrect computation in float32_exp2

 configs/targets/alpha-linux-user.mak        |   1 -
 configs/targets/alpha-softmmu.mak           |   1 -
 configs/targets/hppa-linux-user.mak         |   1 -
 configs/targets/hppa-softmmu.mak            |   1 -
 configs/targets/sparc-linux-user.mak        |   1 -
 configs/targets/sparc-softmmu.mak           |   1 -
 configs/targets/sparc32plus-linux-user.mak  |   1 -
 configs/targets/sparc64-linux-user.mak      |   1 -
 configs/targets/sparc64-softmmu.mak         |   1 -
 include/tcg/tcg-ldst.h                      |  10 +-
 include/tcg/tcg-op.h                        |  55 -----
 target/hexagon/macros.h                     |  14 +-
 tcg/riscv/tcg-target-con-set.h              |   8 -
 tcg/riscv/tcg-target.h                      |  22 +-
 tcg/tcg-internal.h                          |   4 -
 accel/tcg/cputlb.c                          |   6 +-
 fpu/softfloat.c                             |   2 +-
 target/alpha/translate.c                    |  38 +--
 target/avr/translate.c                      |  16 +-
 target/hexagon/genptr.c                     |   8 +-
 target/hexagon/idef-parser/parser-helpers.c |  28 +--
 target/hexagon/translate.c                  |  32 +--
 target/hppa/translate.c                     |   2 +-
 target/m68k/translate.c                     |  76 ++----
 target/mips/tcg/translate.c                 |   8 +-
 target/s390x/tcg/translate.c                | 152 ++++++------
 target/sparc/ldst_helper.c                  |  10 +-
 target/sparc/translate.c                    |  85 ++++---
 target/xtensa/translate.c                   |   4 +-
 tcg/tcg.c                                   |  58 +++--
 target/cris/translate_v10.c.inc             |  18 +-
 target/mips/tcg/nanomips_translate.c.inc    |   2 +-
 tcg/aarch64/tcg-target.c.inc                | 108 ++++++---
 tcg/arm/tcg-target.c.inc                    | 357 +++++++++++++---------------
 tcg/i386/tcg-target.c.inc                   | 345 ++++++++++++++-------------
 tcg/loongarch64/tcg-target.c.inc            | 135 +++++------
 tcg/mips/tcg-target.c.inc                   | 186 ++++++++-------
 tcg/ppc/tcg-target.c.inc                    | 192 ++++++++-------
 tcg/riscv/tcg-target.c.inc                  | 268 ++++++---------------
 tcg/s390x/tcg-target.c.inc                  | 131 +++++-----
 tcg/sparc64/tcg-target.c.inc                |   8 +-
 tcg/tcg-ldst.c.inc                          |  14 --
 42 files changed, 1120 insertions(+), 1291 deletions(-)
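Reader's note before the individual patches: most of this series mechanically
replaces the old load/store helpers, whose names encoded size, signedness and
target endianness, with the explicit-MemOp interface. A minimal before/after
sketch (an illustrative fragment assuming a QEMU translator context, not a
standalone program):

    /* Before: the operation is implicit in the helper name. */
    tcg_gen_qemu_ld16s(val, addr, mem_idx);            /* signed 16-bit load */
    tcg_gen_qemu_st64(v64, addr, mem_idx);             /* 64-bit store */

    /* After: one entry point per value type; MemOp spells it out. */
    tcg_gen_qemu_ld_tl(val, addr, mem_idx, MO_TESW);   /* target-endian signed word */
    tcg_gen_qemu_st_i64(v64, addr, mem_idx, MO_TEUQ);  /* target-endian unsigned quad */

The MemOp names compose a size (B/W/L/Q), an optional sign (S/U), and MO_TE
for target endianness, which is exactly the mapping the conversion patches
below apply case by case.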
From: Shivaprasad G Bhat <sbhat@linux.ibm.com>

The float32_exp2 function computes the wrong exponent of 2.

For example, with the following set of values {0.1, 2.0, 2.0, -1.0},
the expected output would be {1.071773, 4.000000, 4.000000, 0.500000}.
Instead, the function computes {1.119102, 3.382044, 3.382044, -0.191022}.

Looking at the code, float32_exp2() attempts to do this

                 2     3     4     5           n
  x        x    x     x     x     x           x
 e  = 1 + --- + --- + --- + --- + --- + ... + --- + ...
          1!    2!    3!    4!    5!          n!

But because of the typo it ends up doing

  x        x    x     x     x     x           x
 e  = 1 + --- + --- + --- + --- + --- + ... + --- + ...
          1!    2!    3!    4!    5!          n!

This is because, instead of xnp, which holds the numerator, parts_muladd
is using xp, which is just 'x'. Commit '572c4d862ff2' refactored this
function and mistakenly used xp instead of xnp.

Cc: qemu-stable@nongnu.org
Fixes: 572c4d862ff2 "softfloat: Convert float32_exp2 to FloatParts"
Partially-Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1623
Reported-By: Luca Barbato (https://gitlab.com/lu-zero)
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Message-Id: <168304110865.537992.13059030916325018670.stgit@localhost.localdomain>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 fpu/softfloat.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
     float64_unpack_canonical(&rp, float64_one, status);
     for (i = 0 ; i < 15 ; i++) {
         float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
-        rp = *parts_muladd(&tp, &xp, &rp, 0, status);
+        rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
         xnp = *parts_mul(&xnp, &xp, status);
     }

--
2.34.1
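To see the arithmetic behind the numbers quoted in the commit message, here
is a small standalone double-precision model of that loop (an illustration,
not QEMU code; link with -lm). The "buggy" variant multiplies each
coefficient by x, as the typo did, where the correct variant uses the
running power x^n:

    #include <math.h>
    #include <stdio.h>

    static double exp2_taylor(double x, int buggy)
    {
        const double ln2 = 0.69314718055994531;
        double xp = x * ln2;    /* 2^x = e^(x * ln 2) */
        double xnp = xp;        /* running numerator x^n, starting at n = 1 */
        double r = 1.0;

        for (int i = 0; i < 15; i++) {
            double coeff = 1.0 / tgamma(i + 2.0);    /* 1/(i+1)! */
            r += coeff * (buggy ? xp : xnp);         /* the typo used xp */
            xnp *= xp;                               /* advance to x^(n+1) */
        }
        return r;
    }

    int main(void)
    {
        const double in[] = { 0.1, 2.0, 2.0, -1.0 };

        for (int i = 0; i < 4; i++) {
            printf("%f: correct %f, buggy %f\n",
                   in[i], exp2_taylor(in[i], 0), exp2_taylor(in[i], 1));
        }
        return 0;
    }

For x = 2.0 the buggy accumulation collapses to 1 + x*ln(2)*(e - 1), about
3.382044, which is exactly the wrong value reported above.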
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-2-richard.henderson@linaro.org>
---
 target/avr/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
         gen_helper_fullwr(cpu_env, data, addr);
     } else {
-        tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
+        tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
         gen_helper_fullrd(data, cpu_env, addr);
     } else {
-        tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
+        tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
 
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
 
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
 
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
     tcg_gen_andi_tl(L, addr, 0xff);
     tcg_gen_shri_tl(addr, addr, 8);
@@ -XXX,XX +XXX,XX @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
     TCGv Rd = cpu_r[0];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
     gen_set_zaddr(addr);
     return true;
--
2.34.1
Convert away from the old interface with the implicit
MemOp argument. In this case we can fold the calls
using the size bits of MemOp.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-3-richard.henderson@linaro.org>
---
 target/cris/translate_v10.c.inc | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
     /* Store only if F flag isn't set */
     tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
     tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
-    if (size == 1) {
-        tcg_gen_qemu_st8(tval, taddr, mem_index);
-    } else if (size == 2) {
-        tcg_gen_qemu_st16(tval, taddr, mem_index);
-    } else {
-        tcg_gen_qemu_st32(tval, taddr, mem_index);
-    }
+
+    tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE);
+
     gen_set_label(l1);
     tcg_gen_shri_tl(t1, t1, 1);  /* shift F to P position */
     tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
@@ -XXX,XX +XXX,XX @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
         return;
     }
 
-    if (size == 1) {
-        tcg_gen_qemu_st8(val, addr, mem_index);
-    } else if (size == 2) {
-        tcg_gen_qemu_st16(val, addr, mem_index);
-    } else {
-        tcg_gen_qemu_st32(val, addr, mem_index);
-    }
+    tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE);
 }
 
--
2.34.1
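A note on the fold: the MemOp size field is the log2 of the access size in
bytes, and for the power-of-two sizes handled here that is exactly the
trailing-zero count, so ctz32(size) | MO_TE selects the right operation. A
standalone check (using the compiler builtin that QEMU's ctz32() wraps on
common compilers):

    #include <stdio.h>

    int main(void)
    {
        /* MO_8..MO_64 are 0..3, the log2 of the access size. */
        static const char *const mo_name[] = { "MO_8", "MO_16", "MO_32", "MO_64" };

        for (unsigned size = 1; size <= 8; size *= 2) {
            printf("size %u -> %s\n", size, mo_name[__builtin_ctz(size)]);
        }
        return 0;
    }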
Convert away from the old interface with the implicit
MemOp argument. Importantly, this removes some incorrect
casts generated by idef-parser's gen_load().

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-4-richard.henderson@linaro.org>
---
 target/hexagon/macros.h                     | 14 ++++-----
 target/hexagon/genptr.c                     |  8 +++---
 target/hexagon/idef-parser/parser-helpers.c | 28 +++++++++---------
 target/hexagon/translate.c                  | 32 ++++++++++-----------
 4 files changed, 40 insertions(+), 42 deletions(-)

diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -XXX,XX +XXX,XX @@
 #define MEM_LOAD1s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 1); \
-        tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_SB); \
     } while (0)
 #define MEM_LOAD1u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 1); \
-        tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_UB); \
     } while (0)
 #define MEM_LOAD2s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 2); \
-        tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \
     } while (0)
 #define MEM_LOAD2u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 2); \
-        tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \
     } while (0)
 #define MEM_LOAD4s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 4); \
-        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \
     } while (0)
 #define MEM_LOAD4u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 4); \
-        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \
     } while (0)
 #define MEM_LOAD8u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 8); \
-        tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \
     } while (0)
 
 #define MEM_STORE1_FUNC(X) \
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/genptr.c
+++ b/target/hexagon/genptr.c
@@ -XXX,XX +XXX,XX @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
 
 static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
 {
-    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
+    tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL);
     tcg_gen_mov_tl(hex_llsc_addr, vaddr);
     tcg_gen_mov_tl(hex_llsc_val, dest);
 }
 
 static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
 {
-    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
+    tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ);
     tcg_gen_mov_tl(hex_llsc_addr, vaddr);
     tcg_gen_mov_i64(hex_llsc_val_i64, dest);
 }
@@ -XXX,XX +XXX,XX @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
 {
     Insn *insn = ctx->insn;  /* Needed for CHECK_NOSHUF */
     CHECK_NOSHUF(EA, 8);
-    tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx);
+    tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ);
 }
 
 static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
@@ -XXX,XX +XXX,XX @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
         tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
     }
     for (int i = 0; i < sizeof(MMVector) / 8; i++) {
-        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
+        tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
         tcg_gen_addi_tl(src, src, 8);
         tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
     }
diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/idef-parser/parser-helpers.c
+++ b/target/hexagon/idef-parser/parser-helpers.c
@@ -XXX,XX +XXX,XX @@ void gen_load_cancel(Context *c, YYLTYPE *locp)
 void gen_load(Context *c, YYLTYPE *locp, HexValue *width,
               HexSignedness signedness, HexValue *ea, HexValue *dst)
 {
-    char size_suffix[4] = {0};
-    const char *sign_suffix;
+    unsigned dst_bit_width;
+    unsigned src_bit_width;
+
     /* Memop width is specified in the load macro */
     assert_signedness(c, locp, signedness);
-    sign_suffix = (width->imm.value > 4)
-                  ? ""
-                  : ((signedness == UNSIGNED) ? "u" : "s");
+
     /* If dst is a variable, assert that is declared and load the type info */
     if (dst->type == VARID) {
         find_variable(c, locp, dst, dst);
     }
 
-    snprintf(size_suffix, 4, "%" PRIu64, width->imm.value * 8);
+    src_bit_width = width->imm.value * 8;
+    dst_bit_width = MAX(dst->bit_width, 32);
+
     /* Lookup the effective address EA */
     find_variable(c, locp, ea, ea);
     OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
     OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n");
     OUT(c, locp, "process_store(ctx, 1);\n");
     OUT(c, locp, "}\n");
-    OUT(c, locp, "tcg_gen_qemu_ld", size_suffix, sign_suffix);
+
+    OUT(c, locp, "tcg_gen_qemu_ld_i", &dst_bit_width);
     OUT(c, locp, "(");
-    if (dst->bit_width > width->imm.value * 8) {
-        /*
-         * Cast to the correct TCG type if necessary, to avoid implict cast
-         * warnings. This is needed when the width of the destination var is
-         * larger than the size of the requested load.
-         */
-        OUT(c, locp, "(TCGv) ");
+    OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx, MO_", &src_bit_width);
+    if (signedness == SIGNED) {
+        OUT(c, locp, " | MO_SIGN");
     }
-    OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n");
+    OUT(c, locp, " | MO_TE);\n");
 }
 
 void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea,
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ void process_store(DisasContext *ctx, int slot_num)
     switch (ctx->store_width[slot_num]) {
     case 1:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st8(hex_store_val32[slot_num],
-                         hex_store_addr[slot_num],
-                         ctx->mem_idx);
+        tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                           hex_store_addr[slot_num],
+                           ctx->mem_idx, MO_UB);
         break;
     case 2:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st16(hex_store_val32[slot_num],
-                          hex_store_addr[slot_num],
-                          ctx->mem_idx);
+        tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                           hex_store_addr[slot_num],
+                           ctx->mem_idx, MO_TEUW);
         break;
     case 4:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st32(hex_store_val32[slot_num],
-                          hex_store_addr[slot_num],
-                          ctx->mem_idx);
+        tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                           hex_store_addr[slot_num],
+                           ctx->mem_idx, MO_TEUL);
         break;
     case 8:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st64(hex_store_val64[slot_num],
-                          hex_store_addr[slot_num],
-                          ctx->mem_idx);
+        tcg_gen_qemu_st_i64(hex_store_val64[slot_num],
+                            hex_store_addr[slot_num],
+                            ctx->mem_idx, MO_TEUQ);
         break;
     default:
         {
@@ -XXX,XX +XXX,XX @@ static void process_dczeroa(DisasContext *ctx)
         TCGv_i64 zero = tcg_constant_i64(0);
 
         tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
     }
 }
 
--
2.34.1
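Since idef-parser is a code generator, the parser-helpers.c hunk is easiest
to read by its output. A standalone toy model of the reworked gen_load()
emission (names mirror the patch; this is an illustration, not the parser
itself):

    #include <stdio.h>

    static void emit_load(unsigned width_bytes, int is_signed,
                          unsigned dst_bit_width)
    {
        unsigned src_bit_width = width_bytes * 8;

        /* Destinations narrower than 32 bits still use TCGv_i32. */
        if (dst_bit_width < 32) {
            dst_bit_width = 32;
        }
        printf("tcg_gen_qemu_ld_i%u(dst, ea, ctx->mem_idx, MO_%u%s | MO_TE);\n",
               dst_bit_width, src_bit_width, is_signed ? " | MO_SIGN" : "");
    }

    int main(void)
    {
        emit_load(1, 1, 32);    /* -> ..._i32(..., MO_8 | MO_SIGN | MO_TE) */
        emit_load(4, 0, 32);    /* -> ..._i32(..., MO_32 | MO_TE)          */
        emit_load(8, 0, 64);    /* -> ..._i64(..., MO_64 | MO_TE)          */
        return 0;
    }

Because the emitted MemOp now carries the signedness, the "(TCGv)" cast the
old generator sometimes printed is no longer needed, which is the
incorrect-cast removal the commit message refers to.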
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-5-richard.henderson@linaro.org>
---
 target/m68k/translate.c | 76 ++++++++++++++---------------------------
 1 file changed, 25 insertions(+), 51 deletions(-)

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_addr_fault(DisasContext *s)
 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                             int sign, int index)
 {
-    TCGv tmp;
-    tmp = tcg_temp_new_i32();
-    switch(opsize) {
+    TCGv tmp = tcg_temp_new_i32();
+
+    switch (opsize) {
     case OS_BYTE:
-        if (sign)
-            tcg_gen_qemu_ld8s(tmp, addr, index);
-        else
-            tcg_gen_qemu_ld8u(tmp, addr, index);
-        break;
     case OS_WORD:
-        if (sign)
-            tcg_gen_qemu_ld16s(tmp, addr, index);
-        else
-            tcg_gen_qemu_ld16u(tmp, addr, index);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index,
+                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                              int index)
 {
-    switch(opsize) {
+    switch (opsize) {
     case OS_BYTE:
-        tcg_gen_qemu_st8(val, addr, index);
-        break;
     case OS_WORD:
-        tcg_gen_qemu_st16(val, addr, index);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_st32(val, addr, index);
+        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     tmp = tcg_temp_new();
     switch (opsize) {
     case OS_BYTE:
-        tcg_gen_qemu_ld8s(tmp, addr, index);
-        gen_helper_exts32(cpu_env, fp, tmp);
-        break;
     case OS_WORD:
-        tcg_gen_qemu_ld16s(tmp, addr, index);
-        gen_helper_exts32(cpu_env, fp, tmp);
-        break;
-    case OS_LONG:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
         gen_helper_exts32(cpu_env, fp, tmp);
         break;
     case OS_SINGLE:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
         gen_helper_extf32(cpu_env, fp, tmp);
         break;
     case OS_DOUBLE:
-        tcg_gen_qemu_ld64(t64, addr, index);
+        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
         gen_helper_extf64(cpu_env, fp, t64);
         break;
     case OS_EXTENDED:
@@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
             gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
             break;
         }
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
         tcg_gen_shri_i32(tmp, tmp, 16);
         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
         tcg_gen_addi_i32(tmp, addr, 4);
-        tcg_gen_qemu_ld64(t64, tmp, index);
+        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
         break;
     case OS_PACKED:
@@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     tmp = tcg_temp_new();
     switch (opsize) {
     case OS_BYTE:
-        gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st8(tmp, addr, index);
-        break;
     case OS_WORD:
-        gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st16(tmp, addr, index);
-        break;
     case OS_LONG:
         gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
         break;
     case OS_SINGLE:
         gen_helper_redf32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
         break;
     case OS_DOUBLE:
         gen_helper_redf64(t64, cpu_env, fp);
-        tcg_gen_qemu_st64(t64, addr, index);
+        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
         break;
     case OS_EXTENDED:
         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
@@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
         }
         tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
         tcg_gen_shli_i32(tmp, tmp, 16);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
         tcg_gen_addi_i32(tmp, addr, 4);
         tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
-        tcg_gen_qemu_st64(t64, tmp, index);
+        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
         break;
     case OS_PACKED:
         /*
@@ -XXX,XX +XXX,XX @@ DISAS_INSN(movep)
     if (insn & 0x80) {
         for ( ; i > 0 ; i--) {
             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
-            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
+            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
             if (i > 1) {
                 tcg_gen_addi_i32(abuf, abuf, 2);
             }
         }
     } else {
         for ( ; i > 0 ; i--) {
-            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
+            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
             if (i > 1) {
                 tcg_gen_addi_i32(abuf, abuf, 2);
@@ -XXX,XX +XXX,XX @@ static void m68k_copy_line(TCGv dst, TCGv src, int index)
     t1 = tcg_temp_new_i64();
 
     tcg_gen_andi_i32(addr, src, ~15);
-    tcg_gen_qemu_ld64(t0, addr, index);
+    tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
     tcg_gen_addi_i32(addr, addr, 8);
-    tcg_gen_qemu_ld64(t1, addr, index);
+    tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
 
     tcg_gen_andi_i32(addr, dst, ~15);
-    tcg_gen_qemu_st64(t0, addr, index);
+    tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
     tcg_gen_addi_i32(addr, addr, 8);
-    tcg_gen_qemu_st64(t1, addr, index);
+    tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
 }
 
 DISAS_INSN(move16_reg)
@@ -XXX,XX +XXX,XX @@ static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
 
     tmp = tcg_temp_new();
     gen_load_fcr(s, tmp, reg);
-    tcg_gen_qemu_st32(tmp, addr, index);
+    tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
 }
 
 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
@@ -XXX,XX +XXX,XX @@ static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
     TCGv tmp;
 
     tmp = tcg_temp_new();
-    tcg_gen_qemu_ld32u(tmp, addr, index);
+    tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
     gen_store_fcr(s, tmp, reg);
 }
 
--
2.34.1
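The switch folds above work because m68k's OS_BYTE/OS_WORD/OS_LONG size
codes line up with MemOp's MO_8/MO_16/MO_32, so "opsize | MO_SIGN" selects
the signed access directly, with MO_TE ORed in for target endianness. A
standalone toy sketch of that encoding (the constants are stand-ins that
mirror the relationship; the real values live in QEMU's headers):

    #include <stdio.h>

    enum { OS_BYTE, OS_WORD, OS_LONG };    /* size ordinal in the low bits */
    enum { MO_SIGN = 0x08 };               /* sign as a separate flag bit  */

    int main(void)
    {
        static const char *const u[] = { "MO_UB", "MO_UW", "MO_UL" };
        static const char *const s[] = { "MO_SB", "MO_SW", "MO_SL" };

        for (int opsize = OS_BYTE; opsize <= OS_LONG; opsize++) {
            for (int sign = 0; sign <= 1; sign++) {
                int mop = opsize | (sign ? MO_SIGN : 0);
                printf("opsize %d, sign %d -> %s\n", opsize, sign,
                       (mop & MO_SIGN) ? s[mop & 3] : u[mop & 3]);
            }
        }
        return 0;
    }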
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-6-richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c              | 8 ++++----
 target/mips/tcg/nanomips_translate.c.inc | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
 
 /* load/store instructions. */
 #ifdef CONFIG_USER_ONLY
-#define OP_LD_ATOMIC(insn, fname)                                          \
+#define OP_LD_ATOMIC(insn, memop)                                          \
 static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx,          \
                                 DisasContext *ctx)                         \
 {                                                                          \
     TCGv t0 = tcg_temp_new();                                              \
     tcg_gen_mov_tl(t0, arg1);                                              \
-    tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx);                         \
+    tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, memop);                    \
     tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr));            \
     tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval));            \
 }
@@ -XXX,XX +XXX,XX @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
     gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx));      \
 }
 #endif
-OP_LD_ATOMIC(ll, ld32s);
+OP_LD_ATOMIC(ll, MO_TESL);
 #if defined(TARGET_MIPS64)
-OP_LD_ATOMIC(lld, ld64);
+OP_LD_ATOMIC(lld, MO_TEUQ);
 #endif
 #undef OP_LD_ATOMIC
 
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
     TCGv tmp2 = tcg_temp_new();
 
     gen_base_offset_addr(ctx, taddr, base, offset);
-    tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx);
+    tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ);
     if (cpu_is_bigendian(ctx)) {
         tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
     } else {
--
2.34.1
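For context on what OP_LD_ATOMIC's generated code maintains: the user-only
variant records the address and value of a load-linked so that a later
store-conditional can validate its reservation. A standalone toy model of
that bookkeeping (illustration only; the real state lives in CPUMIPSState's
lladdr/llval fields):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t lladdr;     /* like CPUMIPSState.lladdr */
    static uint64_t llval;      /* like CPUMIPSState.llval  */

    static uint32_t load_linked(const uint32_t *mem, uint64_t addr)
    {
        uint32_t val = mem[addr / 4];

        lladdr = addr;          /* like the tcg_gen_st_tl to lladdr */
        llval = val;            /* like the tcg_gen_st_tl to llval  */
        return val;
    }

    static int store_conditional(uint32_t *mem, uint64_t addr, uint32_t val)
    {
        if (addr != lladdr || mem[addr / 4] != (uint32_t)llval) {
            return 0;           /* reservation lost, store fails */
        }
        mem[addr / 4] = val;
        return 1;
    }

    int main(void)
    {
        uint32_t mem[4] = { 42 };
        uint32_t v = load_linked(mem, 0);

        printf("ll -> %u, sc -> %d\n", v, store_conditional(mem, 0, v + 1));
        return 0;
    }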
From: Ilya Leoshkevich <iii@linux.ibm.com>

Right now translator stops right *after* the end of a page, which
breaks reporting of fault locations when the last instruction of a
multi-insn translation block crosses a page boundary.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220817150506.592862-3-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/tcg/translate.c | 15 +++-
tests/tcg/s390x/noexec.c | 106 +++++++++++++++++++++++
tests/tcg/multiarch/noexec.c.inc | 139 +++++++++++++++++++++++++++++++
tests/tcg/s390x/Makefile.target | 1 +
4 files changed, 257 insertions(+), 4 deletions(-)
create mode 100644 tests/tcg/s390x/noexec.c
create mode 100644 tests/tcg/multiarch/noexec.c.inc

diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
dc->insn_start = tcg_last_op();
}

+static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
+ uint64_t pc)
+{
+ uint64_t insn = ld_code2(env, s, pc);
+
+ return pc + get_ilen((insn >> 8) & 0xff);
+}
+
static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
CPUS390XState *env = cs->env_ptr;
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)

dc->base.is_jmp = translate_one(env, dc);
if (dc->base.is_jmp == DISAS_NEXT) {
- uint64_t page_start;
-
- page_start = dc->base.pc_first & TARGET_PAGE_MASK;
- if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) {
+ if (!is_same_page(dcbase, dc->base.pc_next) ||
+ !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next)) ||
+ dc->ex_value) {
dc->base.is_jmp = DISAS_TOO_MANY;
}
}
}
diff --git a/tests/tcg/s390x/noexec.c b/tests/tcg/s390x/noexec.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/s390x/noexec.c
@@ -XXX,XX +XXX,XX @@
+#include "../multiarch/noexec.c.inc"
+
+static void *arch_mcontext_pc(const mcontext_t *ctx)
+{
+ return (void *)ctx->psw.addr;
+}
+
+static int arch_mcontext_arg(const mcontext_t *ctx)
+{
+ return ctx->gregs[2];
+}
+
+static void arch_flush(void *p, int len)
+{
+}
+
+extern char noexec_1[];
+extern char noexec_2[];
+extern char noexec_end[];
+
+asm("noexec_1:\n"
+ " lgfi %r2,1\n" /* %r2 is 0 on entry, set 1. */
+ "noexec_2:\n"
+ " lgfi %r2,2\n" /* %r2 is 0/1; set 2. */
+ " br %r14\n" /* return */
+ "noexec_end:");
+
+extern char exrl_1[];
+extern char exrl_2[];
+extern char exrl_end[];
+
+asm("exrl_1:\n"
+ " exrl %r0, exrl_2\n"
+ " br %r14\n"
+ "exrl_2:\n"
+ " lgfi %r2,2\n"
+ "exrl_end:");
+
+int main(void)
+{
+ struct noexec_test noexec_tests[] = {
+ {
+ .name = "fallthrough",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2,
+ .entry_ofs = noexec_1 - noexec_2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = 0,
+ .expected_arg = 1,
+ },
+ {
+ .name = "jump",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2,
+ .entry_ofs = 0,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = 0,
+ .expected_arg = 0,
+ },
+ {
+ .name = "exrl",
+ .test_code = exrl_1,
+ .test_len = exrl_end - exrl_1,
+ .page_ofs = exrl_1 - exrl_2,
+ .entry_ofs = exrl_1 - exrl_2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = exrl_1 - exrl_2,
+ .expected_arg = 0,
+ },
+ {
+ .name = "fallthrough [cross]",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2 - 2,
+ .entry_ofs = noexec_1 - noexec_2 - 2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = -2,
+ .expected_arg = 1,
+ },
+ {
+ .name = "jump [cross]",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2 - 2,
+ .entry_ofs = -2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = -2,
+ .expected_arg = 0,
+ },
+ {
+ .name = "exrl [cross]",
+ .test_code = exrl_1,
+ .test_len = exrl_end - exrl_1,
+ .page_ofs = exrl_1 - exrl_2 - 2,
+ .entry_ofs = exrl_1 - exrl_2 - 2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = exrl_1 - exrl_2 - 2,
+ .expected_arg = 0,
+ },
+ };
+
+ return test_noexec(noexec_tests,
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
+}
diff --git a/tests/tcg/multiarch/noexec.c.inc b/tests/tcg/multiarch/noexec.c.inc
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/multiarch/noexec.c.inc
@@ -XXX,XX +XXX,XX @@
+/*
+ * Common code for arch-specific MMU_INST_FETCH fault testing.
+ */
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/ucontext.h>
+
+/* Forward declarations. */
+
+static void *arch_mcontext_pc(const mcontext_t *ctx);
+static int arch_mcontext_arg(const mcontext_t *ctx);
+static void arch_flush(void *p, int len);
+
+/* Testing infrastructure. */
+
+struct noexec_test {
+ const char *name;
+ const char *test_code;
+ int test_len;
+ int page_ofs;
+ int entry_ofs;
+ int expected_si_ofs;
+ int expected_pc_ofs;
+ int expected_arg;
+};
+
+static void *page_base;
+static int page_size;
+static const struct noexec_test *current_noexec_test;
+
+static void handle_err(const char *syscall)
+{
+ printf("[ FAILED ] %s: %s\n", syscall, strerror(errno));
+ exit(EXIT_FAILURE);
+}
+
+static void handle_segv(int sig, siginfo_t *info, void *ucontext)
+{
+ const struct noexec_test *test = current_noexec_test;
+ const mcontext_t *mc = &((ucontext_t *)ucontext)->uc_mcontext;
+ void *expected_si;
+ void *expected_pc;
+ void *pc;
+ int arg;
+
+ if (test == NULL) {
+ printf("[ FAILED ] unexpected SEGV\n");
+ exit(EXIT_FAILURE);
+ }
+ current_noexec_test = NULL;
+
+ expected_si = page_base + test->expected_si_ofs;
+ if (info->si_addr != expected_si) {
+ printf("[ FAILED ] wrong si_addr (%p != %p)\n",
+ info->si_addr, expected_si);
+ exit(EXIT_FAILURE);
+ }
+
+ pc = arch_mcontext_pc(mc);
+ expected_pc = page_base + test->expected_pc_ofs;
+ if (pc != expected_pc) {
+ printf("[ FAILED ] wrong pc (%p != %p)\n", pc, expected_pc);
+ exit(EXIT_FAILURE);
+ }
+
+ arg = arch_mcontext_arg(mc);
+ if (arg != test->expected_arg) {
+ printf("[ FAILED ] wrong arg (%d != %d)\n", arg, test->expected_arg);
+ exit(EXIT_FAILURE);
+ }
+
+ if (mprotect(page_base, page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
+ handle_err("mprotect");
+ }
+}
+
+static void test_noexec_1(const struct noexec_test *test)
+{
+ void *start = page_base + test->page_ofs;
+ void (*fn)(int arg) = page_base + test->entry_ofs;
+
+ memcpy(start, test->test_code, test->test_len);
+ arch_flush(start, test->test_len);
+
+ /* Trigger TB creation in order to test invalidation. */
+ fn(0);
+
+ if (mprotect(page_base, page_size, PROT_NONE) < 0) {
+ handle_err("mprotect");
+ }
+
+ /* Trigger SEGV and check that handle_segv() ran. */
+ current_noexec_test = test;
+ fn(0);
+ assert(current_noexec_test == NULL);
+}
+
+static int test_noexec(struct noexec_test *tests, size_t n_tests)
+{
+ struct sigaction act;
+ size_t i;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_sigaction = handle_segv;
+ act.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &act, NULL) < 0) {
+ handle_err("sigaction");
+ }
+
+ page_size = getpagesize();
+ page_base = mmap(NULL, 2 * page_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (page_base == MAP_FAILED) {
+ handle_err("mmap");
+ }
+ page_base += page_size;
+
+ for (i = 0; i < n_tests; i++) {
+ struct noexec_test *test = &tests[i];
+
+ printf("[ RUN ] %s\n", test->name);
+ test_noexec_1(test);
+ printf("[ OK ]\n");
+ }
+
+ printf("[ PASSED ]\n");
+ return EXIT_SUCCESS;
+}
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/s390x/Makefile.target
+++ b/tests/tcg/s390x/Makefile.target
@@ -XXX,XX +XXX,XX @@ TESTS+=shift
TESTS+=trap
TESTS+=signals-s390x
TESTS+=branch-relative-long
+TESTS+=noexec

Z14_TESTS=vfminmax
vfminmax: LDFLAGS+=-lm
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20230502135741.1158035-7-richard.henderson@linaro.org>
---
target/s390x/tcg/translate.c | 152 ++++++++++++++++-------------------
1 file changed, 71 insertions(+), 81 deletions(-)

diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
int l = get_field(s, l1);
TCGv_i32 vl;
+ MemOp mop;

switch (l + 1) {
case 1:
- tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
- tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
- break;
case 2:
- tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
- tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
- break;
case 4:
- tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
- tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
- break;
case 8:
- tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
- tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
- break;
+ mop = ctz32(l + 1) | MO_TE;
+ tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
+ tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+ return DISAS_NEXT;
default:
vl = tcg_constant_i32(l);
gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
set_cc_static(s);
return DISAS_NEXT;
}
- gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
- return DISAS_NEXT;
}

static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
TCGv_i32 t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, o->in1);
gen_helper_cvd(t1, t2);
- tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
return DISAS_NEXT;
}

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
switch (m3) {
case 0xf:
/* Effectively a 32-bit load. */
- tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
len = 32;
goto one_insert;

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
case 0x6:
case 0x3:
/* Effectively a 16-bit load. */
- tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
len = 16;
goto one_insert;

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
case 0x2:
case 0x1:
/* Effectively an 8-bit load. */
- tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
len = 8;
goto one_insert;

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
ccm = 0;
while (m3) {
if (m3 & 0x8) {
- tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
ccm |= 0xffull << pos;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)

static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
return DISAS_NEXT;
}

static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
return DISAS_NEXT;
}

static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
return DISAS_NEXT;
}

static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
return DISAS_NEXT;
}

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
TCGLabel *lab = gen_new_label();
- tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
/* The value is stored even in case of trap. */
tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
gen_trap(s);
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
TCGLabel *lab = gen_new_label();
- tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+
+ tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
/* The value is stored even in case of trap. */
tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
gen_trap(s);
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
MO_TEUQ | MO_ALIGN_8);
tcg_gen_addi_i64(o->in2, o->in2, 8);
- tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
gen_helper_load_psw(cpu_env, t1, t2);
return DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
/* Only one register to read. */
t1 = tcg_temp_new_i64();
if (unlikely(r1 == r3)) {
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
store_reg32_i64(r1, t1);
return DISAS_NEXT;
}
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
/* First load the values of the first and last registers to trigger
possible page faults. */
t2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
- tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
store_reg32_i64(r1, t1);
store_reg32_i64(r3, t2);

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
while (r1 != r3) {
r1 = (r1 + 1) & 15;
tcg_gen_add_i64(o->in2, o->in2, t2);
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
store_reg32_i64(r1, t1);
}
return DISAS_NEXT;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
/* Only one register to read. */
t1 = tcg_temp_new_i64();
if (unlikely(r1 == r3)) {
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
store_reg32h_i64(r1, t1);
return DISAS_NEXT;
}
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
/* First load the values of the first and last registers to trigger
possible page faults. */
t2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
- tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
store_reg32h_i64(r1, t1);
store_reg32h_i64(r3, t2);

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
while (r1 != r3) {
r1 = (r1 + 1) & 15;
tcg_gen_add_i64(o->in2, o->in2, t2);
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
store_reg32h_i64(r1, t1);
}
return DISAS_NEXT;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)

/* Only one register to read. */
if (unlikely(r1 == r3)) {
- tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
return DISAS_NEXT;
}

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
possible page faults. */
t1 = tcg_temp_new_i64();
t2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
- tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
tcg_gen_mov_i64(regs[r1], t1);

/* Only two registers to read. */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
while (r1 != r3) {
r1 = (r1 + 1) & 15;
tcg_gen_add_i64(o->in2, o->in2, t1);
- tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
}
return DISAS_NEXT;
}
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
switch (s->insn->data) {
case 1: /* STOCG */
- tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
+ tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
break;
case 0: /* STOC */
- tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
+ tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
break;
case 2: /* STOCFH */
h = tcg_temp_new_i64();
tcg_gen_shri_i64(h, regs[r1], 32);
- tcg_gen_qemu_st32(h, a, get_mem_index(s));
+ tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
break;
default:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);

/* load the third operand into r3 before modifying anything */
- tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);

/* subtract CPU timer from first operand and store in GR0 */
gen_helper_stpt(tmp, cpu_env);
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
tcg_gen_shri_i64(c1, c1, 8);
tcg_gen_ori_i64(c2, c2, 0x10000);
tcg_gen_or_i64(c2, c2, todpr);
- tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
tcg_gen_addi_i64(o->in2, o->in2, 8);
- tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return DISAS_NEXT;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
restart, we'll have the wrong SYSTEM MASK in place. */
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, psw_mask, 56);
- tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);

if (s->fields.op == 0xac) {
tcg_gen_andi_i64(psw_mask, psw_mask,
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)

static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
return DISAS_NEXT;
}

static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
return DISAS_NEXT;
}

@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
case 0xf:
/* Effectively a 32-bit store. */
tcg_gen_shri_i64(tmp, o->in1, pos);
- tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
break;

case 0xc:
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
case 0x3:
/* Effectively a 16-bit store. */
tcg_gen_shri_i64(tmp, o->in1, pos);
- tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
break;

case 0x8:
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
case 0x1:
/* Effectively an 8-bit store. */
tcg_gen_shri_i64(tmp, o->in1, pos);
- tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
break;

default:
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
while (m3) {
if (m3 & 0x8) {
tcg_gen_shri_i64(tmp, o->in1, pos);
- tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
tcg_gen_addi_i64(o->in2, o->in2, 1);
}
m3 = (m3 << 1) & 0xf;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
TCGv_i64 tsize = tcg_constant_i64(size);

while (1) {
- if (size == 8) {
- tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
- } else {
- tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
- }
+ tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
+ size == 8 ? MO_TEUQ : MO_TEUL);
if (r1 == r3) {
break;
}
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)

while (1) {
tcg_gen_shl_i64(t, regs[r1], t32);
- tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
if (r1 == r3) {
break;
}
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)

l++;
while (l >= 8) {
- tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
l -= 8;
if (l > 0) {
tcg_gen_addi_i64(o->addr1, o->addr1, 8);
}
}
if (l >= 4) {
- tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
l -= 4;
if (l > 0) {
tcg_gen_addi_i64(o->addr1, o->addr1, 4);
}
}
if (l >= 2) {
- tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
l -= 2;
if (l > 0) {
tcg_gen_addi_i64(o->addr1, o->addr1, 2);
}
}
if (l) {
- tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
}
gen_op_movi_cc(s, 0);
return DISAS_NEXT;
@@ -XXX,XX +XXX,XX @@ static void wout_cond_e1e2(DisasContext *s, DisasOps *o)

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

@@ -XXX,XX +XXX,XX @@ static void wout_m1_16a(DisasContext *s, DisasOps *o)

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

@@ -XXX,XX +XXX,XX @@ static void wout_m1_32a(DisasContext *s, DisasOps *o)

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

@@ -XXX,XX +XXX,XX @@ static void wout_m1_64a(DisasContext *s, DisasOps *o)

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
+ tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

@@ -XXX,XX +XXX,XX @@ static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
in1_la1(s, o);
o->in1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

@@ -XXX,XX +XXX,XX @@ static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
in1_la1(s, o);
o->in1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

@@ -XXX,XX +XXX,XX @@ static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
in1_la1(s, o);
o->in1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

@@ -XXX,XX +XXX,XX @@ static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
in1_la1(s, o);
o->in1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

@@ -XXX,XX +XXX,XX @@ static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
in1_la1(s, o);
o->in1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

@@ -XXX,XX +XXX,XX @@ static void in1_m1_64(DisasContext *s, DisasOps *o)
{
in1_la1(s, o);
o->in1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0

@@ -XXX,XX +XXX,XX @@ static void in2_sh(DisasContext *s, DisasOps *o)
static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

@@ -XXX,XX +XXX,XX @@ static void in2_m2_32ua(DisasContext *s, DisasOps *o)
static void in2_m2_64(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

@@ -XXX,XX +XXX,XX @@ static void in2_m2_64a(DisasContext *s, DisasOps *o)
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
o->in2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
o->in2 = tcg_temp_new_i64();
- tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s));
+ tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

--
2.34.1
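
A note on the op_clc hunk above: the four-way switch collapses into a computed MemOp because, for an access length of 1, 2, 4 or 8 bytes, ctz32() of the length yields the MemOp size code directly. The sketch below spells the assumption out; the helper name is hypothetical, while ctz32() is QEMU's count-trailing-zeros from qemu/host-utils.h:

    /* len must be a power of two in [1, 8] */
    static MemOp memop_for_len(int len)
    {
        /* ctz32: 1 -> MO_8, 2 -> MO_16, 4 -> MO_32, 8 -> MO_64 */
        return (MemOp)ctz32(len) | MO_TE;   /* or in target endianness */
    }

For example, memop_for_len(4) equals MO_TEUL, matching the retired tcg_gen_qemu_ld32u wrapper.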
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-8-richard.henderson@linaro.org>
---
target/sparc/translate.c | 43 ++++++++++++++++++++++++++--------------
1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
switch (xop) {
case 0x0: /* ld, V9 lduw, load unsigned word */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUL);
break;
case 0x1: /* ldub, load unsigned byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_UB);
break;
case 0x2: /* lduh, load unsigned halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUW);
break;
case 0x3: /* ldd, load double word */
if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)

gen_address_mask(dc, cpu_addr);
t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
tcg_gen_trunc_i64_tl(cpu_val, t64);
tcg_gen_ext32u_tl(cpu_val, cpu_val);
gen_store_gpr(dc, rd + 1, cpu_val);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
break;
case 0x9: /* ldsb, load signed byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
break;
case 0xa: /* ldsh, load signed halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TESW);
break;
case 0xd: /* ldstub */
gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#ifdef TARGET_SPARC64
case 0x08: /* V9 ldsw */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TESL);
break;
case 0x0b: /* V9 ldx */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
break;
case 0x18: /* V9 ldswa */
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
switch (xop) {
case 0x4: /* st, store word */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUL);
break;
case 0x5: /* stb, store byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
break;
case 0x6: /* sth, store halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUW);
break;
case 0x7: /* std, store double word */
if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
lo = gen_load_gpr(dc, rd + 1);
t64 = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t64, lo, cpu_val);
- tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#ifdef TARGET_SPARC64
case 0x0e: /* V9 stx */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
break;
case 0x1e: /* V9 stxa */
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
- tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
break;
}
#endif
- tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+ dc->mem_idx, MO_TEUL);
}
break;
case 0x26:
--
2.34.1
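
A note on the ldd case above, which shows why both the _tl and _i64 flavors of the interface survive: SPARC's ldd loads 64 bits into a register pair, so on a 32-bit register file the load goes through an explicit i64 temporary before being split. These calls are lifted from the patch itself:

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, cpu_addr, dc->mem_idx, MO_TEUQ);
    tcg_gen_trunc_i64_tl(cpu_val, t64);   /* low half into a TCGv */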
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
Message-Id: <20230502135741.1158035-9-richard.henderson@linaro.org>
---
target/xtensa/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
TCGv_i32 res = tcg_temp_new_i32();

tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
- tcg_gen_qemu_ld8u(res, addr, dc->cring);
+ tcg_gen_qemu_ld_i32(res, addr, dc->cring, MO_UB);
}

static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
@@ -XXX,XX +XXX,XX @@ static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
} else {
tmp = tcg_constant_i32(arg[1].imm);
}
- tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
+ tcg_gen_qemu_ld_i32(arg[0].out, tmp, dc->cring, MO_TEUL);
}

static void translate_loop(DisasContext *dc, const OpcodeArg arg[],
--
2.34.1
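
A note on naming: xtensa uses the _i32 flavor because its TCG values are 32-bit. The suffix (_i32, _i64, _tl) names the type of the data operand, while the MemOp names the guest access. An 8-bit load into a 32-bit temporary, as in translate_dcache above:

    TCGv_i32 res = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(res, addr, dc->cring, MO_UB);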
Remove the old interfaces with the implicit MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: David Hildenbrand <david@redhat.com>
Message-Id: <20230502135741.1158035-10-richard.henderson@linaro.org>
---
include/tcg/tcg-op.h | 55 --------------------------------------------
1 file changed, 55 deletions(-)

diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp);

-static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
-}
-
-static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
-}
-
-static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
-}
-
-static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
-}
-
-static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
-}
-
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
TCGArg, MemOp);
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
--
2.34.1
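
With the wrappers gone, every caller passes a MemOp explicitly, and extra constraints compose with a bitwise or. An illustrative (not from this patch) aligned, target-endian 64-bit load:

    tcg_gen_qemu_ld_i64(val, addr, mem_idx, MO_TEUQ | MO_ALIGN);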
From: Ilya Leoshkevich <iii@linux.ibm.com>

Right now translator stops right *after* the end of a page, which
breaks reporting of fault locations when the last instruction of a
multi-insn translation block crosses a page boundary.

An implementation, like the one arm and s390x have, would require an
i386 length disassembler, which is burdensome to maintain. Another
alternative would be to single-step at the end of a guest page, but
this may come with a performance impact.

Fix by snapshotting disassembly state and restoring it after we figure
out we crossed a page boundary. This includes rolling back cc_op
updates and emitted ops.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1143
Message-Id: <20220817150506.592862-4-iii@linux.ibm.com>
[rth: Simplify end-of-insn cross-page checks.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/i386/tcg/translate.c | 64 ++++++++++++++++-----------
tests/tcg/x86_64/noexec.c | 75 ++++++++++++++++++++++++++++++++
tests/tcg/x86_64/Makefile.target | 3 +-
3 files changed, 116 insertions(+), 26 deletions(-)
create mode 100644 tests/tcg/x86_64/noexec.c

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
TCGv_i64 tmp1_i64;

sigjmp_buf jmpbuf;
+ TCGOp *prev_insn_end;
} DisasContext;

/* The environment in which user-only runs is constrained. */
@@ -XXX,XX +XXX,XX @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
uint64_t pc = s->pc;

+ /* This is a subsequent insn that crosses a page boundary. */
+ if (s->base.num_insns > 1 &&
+ !is_same_page(&s->base, s->pc + num_bytes - 1)) {
+ siglongjmp(s->jmpbuf, 2);
+ }
+
s->pc += num_bytes;
if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
/* If the instruction's 16th byte is on a different page than the 1st, a
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
int modrm, reg, rm, mod, op, opreg, val;
target_ulong next_eip, tval;
target_ulong pc_start = s->base.pc_next;
+ bool orig_cc_op_dirty = s->cc_op_dirty;
+ CCOp orig_cc_op = s->cc_op;

s->pc_start = s->pc = pc_start;
s->override = -1;
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
- if (sigsetjmp(s->jmpbuf, 0) != 0) {
+ switch (sigsetjmp(s->jmpbuf, 0)) {
+ case 0:
+ break;
+ case 1:
gen_exception_gpf(s);
return s->pc;
+ case 2:
+ /* Restore state that may affect the next instruction. */
+ s->cc_op_dirty = orig_cc_op_dirty;
+ s->cc_op = orig_cc_op;
+ s->base.num_insns--;
+ tcg_remove_ops_after(s->prev_insn_end);
+ s->base.is_jmp = DISAS_TOO_MANY;
+ return pc_start;
+ default:
+ g_assert_not_reached();
}

prefixes = 0;
@@ -XXX,XX +XXX,XX @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);

+ dc->prev_insn_end = tcg_last_op();
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}

@@ -XXX,XX +XXX,XX @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
#endif

pc_next = disas_insn(dc, cpu);
-
- if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
- /* if single step mode, we generate only one instruction and
- generate an exception */
- /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
- the flag and abort the translation to give the irqs a
- chance to happen */
- dc->base.is_jmp = DISAS_TOO_MANY;
- } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
- && ((pc_next & TARGET_PAGE_MASK)
- != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
- & TARGET_PAGE_MASK)
- || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
- /* Do not cross the boundary of the pages in icount mode,
- it can cause an exception. Do it only when boundary is
- crossed by the first instruction in the block.
- If current instruction already crossed the bound - it's ok,
- because an exception hasn't stopped this code.
- */
- dc->base.is_jmp = DISAS_TOO_MANY;
- } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
- dc->base.is_jmp = DISAS_TOO_MANY;
- }
-
dc->base.pc_next = pc_next;
+
+ if (dc->base.is_jmp == DISAS_NEXT) {
+ if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
+ /*
+ * If single step mode, we generate only one instruction and
+ * generate an exception.
+ * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+ * the flag and abort the translation to give the irqs a
+ * chance to happen.
+ */
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ } else if (!is_same_page(&dc->base, pc_next)) {
+ dc->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
}

static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
diff --git a/tests/tcg/x86_64/noexec.c b/tests/tcg/x86_64/noexec.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/x86_64/noexec.c
@@ -XXX,XX +XXX,XX @@
+#include "../multiarch/noexec.c.inc"
+
+static void *arch_mcontext_pc(const mcontext_t *ctx)
+{
+ return (void *)ctx->gregs[REG_RIP];
+}
+
+int arch_mcontext_arg(const mcontext_t *ctx)
+{
+ return ctx->gregs[REG_RDI];
+}
+
+static void arch_flush(void *p, int len)
+{
+}
+
+extern char noexec_1[];
+extern char noexec_2[];
+extern char noexec_end[];
+
+asm("noexec_1:\n"
+ " movq $1,%rdi\n" /* %rdi is 0 on entry, set 1. */
+ "noexec_2:\n"
+ " movq $2,%rdi\n" /* %rdi is 0/1; set 2. */
+ " ret\n"
+ "noexec_end:");
+
+int main(void)
+{
+ struct noexec_test noexec_tests[] = {
+ {
+ .name = "fallthrough",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2,
+ .entry_ofs = noexec_1 - noexec_2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = 0,
+ .expected_arg = 1,
+ },
+ {
+ .name = "jump",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2,
+ .entry_ofs = 0,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = 0,
+ .expected_arg = 0,
+ },
+ {
+ .name = "fallthrough [cross]",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2 - 2,
+ .entry_ofs = noexec_1 - noexec_2 - 2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = -2,
+ .expected_arg = 1,
+ },
+ {
+ .name = "jump [cross]",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2 - 2,
+ .entry_ofs = -2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = -2,
+ .expected_arg = 0,
+ },
+ };
+
+ return test_noexec(noexec_tests,
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
+}
diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/x86_64/Makefile.target
+++ b/tests/tcg/x86_64/Makefile.target
@@ -XXX,XX +XXX,XX @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target

ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
X86_64_TESTS += vsyscall
+X86_64_TESTS += noexec
TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
else
TESTS=$(MULTIARCH_TESTS)
@@ -XXX,XX +XXX,XX @@ test-x86_64: LDFLAGS+=-lm -lc
test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
	$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)

-vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
+%: $(SRC_PATH)/tests/tcg/x86_64/%.c
	$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
--
2.34.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif

/* Target-specific return values from translate_one, indicating the
--
2.34.1
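
A note on the cross-page check used throughout this series: is_same_page() is part of the DisasContextBase API introduced earlier in the 2022 series. As a sketch of its intent (the actual helper lives in the accel/tcg translator headers and may differ in detail), it simply asks whether an address falls on the same guest page as the start of the translation block:

    static inline bool is_same_page(const DisasContextBase *db, target_ulong addr)
    {
        return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
    }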
Mark all memory operations that are not already marked with UNALIGN.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
}
break;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LESL);
+ MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEUQ);
+ MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x4:
/* Longword virtual access */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* LDL_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
break;
case 0x2C:
/* STL */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LESL);
+ ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEUQ);
+ ctx->mem_idx, MO_LEUQ | MO_ALIGN);
break;
case 0x30:
/* BR */
--
2.34.1
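
A reading aid: MO_ALIGN requests a natural-alignment check on the individual access. Once TARGET_ALIGNED_ONLY is removed (next patch), accesses default to unaligned, so a target like Alpha that traps on misalignment must now request the check per operation, as in this line taken verbatim from the hunk above:

    tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);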
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configs/targets/alpha-linux-user.mak | 1 -
configs/targets/alpha-softmmu.mak | 1 -
2 files changed, 2 deletions(-)

diff --git a/configs/targets/alpha-linux-user.mak b/configs/targets/alpha-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/alpha-linux-user.mak
+++ b/configs/targets/alpha-linux-user.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=alpha
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
diff --git a/configs/targets/alpha-softmmu.mak b/configs/targets/alpha-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/alpha-softmmu.mak
+++ b/configs/targets/alpha-softmmu.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=alpha
-TARGET_ALIGNED_ONLY=y
TARGET_SUPPORTS_MTTCG=y
--
2.34.1
We're about to start validating PAGE_EXEC, which means
that we've got to put this code into a section that is
both writable and executable.

Note that this test did not run on hardware beforehand either.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tests/tcg/i386/test-i386.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/tcg/i386/test-i386.c b/tests/tcg/i386/test-i386.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/i386/test-i386.c
+++ b/tests/tcg/i386/test-i386.c
@@ -XXX,XX +XXX,XX @@ uint8_t code[] = {
0xc3, /* ret */
};

-asm(".section \".data\"\n"
+asm(".section \".data_x\",\"awx\"\n"
"smc_code2:\n"
"movl 4(%esp), %eax\n"
"movl %eax, smc_patch_addr2 + 1\n"
--
2.34.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hppa/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
--
2.34.1
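
A note on the test-i386 change above: the section flags follow standard GAS semantics, not anything QEMU-specific. The self-modifying-code test needs a section that is all three of allocatable, writable and executable once PAGE_EXEC is enforced and the default .data section is no longer executable under user-mode emulation:

    .section ".data_x","awx"    # a: allocatable, w: writable, x: executable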
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configs/targets/hppa-linux-user.mak | 1 -
configs/targets/hppa-softmmu.mak | 1 -
2 files changed, 2 deletions(-)

diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/hppa-linux-user.mak
+++ b/configs/targets/hppa-linux-user.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=hppa
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/hppa-softmmu.mak
+++ b/configs/targets/hppa-softmmu.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=hppa
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y
--
2.34.1
Old patch

Simplify the implementation of get_page_addr_code_hostp
by reusing the existing probe_access infrastructure.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 76 ++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 50 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                    (ADDR) & TARGET_PAGE_MASK)
 
-/*
- * Return a ram_addr_t for the virtual address for execution.
- *
- * Return -1 if we can't translate and execute from an entire page
- * of RAM.  This will force us to execute by loading and translating
- * one insn at a time, without caching.
- *
- * NOTE: This function will trigger an exception if the page is
- * not executable.
- */
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
-                                        void **hostp)
-{
-    uintptr_t mmu_idx = cpu_mmu_index(env, true);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    void *p;
-
-    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
-        if (!VICTIM_TLB_HIT(addr_code, addr)) {
-            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
-            index = tlb_index(env, mmu_idx, addr);
-            entry = tlb_entry(env, mmu_idx, addr);
-
-            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
-                /*
-                 * The MMU protection covers a smaller range than a target
-                 * page, so we must redo the MMU check for every insn.
-                 */
-                return -1;
-            }
-        }
-        assert(tlb_hit(entry->addr_code, addr));
-    }
-
-    if (unlikely(entry->addr_code & TLB_MMIO)) {
-        /* The region is not backed by RAM.  */
-        if (hostp) {
-            *hostp = NULL;
-        }
-        return -1;
-    }
-
-    p = (void *)((uintptr_t)addr + entry->addend);
-    if (hostp) {
-        *hostp = p;
-    }
-    return qemu_ram_addr_from_host_nofail(p);
-}
-
 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                            CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
 {
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
     return flags ? NULL : host;
 }
 
+/*
+ * Return a ram_addr_t for the virtual address for execution.
+ *
+ * Return -1 if we can't translate and execute from an entire page
+ * of RAM.  This will force us to execute by loading and translating
+ * one insn at a time, without caching.
+ *
+ * NOTE: This function will trigger an exception if the page is
+ * not executable.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+                                        void **hostp)
+{
+    void *p;
+
+    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
+                                cpu_mmu_index(env, true), false, &p, 0);
+    if (p == NULL) {
+        return -1;
+    }
+    if (hostp) {
+        *hostp = p;
+    }
+    return qemu_ram_addr_from_host_nofail(p);
+}
+
 #ifdef CONFIG_PLUGIN
 /*
  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
--
2.34.1

New patch

Acked-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/translate.c | 66 +++++++++++++++++++++-------------------
 1 file changed, 34 insertions(+), 32 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                      TCGv addr, int mmu_idx, MemOp memop)
 {
     gen_address_mask(dc, addr);
-    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
 }
 
 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
@@ -XXX,XX +XXX,XX @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
         break;
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
     default:
         {
             TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(memop);
+            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
             save_state(dc);
 #ifdef TARGET_SPARC64
@@ -XXX,XX +XXX,XX @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
         /* fall through */
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
     case GET_ASI_BCOPY:
@@ -XXX,XX +XXX,XX @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
     default:
         {
             TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE);
+            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
             save_state(dc);
 #ifdef TARGET_SPARC64
@@ -XXX,XX +XXX,XX @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
     case GET_ASI_DIRECT:
         oldv = tcg_temp_new();
         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-                                  da.mem_idx, da.memop);
+                                  da.mem_idx, da.memop | MO_ALIGN);
         gen_store_gpr(dc, rd, oldv);
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
         switch (size) {
         case 4:
             d32 = gen_dest_fpr_F(dc);
-            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
             gen_store_fpr_F(dc, rd, d32);
             break;
         case 8:
@@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
         /* Valid for lddfa only.  */
         if (size == 8) {
             gen_address_mask(dc, addr);
-            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
     default:
         {
             TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+            TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
 
             save_state(dc);
             /* According to the table in the UA2011 manual, the only
@@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
         switch (size) {
         case 4:
             d32 = gen_load_fpr_F(dc, rd);
-            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
             break;
         case 8:
             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
@@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
         /* Valid for stdfa only.  */
         if (size == 8) {
             gen_address_mask(dc, addr);
-            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+                                da.memop | MO_ALIGN);
         } else {
             gen_exception(dc, TT_ILL_INSN);
         }
@@ -XXX,XX +XXX,XX @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
         TCGv_i64 tmp = tcg_temp_new_i64();
 
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
 
         /* Note that LE ldda acts as if each 32-bit register
            result is byte swapped.  Having just performed one
@@ -XXX,XX +XXX,XX @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
             tcg_gen_concat32_i64(t64, hi, lo);
         }
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
     }
     break;
 
@@ -XXX,XX +XXX,XX @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
     case GET_ASI_DIRECT:
         oldv = tcg_temp_new();
         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-                                  da.mem_idx, da.memop);
+                                  da.mem_idx, da.memop | MO_ALIGN);
         gen_store_gpr(dc, rd, oldv);
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
         return;
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
     default:
         {
@@ -XXX,XX +XXX,XX @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
         break;
     case GET_ASI_DIRECT:
         gen_address_mask(dc, addr);
-        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
         break;
     case GET_ASI_BFILL:
         /* Store 32 bytes of T64 to ADDR.  */
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x0:   /* ld, V9 lduw, load unsigned word */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TEUL);
+                                   dc->mem_idx, MO_TEUL | MO_ALIGN);
                 break;
             case 0x1:   /* ldub, load unsigned byte */
                 gen_address_mask(dc, cpu_addr);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x2:   /* lduh, load unsigned halfword */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TEUW);
+                                   dc->mem_idx, MO_TEUW | MO_ALIGN);
                 break;
             case 0x3:   /* ldd, load double word */
                 if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_address_mask(dc, cpu_addr);
                 t64 = tcg_temp_new_i64();
                 tcg_gen_qemu_ld_i64(t64, cpu_addr,
-                                    dc->mem_idx, MO_TEUQ);
+                                    dc->mem_idx, MO_TEUQ | MO_ALIGN);
                 tcg_gen_trunc_i64_tl(cpu_val, t64);
                 tcg_gen_ext32u_tl(cpu_val, cpu_val);
                 gen_store_gpr(dc, rd + 1, cpu_val);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0xa:   /* ldsh, load signed halfword */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TESW);
+                                   dc->mem_idx, MO_TESW | MO_ALIGN);
                 break;
             case 0xd:   /* ldstub */
                 gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x08:  /* V9 ldsw */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TESL);
+                                   dc->mem_idx, MO_TESL | MO_ALIGN);
                 break;
             case 0x0b:  /* V9 ldx */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TEUQ);
+                                   dc->mem_idx, MO_TEUQ | MO_ALIGN);
                 break;
             case 0x18:  /* V9 ldswa */
                 gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_address_mask(dc, cpu_addr);
                 cpu_dst_32 = gen_dest_fpr_F(dc);
                 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
-                                    dc->mem_idx, MO_TEUL);
+                                    dc->mem_idx, MO_TEUL | MO_ALIGN);
                 gen_store_fpr_F(dc, rd, cpu_dst_32);
                 break;
             case 0x21:  /* ldfsr, V9 ldxfsr */
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 if (rd == 1) {
                     TCGv_i64 t64 = tcg_temp_new_i64();
                     tcg_gen_qemu_ld_i64(t64, cpu_addr,
-                                        dc->mem_idx, MO_TEUQ);
+                                        dc->mem_idx, MO_TEUQ | MO_ALIGN);
                     gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                     break;
                 }
 #endif
                 cpu_dst_32 = tcg_temp_new_i32();
                 tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
-                                    dc->mem_idx, MO_TEUL);
+                                    dc->mem_idx, MO_TEUL | MO_ALIGN);
                 gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                 break;
             case 0x22:  /* ldqf, load quad fpreg */
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x4:   /* st, store word */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TEUL);
+                                   dc->mem_idx, MO_TEUL | MO_ALIGN);
                 break;
             case 0x5:   /* stb, store byte */
                 gen_address_mask(dc, cpu_addr);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x6:   /* sth, store halfword */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TEUW);
+                                   dc->mem_idx, MO_TEUW | MO_ALIGN);
                 break;
             case 0x7:   /* std, store double word */
                 if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 t64 = tcg_temp_new_i64();
                 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                 tcg_gen_qemu_st_i64(t64, cpu_addr,
-                                    dc->mem_idx, MO_TEUQ);
+                                    dc->mem_idx, MO_TEUQ | MO_ALIGN);
             }
             break;
 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
             case 0x0e:  /* V9 stx */
                 gen_address_mask(dc, cpu_addr);
                 tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
-                                   dc->mem_idx, MO_TEUQ);
+                                   dc->mem_idx, MO_TEUQ | MO_ALIGN);
                 break;
             case 0x1e:  /* V9 stxa */
                 gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_address_mask(dc, cpu_addr);
                 cpu_src1_32 = gen_load_fpr_F(dc, rd);
                 tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
-                                    dc->mem_idx, MO_TEUL);
+                                    dc->mem_idx, MO_TEUL | MO_ALIGN);
                 break;
             case 0x25:  /* stfsr, V9 stxfsr */
                 {
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
                 gen_address_mask(dc, cpu_addr);
                 if (rd == 1) {
                     tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
-                                       dc->mem_idx, MO_TEUQ);
+                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                     break;
                 }
 #endif
                 tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
-                                   dc->mem_idx, MO_TEUL);
+                                   dc->mem_idx, MO_TEUL | MO_ALIGN);
             }
             break;
         case 0x26:
--
2.34.1
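The comment that the cputlb patch moves wholesale documents the contract callers rely on: -1 means the page cannot be executed straight out of RAM. A hedged sketch of a caller, written only from that documented behavior (variable names are illustrative):

    void *host;
    tb_page_addr_t phys = get_page_addr_code_hostp(env, pc, &host);
    if (phys == -1) {
        /* MMIO, or MMU protection finer than a page: translate one
         * insn at a time, without caching, redoing the check per insn. */
    } else {
        /* 'host' points at the backing bytes of the guest page. */
    }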
Old patch

It was non-obvious to me why we can raise an exception in
the middle of a comparison function, but it works.
While nearby, use TARGET_PAGE_ALIGN instead of open-coding.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
             tb_page_addr_t phys_page2;
             target_ulong virt_page2;
 
-            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+            /*
+             * We know that the first page matched, and an otherwise valid TB
+             * encountered an incomplete instruction at the end of that page,
+             * therefore we know that generating a new TB from the current PC
+             * must also require reading from the next page -- even if the
+             * second pages do not match, and therefore the resulting insn
+             * is different for the new TB.  Therefore any exception raised
+             * here by the faulting lookup is not premature.
+             */
+            virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
             phys_page2 = get_page_addr_code(desc->env, virt_page2);
             if (tb->page_addr[1] == phys_page2) {
                 return true;
--
2.34.1

New patch

This passes on the memop as given as argument to
helper_ld_asi to the ultimate load primitive.

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/ldst_helper.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
     uint32_t last_addr = addr;
 #endif
+    MemOpIdx oi;
 
     do_check_align(env, addr, size - 1, GETPC());
     switch (asi) {
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
     case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
         break;
     case ASI_KERNELTXT: /* Supervisor code access */
+        oi = make_memop_idx(memop, cpu_mmu_index(env, true));
         switch (size) {
         case 1:
-            ret = cpu_ldub_code(env, addr);
+            ret = cpu_ldb_code_mmu(env, addr, oi, GETPC());
             break;
         case 2:
-            ret = cpu_lduw_code(env, addr);
+            ret = cpu_ldw_code_mmu(env, addr, oi, GETPC());
             break;
         default:
         case 4:
-            ret = cpu_ldl_code(env, addr);
+            ret = cpu_ldl_code_mmu(env, addr, oi, GETPC());
             break;
         case 8:
-            ret = cpu_ldq_code(env, addr);
+            ret = cpu_ldq_code_mmu(env, addr, oi, GETPC());
             break;
         }
         break;
--
2.34.1
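A quick arithmetic check of the TARGET_PAGE_ALIGN substitution above (4 KiB pages and local macro names assumed purely for illustration):

    #define PAGE_SZ          0x1000
    #define PAGE_MSK         (~(PAGE_SZ - 1))
    #define PAGE_ALIGN_UP(x) (((x) + PAGE_SZ - 1) & PAGE_MSK)

    /* Mid-page pc: both forms yield the start of the following page. */
    _Static_assert(PAGE_ALIGN_UP(0x4123) ==
                   ((0x4123 & PAGE_MSK) + PAGE_SZ), "");
    /* A page-aligned pc is the one value where the two forms diverge
     * (0x4000 vs 0x5000). */
    _Static_assert(PAGE_ALIGN_UP(0x4000) !=
                   ((0x4000 & PAGE_MSK) + PAGE_SZ), "");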
Old patch

The mmap_lock is held around tb_gen_code.  While the comment
is correct that the lock is dropped when tb_gen_code runs out
of memory, the lock is *not* dropped when an exception is
raised reading code for translation.

Acked-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c  | 12 ++++++------
 accel/tcg/user-exec.c |  3 ---
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
     } else {
-        /*
-         * The mmap_lock is dropped by tb_gen_code if it runs out of
-         * memory.
-         */
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
 
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
      * (and if the translator doesn't handle page boundaries correctly
      * there's little we can do about that here).  Therefore, do not
      * trigger the unwinder.
-     *
-     * Like tb_gen_code, release the memory lock before cpu_loop_exit.
      */
-    mmap_unlock();
     *pc = 0;
     return MMU_INST_FETCH;
 }
--
2.34.1

New patch

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configs/targets/sparc-linux-user.mak       | 1 -
 configs/targets/sparc-softmmu.mak          | 1 -
 configs/targets/sparc32plus-linux-user.mak | 1 -
 configs/targets/sparc64-linux-user.mak     | 1 -
 configs/targets/sparc64-softmmu.mak        | 1 -
 5 files changed, 5 deletions(-)

diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc-linux-user.mak
+++ b/configs/targets/sparc-linux-user.mak
@@ -XXX,XX +XXX,XX @@
 TARGET_ARCH=sparc
 TARGET_SYSTBL_ABI=common,32
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc-softmmu.mak
+++ b/configs/targets/sparc-softmmu.mak
@@ -XXX,XX +XXX,XX @@
 TARGET_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc32plus-linux-user.mak
+++ b/configs/targets/sparc32plus-linux-user.mak
@@ -XXX,XX +XXX,XX @@ TARGET_BASE_ARCH=sparc
 TARGET_ABI_DIR=sparc
 TARGET_SYSTBL_ABI=common,32
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc64-linux-user.mak
+++ b/configs/targets/sparc64-linux-user.mak
@@ -XXX,XX +XXX,XX @@ TARGET_BASE_ARCH=sparc
 TARGET_ABI_DIR=sparc
 TARGET_SYSTBL_ABI=common,64
 TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc64-softmmu.mak
+++ b/configs/targets/sparc64-softmmu.mak
@@ -XXX,XX +XXX,XX @@
 TARGET_ARCH=sparc64
 TARGET_BASE_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
 TARGET_BIG_ENDIAN=y
--
2.34.1
New patch

Interpret the variable argument placement in the caller.  Pass data_type
instead of is64 -- there are several places where we already convert back
from bool to type.  Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 111 +++++++++++++++++---------------------
 1 file changed, 50 insertions(+), 61 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
  * Record the context of a call to the out of line helper code for the slow path
  * for a load or store, so that we can later generate the correct helper code
  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
-                                MemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+                                TCGType type, MemOpIdx oi,
                                 TCGReg datalo, TCGReg datahi,
                                 TCGReg addrlo, TCGReg addrhi,
                                 tcg_insn_unit *raddr,
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
 
     label->is_ld = is_ld;
     label->oi = oi;
-    label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    label->type = type;
     label->datalo_reg = datalo;
     label->datahi_reg = datahi;
     label->addrlo_reg = addrlo;
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                    TCGReg base, int index, intptr_t ofs,
-                                   int seg, bool is64, MemOp memop)
+                                   int seg, TCGType type, MemOp memop)
 {
-    TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
     bool use_movbe = false;
-    int rexw = is64 * P_REXW;
+    int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
    int movop = OPC_MOVL_GvEv;
 
    /* Do big-endian loads with movbe.  */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     }
 }
 
-/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
-   EAX. It will be useful once fixed registers globals are less
-   common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc;
+    MemOp opc = get_memop(oi);
+
 #if defined(CONFIG_SOFTMMU)
-    int mem_index;
     tcg_insn_unit *label_ptr[2];
-#else
-    unsigned a_bits;
-#endif
 
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    mem_index = get_mmuidx(oi);
-
-    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+    tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
                      label_ptr, offsetof(CPUTLBEntry, addr_read));
 
     /* TLB Hit.  */
-    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1,
+                           -1, 0, 0, data_type, opc);
 
     /* Record the current context of a load into ldst label */
-    add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
     }
 
     tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
                            x86_guest_base_offset, x86_guest_base_seg,
-                           is64, opc);
+                           data_type, opc);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg datalo, datahi, addrlo;
-    TCGReg addrhi __attribute__((unused));
-    MemOpIdx oi;
-    MemOp opc;
+    MemOp opc = get_memop(oi);
+
 #if defined(CONFIG_SOFTMMU)
-    int mem_index;
     tcg_insn_unit *label_ptr[2];
-#else
-    unsigned a_bits;
-#endif
 
-    datalo = *args++;
-    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
-    addrlo = *args++;
-    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    mem_index = get_mmuidx(oi);
-
-    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+    tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
                      label_ptr, offsetof(CPUTLBEntry, addr_write));
 
     /* TLB Hit.  */
     tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
 
     /* Record the current context of a store into ldst label */
-    add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
     }
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, 0);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, 1);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+        }
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st8_i32:
-        tcg_out_qemu_st(s, args, 0);
+        if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+            tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+        } else {
+            tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+        }
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, 1);
+        if (TCG_TARGET_REG_BITS == 64) {
+            tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+        } else if (TARGET_LONG_BITS == 32) {
+            tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+        } else {
+            tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+        }
         break;
 
     OP_32_64(mulu2):
--
2.34.1
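For readers not steeped in the TCG backend interface, the variable argument placement that this patch moves into tcg_out_op() follows from how many host registers each value occupies. Schematically (an illustration, not code from the patch):

    /* Operand layout for INDEX_op_qemu_ld_i64 as decoded above:
     *   64-bit host:                { data,           addr,           oi }
     *   32-bit host, 32-bit guest:  { datalo, datahi, addr,           oi }
     *   32-bit host, 64-bit guest:  { datalo, datahi, addrlo, addrhi, oi }
     * In the new explicit calls, -1 stands for "no high-part register".
     */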
Old patch

From: Ilya Leoshkevich <iii@linux.ibm.com>

Currently it's possible to execute pages that do not have PAGE_EXEC
if there is an existing translation block.  Fix by invalidating TBs
that touch the affected pages.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20220817150506.592862-2-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/mmap.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
             goto error;
         }
     }
+
     page_set_flags(start, start + len, page_flags);
-    mmap_unlock();
-    return 0;
+    tb_invalidate_phys_range(start, start + len);
+    ret = 0;
+
 error:
     mmap_unlock();
     return ret;
--
2.34.1

New patch

Test for both base and index; use datahi as a temporary, overwritten
by the final load.  Always perform the loads in ascending order, so
that any (user-only) fault sees the correct address.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
         if (TCG_TARGET_REG_BITS == 64) {
             tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
                                      base, index, 0, ofs);
+            break;
+        }
+        if (use_movbe) {
+            TCGReg t = datalo;
+            datalo = datahi;
+            datahi = t;
+        }
+        if (base == datalo || index == datalo) {
+            tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, base, index, 0, ofs);
+            tcg_out_modrm_offset(s, movop + seg, datalo, datahi, 0);
+            tcg_out_modrm_offset(s, movop + seg, datahi, datahi, 4);
         } else {
-            if (use_movbe) {
-                TCGReg t = datalo;
-                datalo = datahi;
-                datahi = t;
-            }
-            if (base != datalo) {
-                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                         base, index, 0, ofs);
-                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                         base, index, 0, ofs + 4);
-            } else {
-                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                         base, index, 0, ofs + 4);
-                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                         base, index, 0, ofs);
-            }
+            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
+                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, movop + seg, datahi,
+                                     base, index, 0, ofs + 4);
         }
         break;
     default:
--
2.34.1
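The ordering requirement in the commit message is worth spelling out: in user-only mode a faulting host load reports a guest address derived from the faulting host address, and that must correspond to the start of the 8-byte guest access. Schematically (comment-only sketch):

    /* 64-bit guest load split into two 32-bit host loads:
     *   ascending:   mov ofs(base),   lo   <-- fault reports the access start
     *                mov ofs+4(base), hi
     *   descending:  mov ofs+4(base), hi   <-- fault would report start + 4
     *                mov ofs(base),   lo
     */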
New patch

Collect the 4 potential parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.
Reorg guest_base handling to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 165 +++++++++++++++++++++-----------------
 1 file changed, 90 insertions(+), 75 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
     tcg_out8(s, 0x90);
 }
 
+typedef struct {
+    TCGReg base;
+    int index;
+    int ofs;
+    int seg;
+} HostAddress;
+
 #if defined(CONFIG_SOFTMMU)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return tcg_out_fail_alignment(s, l);
 }
 
-#if TCG_TARGET_REG_BITS == 32
-# define x86_guest_base_seg     0
-# define x86_guest_base_index   -1
-# define x86_guest_base_offset  guest_base
-#else
-static int x86_guest_base_seg;
-static int x86_guest_base_index = -1;
-static int32_t x86_guest_base_offset;
-# if defined(__x86_64__) && defined(__linux__)
-#  include <asm/prctl.h>
-#  include <sys/prctl.h>
+static HostAddress x86_guest_base = {
+    .index = -1
+};
+
+#if defined(__x86_64__) && defined(__linux__)
+# include <asm/prctl.h>
+# include <sys/prctl.h>
 int arch_prctl(int code, unsigned long addr);
 static inline int setup_guest_base_seg(void)
 {
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
     }
     return 0;
 }
-# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
-#  include <machine/sysarch.h>
+#elif defined(__x86_64__) && \
+    (defined (__FreeBSD__) || defined (__FreeBSD_kernel__))
+# include <machine/sysarch.h>
 static inline int setup_guest_base_seg(void)
 {
     if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) {
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
     }
     return 0;
 }
-# else
+#else
 static inline int setup_guest_base_seg(void)
 {
     return 0;
 }
-# endif
-#endif
+#endif /* setup_guest_base_seg */
 #endif /* SOFTMMU */
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                                   TCGReg base, int index, intptr_t ofs,
-                                   int seg, TCGType type, MemOp memop)
+                                   HostAddress h, TCGType type, MemOp memop)
 {
     bool use_movbe = false;
     int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
 
     switch (memop & MO_SSIZE) {
     case MO_UB:
-        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
-                                 base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_SB:
-        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
-                                 base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_UW:
         if (use_movbe) {
             /* There is no extending movbe; only low 16-bits are modified.  */
-            if (datalo != base && datalo != index) {
+            if (datalo != h.base && datalo != h.index) {
                 /* XOR breaks dependency chains.  */
                 tgen_arithr(s, ARITH_XOR, datalo, datalo);
-                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
-                                         datalo, base, index, 0, ofs);
+                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
+                                         datalo, h.base, h.index, 0, h.ofs);
             } else {
-                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
-                                         datalo, base, index, 0, ofs);
+                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
+                                         datalo, h.base, h.index, 0, h.ofs);
                 tcg_out_ext16u(s, datalo, datalo);
             }
         } else {
-            tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVZWL + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
         }
         break;
     case MO_SW:
         if (use_movbe) {
-            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
-                                     datalo, base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
+                                     datalo, h.base, h.index, 0, h.ofs);
             tcg_out_ext16s(s, type, datalo, datalo);
         } else {
-            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
-                                     datalo, base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + h.seg,
+                                     datalo, h.base, h.index, 0, h.ofs);
         }
         break;
     case MO_UL:
-        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
 #if TCG_TARGET_REG_BITS == 64
     case MO_SL:
         if (use_movbe) {
-            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
             tcg_out_ext32s(s, datalo, datalo);
         } else {
-            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
         }
         break;
 #endif
     case MO_UQ:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
             break;
         }
         if (use_movbe) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
             datalo = datahi;
             datahi = t;
         }
-        if (base == datalo || index == datalo) {
-            tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, base, index, 0, ofs);
-            tcg_out_modrm_offset(s, movop + seg, datalo, datahi, 0);
-            tcg_out_modrm_offset(s, movop + seg, datahi, datahi, 4);
+        if (h.base == datalo || h.index == datalo) {
+            tcg_out_modrm_sib_offset(s, OPC_LEA, datahi,
+                                     h.base, h.index, 0, h.ofs);
+            tcg_out_modrm_offset(s, movop + h.seg, datalo, datahi, 0);
+            tcg_out_modrm_offset(s, movop + h.seg, datahi, datahi, 4);
         } else {
-            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                     base, index, 0, ofs);
-            tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                     base, index, 0, ofs + 4);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datahi,
+                                     h.base, h.index, 0, h.ofs + 4);
         }
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;
 
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[2];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                      label_ptr, offsetof(CPUTLBEntry, addr_read));
 
     /* TLB Hit.  */
-    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1,
-                           -1, 0, 0, data_type, opc);
+    h.base = TCG_REG_L1;
+    h.index = -1;
+    h.ofs = 0;
+    h.seg = 0;
+    tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc);
 
     /* Record the current context of a load into ldst label */
     add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
         tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
     }
 
-    tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
-                           x86_guest_base_offset, x86_guest_base_seg,
-                           data_type, opc);
+    h = x86_guest_base;
+    h.base = addrlo;
+    tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc);
 #endif
 }
 
 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
-                                   TCGReg base, int index, intptr_t ofs,
-                                   int seg, MemOp memop)
+                                   HostAddress h, MemOp memop)
 {
     bool use_movbe = false;
     int movop = OPC_MOVL_EvGv;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
     case MO_8:
         /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
         tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
-        tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
-                                 datalo, base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
+                                 datalo, h.base, h.index, 0, h.ofs);
         break;
     case MO_16:
-        tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
-                                 base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, movop + P_DATA16 + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_32:
-        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
+        tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                 h.base, h.index, 0, h.ofs);
         break;
     case MO_64:
         if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
-                                     base, index, 0, ofs);
+            tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
         } else {
             if (use_movbe) {
                 TCGReg t = datalo;
                 datalo = datahi;
                 datahi = t;
             }
-            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
-                                     base, index, 0, ofs);
-            tcg_out_modrm_sib_offset(s, movop + seg, datahi,
-                                     base, index, 0, ofs + 4);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
+                                     h.base, h.index, 0, h.ofs);
+            tcg_out_modrm_sib_offset(s, movop + h.seg, datahi,
+                                     h.base, h.index, 0, h.ofs + 4);
         }
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;
 
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[2];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
                      label_ptr, offsetof(CPUTLBEntry, addr_write));
 
     /* TLB Hit.  */
-    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
+    h.base = TCG_REG_L1;
+    h.index = -1;
+    h.ofs = 0;
+    h.seg = 0;
+    tcg_out_qemu_st_direct(s, datalo, datahi, h, opc);
 
     /* Record the current context of a store into ldst label */
     add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
         tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
     }
 
-    tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
-                           x86_guest_base_offset, x86_guest_base_seg, opc);
+    h = x86_guest_base;
+    h.base = addrlo;
+
+    tcg_out_qemu_st_direct(s, datalo, datahi, h, opc);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
                          (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
                          + stack_addend);
 #else
-# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
+# if !defined(CONFIG_SOFTMMU)
     if (guest_base) {
         int seg = setup_guest_base_seg();
         if (seg != 0) {
-            x86_guest_base_seg = seg;
+            x86_guest_base.seg = seg;
         } else if (guest_base == (int32_t)guest_base) {
-            x86_guest_base_offset = guest_base;
+            x86_guest_base.ofs = guest_base;
         } else {
             /* Choose R12 because, as a base, it requires a SIB byte. */
-            x86_guest_base_index = TCG_REG_R12;
-            tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
-            tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
+            x86_guest_base.index = TCG_REG_R12;
+            tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base);
+            tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index);
         }
     }
 # endif
--
2.34.1
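The HostAddress struct reads naturally as a symbolic x86 addressing mode. Under that interpretation (an editorial gloss, not text from the patch), a value h denotes the host operand h.seg:[h.base + h.index + h.ofs]:

    /* index == -1 means no index register; seg, when nonzero, is a
     * segment-override prefix (used when guest_base lives in a segment
     * base register).  The TLB-hit and guest_base paths then differ only
     * in how the struct is filled, e.g. for a TLB hit: */
    HostAddress h = { .base = TCG_REG_L1, .index = -1, .ofs = 0, .seg = 0 };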
Old patch

This bit is not saved across interrupts, so we must
delay delivering the interrupt until the skip has
been processed.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1118
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/helper.c    |  9 +++++++++
 target/avr/translate.c | 26 ++++++++++++++++++++++----
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     AVRCPU *cpu = AVR_CPU(cs);
     CPUAVRState *env = &cpu->env;
 
+    /*
+     * We cannot separate a skip from the next instruction,
+     * as the skip would not be preserved across the interrupt.
+     * Separating the two insn normally only happens at page boundaries.
+     */
+    if (env->skip) {
+        return false;
+    }
+
     if (interrupt_request & CPU_INTERRUPT_RESET) {
         if (cpu_interrupts_enabled(env)) {
             cs->exception_index = EXCP_RESET;
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     if (skip_label) {
         canonicalize_skip(ctx);
         gen_set_label(skip_label);
-        if (ctx->base.is_jmp == DISAS_NORETURN) {
+
+        switch (ctx->base.is_jmp) {
+        case DISAS_NORETURN:
             ctx->base.is_jmp = DISAS_CHAIN;
+            break;
+        case DISAS_NEXT:
+            if (ctx->base.tb->flags & TB_FLAGS_SKIP) {
+                ctx->base.is_jmp = DISAS_TOO_MANY;
+            }
+            break;
+        default:
+            break;
         }
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
     bool nonconst_skip = canonicalize_skip(ctx);
+    /*
+     * Because we disable interrupts while env->skip is set,
+     * we must return to the main loop to re-evaluate afterward.
+     */
+    bool force_exit = ctx->base.tb->flags & TB_FLAGS_SKIP;
 
     switch (ctx->base.is_jmp) {
     case DISAS_NORETURN:
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     case DISAS_NEXT:
     case DISAS_TOO_MANY:
     case DISAS_CHAIN:
-        if (!nonconst_skip) {
+        if (!nonconst_skip && !force_exit) {
             /* Note gen_goto_tb checks singlestep.  */
             gen_goto_tb(ctx, 1, ctx->npc);
             break;
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         tcg_gen_movi_tl(cpu_pc, ctx->npc);
         /* fall through */
     case DISAS_LOOKUP:
-        tcg_gen_lookup_and_goto_ptr();
-        break;
+        if (!force_exit) {
+            tcg_gen_lookup_and_goto_ptr();
+            break;
+        }
+        /* fall through */
     case DISAS_EXIT:
         tcg_gen_exit_tb(NULL, 0);
         break;
--
2.34.1

New patch

Use TCG_REG_L[01] constants directly.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                     int mem_index, MemOp opc,
                                     tcg_insn_unit **label_ptr, int which)
 {
-    const TCGReg r0 = TCG_REG_L0;
-    const TCGReg r1 = TCG_REG_L1;
     TCGType ttype = TCG_TYPE_I32;
     TCGType tlbtype = TCG_TYPE_I32;
     int trexw = 0, hrexw = 0, tlbrexw = 0;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
         }
     }
 
-    tcg_out_mov(s, tlbtype, r0, addrlo);
-    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
+    tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
-    tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
+    tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
                          TLB_MASK_TABLE_OFS(mem_index) +
                          offsetof(CPUTLBDescFast, mask));
 
-    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
                          TLB_MASK_TABLE_OFS(mem_index) +
                          offsetof(CPUTLBDescFast, table));
 
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
        copy the address and mask.  For lesser alignments, check that we don't
        cross pages for the complete access. */
     if (a_bits >= s_bits) {
-        tcg_out_mov(s, ttype, r1, addrlo);
+        tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
     } else {
-        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
+        tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
+                             addrlo, s_mask - a_mask);
     }
     tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
-    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
+    tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
 
-    /* cmp 0(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);
+    /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
+    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
+                         TCG_REG_L1, TCG_REG_L0, which);
 
     /* Prepare for both the fast path add of the tlb addend, and the slow
        path function argument setup.  */
-    tcg_out_mov(s, ttype, r1, addrlo);
+    tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
 
     /* jne slow_path */
     tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     s->code_ptr += 4;
 
     if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
-        /* cmp 4(r0), addrhi */
-        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);
+        /* cmp 4(TCG_REG_L0), addrhi */
+        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, which + 4);
 
         /* jne slow_path */
         tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 
     /* TLB Hit.  */
 
-    /* add addend(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
+    /* add addend(TCG_REG_L0), TCG_REG_L1 */
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L1, TCG_REG_L0,
                          offsetof(CPUTLBEntry, addend));
 }
 
--
2.34.1
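Background for the AVR fix, restated: the skip instructions (CPSE, SBRC/SBRS, SBIC/SBIS) suppress the following instruction via env->skip, which is not architectural state and is not saved by interrupt entry. Delivery is therefore refused mid-pair; the core of the fix is just:

    /* In avr_cpu_exec_interrupt(), before any delivery checks: */
    if (env->skip) {
        /* The skip and the insn it guards must execute back to back. */
        return false;
    }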
Old patch

We're about to start validating PAGE_EXEC, which means that we've
got to mark the vsyscall page executable.  We had been special
casing this entirely within translate.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/elfload.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
     (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
 }
 
+#if ULONG_MAX >= TARGET_VSYSCALL_PAGE
+#define INIT_GUEST_COMMPAGE
+static bool init_guest_commpage(void)
+{
+    /*
+     * The vsyscall page is at a high negative address aka kernel space,
+     * which means that we cannot actually allocate it with target_mmap.
+     * We still should be able to use page_set_flags, unless the user
+     * has specified -R reserved_va, which would trigger an assert().
+     */
+    if (reserved_va != 0 &&
+        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+        error_report("Cannot allocate vsyscall page");
+        exit(EXIT_FAILURE);
+    }
+    page_set_flags(TARGET_VSYSCALL_PAGE,
+                   TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+                   PAGE_EXEC | PAGE_VALID);
+    return true;
+}
+#endif
 #else
 
 #define ELF_START_MMAP 0x80000000
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
 #else
 #define HI_COMMPAGE 0
 #define LO_COMMPAGE -1
+#ifndef INIT_GUEST_COMMPAGE
 #define init_guest_commpage() true
 #endif
+#endif
 
 static void pgb_fail_in_use(const char *image_name)
 {
--
2.34.1

New patch

Split out a helper for choosing testb vs testl.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
     tcg_out8(s, 0x90);
 }
 
+/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */
+static void __attribute__((unused))
+tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i)
+{
+    /*
+     * This is used for testing alignment, so we can usually use testb.
+     * For i686, we have to use testl for %esi/%edi.
+     */
+    if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) {
+        tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r);
+        tcg_out8(s, i);
+    } else {
+        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r);
+        tcg_out32(s, i);
+    }
+}
+
 typedef struct {
     TCGReg base;
     int index;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
     unsigned a_mask = (1 << a_bits) - 1;
     TCGLabelQemuLdst *label;
 
-    /*
-     * We are expecting a_bits to max out at 7, so we can usually use testb.
-     * For i686, we have to use testl for %esi/%edi.
-     */
-    if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
-        tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
-        tcg_out8(s, a_mask);
-    } else {
-        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
-        tcg_out32(s, a_mask);
-    }
-
+    tcg_out_testi(s, addrlo, a_mask);
     /* jne slow_path */
     tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
 
--
2.34.1
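One x86 encoding detail explains the 'r < 4' guard in tcg_out_testi above: without a REX prefix only the first four GPRs (%eax, %ecx, %edx, %ebx) have low-byte register forms, and 32-bit mode has no REX, so %esi/%edi must fall back to the 32-bit testl encoding. Usage then collapses to a single call plus the branch, exactly as in the patch:

    tcg_out_testi(s, addrlo, a_mask);                /* Z set iff aligned */
    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0); /* jne slow_path */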
New patch
1
Rename the 'ext' parameter 'data_type' to make the use clearer;
2
pass it to tcg_out_qemu_st as well to even out the interfaces.
3
Rename the 'otype' local 'addr_type' to make the use clearer.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/aarch64/tcg-target.c.inc | 36 +++++++++++++++++-------------------
9
1 file changed, 17 insertions(+), 19 deletions(-)
10
11
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType ext)
+ MemOpIdx oi, TCGType data_type)
{
MemOp memop = get_memop(oi);
- const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;

/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((memop & MO_BSWAP) == 0);

#ifdef CONFIG_SOFTMMU
- unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;

- tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- TCG_REG_X1, otype, addr_reg);
- add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1);
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
+ TCG_REG_X1, addr_type, addr_reg);
+ add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
unsigned a_bits = get_alignment_bits(memop);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
if (USE_GUEST_BASE) {
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- TCG_REG_GUEST_BASE, otype, addr_reg);
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
+ TCG_REG_GUEST_BASE, addr_type, addr_reg);
} else {
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
}
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi)
+ MemOpIdx oi, TCGType data_type)
{
MemOp memop = get_memop(oi);
- const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;

/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((memop & MO_BSWAP) == 0);

#ifdef CONFIG_SOFTMMU
- unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;

- tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0);
tcg_out_qemu_st_direct(s, memop, data_reg,
- TCG_REG_X1, otype, addr_reg);
- add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
- data_reg, addr_reg, s->code_ptr, label_ptr);
+ TCG_REG_X1, addr_type, addr_reg);
+ add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+ s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
unsigned a_bits = get_alignment_bits(memop);
if (a_bits) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
if (USE_GUEST_BASE) {
tcg_out_qemu_st_direct(s, memop, data_reg,
- TCG_REG_GUEST_BASE, otype, addr_reg);
+ TCG_REG_GUEST_BASE, addr_type, addr_reg);
} else {
tcg_out_qemu_st_direct(s, memop, data_reg,
addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, REG0(0), a1, a2);
+ tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
break;

case INDEX_op_bswap64_i64:
--
2.34.1
The current implementation is a no-op, simply returning addr.
This is incorrect, because we ought to be checking the page
permissions for execution.

Make get_page_addr_code inline for both implementations.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 85 ++++++++++++++---------------
accel/tcg/cputlb.c | 5 ---
accel/tcg/user-exec.c | 14 +++++++
3 files changed, 42 insertions(+), 62 deletions(-)

Collect the 3 potential parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.c.inc | 86 +++++++++++++++++++++++++-----------
1 file changed, 59 insertions(+), 27 deletions(-)
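For orientation, the struct the second patch introduces is (copied from the
hunks below; the comments are explanatory additions):

    typedef struct {
        TCGReg base;       /* TCG_REG_X1 (softmmu) or guest-base/address register */
        TCGReg index;      /* addr_reg, or TCG_REG_XZR when base is the full address */
        TCGType index_ext; /* extension type applied to the index (I32 vs I64) */
    } HostAddress;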
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
10
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/exec-all.h
12
--- a/tcg/aarch64/tcg-target.c.inc
20
+++ b/include/exec/exec-all.h
13
+++ b/tcg/aarch64/tcg-target.c.inc
21
@@ -XXX,XX +XXX,XX @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
22
hwaddr index, MemTxAttrs attrs);
15
tcg_out_insn(s, 3406, ADR, rd, offset);
23
#endif
16
}
24
17
25
-#if defined(CONFIG_USER_ONLY)
18
+typedef struct {
26
-void mmap_lock(void);
19
+ TCGReg base;
27
-void mmap_unlock(void);
20
+ TCGReg index;
28
-bool have_mmap_lock(void);
21
+ TCGType index_ext;
29
-
22
+} HostAddress;
30
/**
31
- * get_page_addr_code() - user-mode version
32
+ * get_page_addr_code_hostp()
33
* @env: CPUArchState
34
* @addr: guest virtual address of guest code
35
*
36
- * Returns @addr.
37
+ * See get_page_addr_code() (full-system version) for documentation on the
38
+ * return value.
39
+ *
40
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
41
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
42
+ * to the host address where @addr's content is kept.
43
+ *
44
+ * Note: this function can trigger an exception.
45
+ */
46
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
47
+ void **hostp);
48
+
23
+
49
+/**
24
#ifdef CONFIG_SOFTMMU
50
+ * get_page_addr_code()
25
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
51
+ * @env: CPUArchState
26
* MemOpIdx oi, uintptr_t ra)
52
+ * @addr: guest virtual address of guest code
27
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
53
+ *
28
#endif /* CONFIG_SOFTMMU */
54
+ * If we cannot translate and execute from the entire RAM page, or if
29
55
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
30
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
56
+ * ram_addr_t corresponding to the guest code at @addr.
31
- TCGReg data_r, TCGReg addr_r,
57
+ *
32
- TCGType otype, TCGReg off_r)
58
+ * Note: this function can trigger an exception.
33
+ TCGReg data_r, HostAddress h)
59
*/
60
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
61
target_ulong addr)
62
{
34
{
63
- return addr;
35
switch (memop & MO_SSIZE) {
64
+ return get_page_addr_code_hostp(env, addr, NULL);
36
case MO_UB:
37
- tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
38
+ tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index);
39
break;
40
case MO_SB:
41
tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
42
- data_r, addr_r, otype, off_r);
43
+ data_r, h.base, h.index_ext, h.index);
44
break;
45
case MO_UW:
46
- tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
47
+ tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index);
48
break;
49
case MO_SW:
50
tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
51
- data_r, addr_r, otype, off_r);
52
+ data_r, h.base, h.index_ext, h.index);
53
break;
54
case MO_UL:
55
- tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
56
+ tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index);
57
break;
58
case MO_SL:
59
- tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
60
+ tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index);
61
break;
62
case MO_UQ:
63
- tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
64
+ tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index);
65
break;
66
default:
67
g_assert_not_reached();
68
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
65
}
69
}
66
70
67
-/**
71
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
68
- * get_page_addr_code_hostp() - user-mode version
72
- TCGReg data_r, TCGReg addr_r,
69
- * @env: CPUArchState
73
- TCGType otype, TCGReg off_r)
70
- * @addr: guest virtual address of guest code
74
+ TCGReg data_r, HostAddress h)
71
- *
75
{
72
- * Returns @addr.
76
switch (memop & MO_SIZE) {
73
- *
77
case MO_8:
74
- * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
78
- tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
75
- * is kept.
79
+ tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index);
76
- */
80
break;
77
-static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
81
case MO_16:
78
- target_ulong addr,
82
- tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
79
- void **hostp)
83
+ tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index);
80
-{
84
break;
81
- if (hostp) {
85
case MO_32:
82
- *hostp = g2h_untagged(addr);
86
- tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
83
- }
87
+ tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index);
84
- return addr;
88
break;
85
-}
89
case MO_64:
86
+#if defined(CONFIG_USER_ONLY)
90
- tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
87
+void mmap_lock(void);
91
+ tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index);
88
+void mmap_unlock(void);
92
break;
89
+bool have_mmap_lock(void);
93
default:
90
94
g_assert_not_reached();
91
/**
95
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
92
* adjust_signal_pc:
96
{
93
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
97
MemOp memop = get_memop(oi);
94
static inline void mmap_lock(void) {}
98
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
95
static inline void mmap_unlock(void) {}
99
+ HostAddress h;
96
100
97
-/**
101
/* Byte swapping is left to middle-end expansion. */
98
- * get_page_addr_code() - full-system version
102
tcg_debug_assert((memop & MO_BSWAP) == 0);
99
- * @env: CPUArchState
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
100
- * @addr: guest virtual address of guest code
104
tcg_insn_unit *label_ptr;
101
- *
105
102
- * If we cannot translate and execute from the entire RAM page, or if
106
tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1);
103
- * the region is not backed by RAM, returns -1. Otherwise, returns the
107
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
104
- * ram_addr_t corresponding to the guest code at @addr.
108
- TCG_REG_X1, addr_type, addr_reg);
105
- *
109
+
106
- * Note: this function can trigger an exception.
110
+ h = (HostAddress){
107
- */
111
+ .base = TCG_REG_X1,
108
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
112
+ .index = addr_reg,
109
-
113
+ .index_ext = addr_type
110
-/**
114
+ };
111
- * get_page_addr_code_hostp() - full-system version
115
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
112
- * @env: CPUArchState
116
+
113
- * @addr: guest virtual address of guest code
117
add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
114
- *
118
s->code_ptr, label_ptr);
115
- * See get_page_addr_code() (full-system version) for documentation on the
119
#else /* !CONFIG_SOFTMMU */
116
- * return value.
120
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
117
- *
121
tcg_out_test_alignment(s, true, addr_reg, a_bits);
118
- * Sets *@hostp (when @hostp is non-NULL) as follows.
122
}
119
- * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
123
if (USE_GUEST_BASE) {
120
- * to the host address where @addr's content is kept.
124
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
121
- *
125
- TCG_REG_GUEST_BASE, addr_type, addr_reg);
122
- * Note: this function can trigger an exception.
126
+ h = (HostAddress){
123
- */
127
+ .base = TCG_REG_GUEST_BASE,
124
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
128
+ .index = addr_reg,
125
- void **hostp);
129
+ .index_ext = addr_type
126
-
130
+ };
127
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
131
} else {
128
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
132
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
129
133
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
130
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
134
+ h = (HostAddress){
131
index XXXXXXX..XXXXXXX 100644
135
+ .base = addr_reg,
132
--- a/accel/tcg/cputlb.c
136
+ .index = TCG_REG_XZR,
133
+++ b/accel/tcg/cputlb.c
137
+ .index_ext = TCG_TYPE_I64
134
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
138
+ };
135
return qemu_ram_addr_from_host_nofail(p);
139
}
140
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
141
#endif /* CONFIG_SOFTMMU */
136
}
142
}
137
143
138
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
144
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
139
-{
140
- return get_page_addr_code_hostp(env, addr, NULL);
141
-}
142
-
143
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
144
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
145
{
145
{
146
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
146
MemOp memop = get_memop(oi);
147
index XXXXXXX..XXXXXXX 100644
147
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
148
--- a/accel/tcg/user-exec.c
148
+ HostAddress h;
149
+++ b/accel/tcg/user-exec.c
149
150
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
150
/* Byte swapping is left to middle-end expansion. */
151
return size ? g2h(env_cpu(env), addr) : NULL;
151
tcg_debug_assert((memop & MO_BSWAP) == 0);
152
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
153
tcg_insn_unit *label_ptr;
154
155
tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0);
156
- tcg_out_qemu_st_direct(s, memop, data_reg,
157
- TCG_REG_X1, addr_type, addr_reg);
158
+
159
+ h = (HostAddress){
160
+ .base = TCG_REG_X1,
161
+ .index = addr_reg,
162
+ .index_ext = addr_type
163
+ };
164
+ tcg_out_qemu_st_direct(s, memop, data_reg, h);
165
+
166
add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
167
s->code_ptr, label_ptr);
168
#else /* !CONFIG_SOFTMMU */
169
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
170
tcg_out_test_alignment(s, false, addr_reg, a_bits);
171
}
172
if (USE_GUEST_BASE) {
173
- tcg_out_qemu_st_direct(s, memop, data_reg,
174
- TCG_REG_GUEST_BASE, addr_type, addr_reg);
175
+ h = (HostAddress){
176
+ .base = TCG_REG_GUEST_BASE,
177
+ .index = addr_reg,
178
+ .index_ext = addr_type
179
+ };
180
} else {
181
- tcg_out_qemu_st_direct(s, memop, data_reg,
182
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
183
+ h = (HostAddress){
184
+ .base = addr_reg,
185
+ .index = TCG_REG_XZR,
186
+ .index_ext = TCG_TYPE_I64
187
+ };
188
}
189
+ tcg_out_qemu_st_direct(s, memop, data_reg, h);
190
#endif /* CONFIG_SOFTMMU */
152
}
191
}
153
192
154
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
155
+ void **hostp)
156
+{
157
+ int flags;
158
+
159
+ flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
160
+ g_assert(flags == 0);
161
+
162
+ if (hostp) {
163
+ *hostp = g2h_untagged(addr);
164
+ }
165
+ return addr;
166
+}
167
+
168
/* The softmmu versions of these helpers are in cputlb.c. */
169
170
/*
171
--
2.34.1
Interpret the variable argument placement in the caller.
Pass data_type instead of is_64. We need to set this in
TCGLabelQemuLdst, so plumb this all the way through from tcg_out_op.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/arm/tcg-target.c.inc | 113 +++++++++++++--------------------
1 file changed, 56 insertions(+), 57 deletions(-)
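The shape of the change at the call site, condensed from the hunks below
(illustrative only; the 32-bit-address qemu_ld_i32 case is shown):

    /* before: the callee decoded the TCGArg array itself */
    tcg_out_qemu_ld(s, args, 0);

    /* after: the caller interprets args and passes registers plus a type */
    tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);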
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
/* Record the context of a call to the out of line helper code for the slow
path for a load or store, so that we can later generate the correct
helper code. */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
- TCGReg datalo, TCGReg datahi, TCGReg addrlo,
- TCGReg addrhi, tcg_insn_unit *raddr,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+ MemOpIdx oi, TCGType type,
+ TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ tcg_insn_unit *raddr,
tcg_insn_unit *label_ptr)
{
TCGLabelQemuLdst *label = new_ldst_label(s);

label->is_ld = is_ld;
label->oi = oi;
+ label->type = type;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}
#endif

-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
- MemOpIdx oi;
- MemOp opc;
-#ifdef CONFIG_SOFTMMU
- int mem_index;
- TCGReg addend;
- tcg_insn_unit *label_ptr;
-#else
- unsigned a_bits;
-#endif
-
- datalo = *args++;
- datahi = (is64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
+ MemOp opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
- mem_index = get_mmuidx(oi);
- addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
+ TCGReg addend= tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);

- /* This a conditional BL only to load a pointer within this opcode into LR
- for the slow path. We will not be using the value for a tail call. */
- label_ptr = s->code_ptr;
+ /*
+ * This a conditional BL only to load a pointer within this opcode into
+ * LR for the slow path. We will not be using the value for a tail call.
+ */
+ tcg_insn_unit *label_ptr = s->code_ptr;
tcg_out_bl_imm(s, COND_NE, 0);

tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);

- add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
+ addrlo, addrhi, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}
#endif

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
- MemOpIdx oi;
- MemOp opc;
-#ifdef CONFIG_SOFTMMU
- int mem_index;
- TCGReg addend;
- tcg_insn_unit *label_ptr;
-#else
- unsigned a_bits;
-#endif
-
- datalo = *args++;
- datahi = (is64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
+ MemOp opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
- mem_index = get_mmuidx(oi);
- addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
+ TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);

tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
addrlo, addend, true);

/* The conditional call must come last, as we're going to return here. */
- label_ptr = s->code_ptr;
+ tcg_insn_unit *label_ptr = s->code_ptr;
tcg_out_bl_imm(s, COND_NE, 0);

- add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
+ addrlo, addrhi, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, 0);
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
+ args[2], TCG_TYPE_I32);
+ } else {
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
+ args[3], TCG_TYPE_I32);
+ }
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, 1);
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
+ args[3], TCG_TYPE_I64);
+ } else {
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
+ args[4], TCG_TYPE_I64);
+ }
break;
case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args, 0);
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_st(s, args[0], -1, args[1], -1,
+ args[2], TCG_TYPE_I32);
+ } else {
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
+ args[3], TCG_TYPE_I32);
+ }
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, 1);
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
+ args[3], TCG_TYPE_I64);
+ } else {
+ tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
+ args[4], TCG_TYPE_I64);
+ }
break;

case INDEX_op_bswap16_i32:
--
2.34.1
We're about to start validating PAGE_EXEC, which means
that we've got to mark the commpage executable. We had
been placing the commpage outside of reserved_va, which
was incorrect and led to an abort.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/arm/target_cpu.h | 4 ++--
linux-user/elfload.c | 6 +++++-
2 files changed, 7 insertions(+), 3 deletions(-)

Collect the parts of the host address, and condition, into a struct.
Merge tcg_out_qemu_*_{index,direct} and use it.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/arm/tcg-target.c.inc | 248 ++++++++++++++++++---------------
1 file changed, 115 insertions(+), 133 deletions(-)
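For orientation, the struct the second patch introduces is (copied from the
hunks below; the comments are explanatory additions):

    typedef struct {
        ARMCond cond;       /* condition code the access is emitted under */
        TCGReg base;
        int index;          /* negative means "no index register" */
        bool index_scratch; /* index may be clobbered, enabling the rwb forms */
    } HostAddress;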
diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
9
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
15
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/arm/target_cpu.h
11
--- a/tcg/arm/tcg-target.c.inc
17
+++ b/linux-user/arm/target_cpu.h
12
+++ b/tcg/arm/tcg-target.c.inc
18
@@ -XXX,XX +XXX,XX @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
19
} else {
14
tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
20
/*
15
}
21
* We need to be able to map the commpage.
16
22
- * See validate_guest_space in linux-user/elfload.c.
17
+typedef struct {
23
+ * See init_guest_commpage in linux-user/elfload.c.
18
+ ARMCond cond;
24
*/
19
+ TCGReg base;
25
- return 0xffff0000ul;
20
+ int index;
26
+ return 0xfffffffful;
21
+ bool index_scratch;
22
+} HostAddress;
23
+
24
#ifdef CONFIG_SOFTMMU
25
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
26
* int mmu_idx, uintptr_t ra)
27
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
28
}
29
#endif /* SOFTMMU */
30
31
-static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
32
- TCGReg datalo, TCGReg datahi,
33
- TCGReg addrlo, TCGReg addend,
34
- bool scratch_addend)
35
+static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
36
+ TCGReg datahi, HostAddress h)
37
{
38
+ TCGReg base;
39
+
40
/* Byte swapping is left to middle-end expansion. */
41
tcg_debug_assert((opc & MO_BSWAP) == 0);
42
43
switch (opc & MO_SSIZE) {
44
case MO_UB:
45
- tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
46
+ if (h.index < 0) {
47
+ tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
48
+ } else {
49
+ tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
50
+ }
51
break;
52
case MO_SB:
53
- tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
54
+ if (h.index < 0) {
55
+ tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
56
+ } else {
57
+ tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
58
+ }
59
break;
60
case MO_UW:
61
- tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
62
+ if (h.index < 0) {
63
+ tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
64
+ } else {
65
+ tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
66
+ }
67
break;
68
case MO_SW:
69
- tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
70
+ if (h.index < 0) {
71
+ tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
72
+ } else {
73
+ tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
74
+ }
75
break;
76
case MO_UL:
77
- tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
78
+ if (h.index < 0) {
79
+ tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
80
+ } else {
81
+ tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
82
+ }
83
break;
84
case MO_UQ:
85
/* We used pair allocation for datalo, so already should be aligned. */
86
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
87
tcg_debug_assert(datahi == datalo + 1);
88
/* LDRD requires alignment; double-check that. */
89
if (get_alignment_bits(opc) >= MO_64) {
90
+ if (h.index < 0) {
91
+ tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
92
+ break;
93
+ }
94
/*
95
* Rm (the second address op) must not overlap Rt or Rt + 1.
96
* Since datalo is aligned, we can simplify the test via alignment.
97
* Flip the two address arguments if that works.
98
*/
99
- if ((addend & ~1) != datalo) {
100
- tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
101
+ if ((h.index & ~1) != datalo) {
102
+ tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
103
break;
104
}
105
- if ((addrlo & ~1) != datalo) {
106
- tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
107
+ if ((h.base & ~1) != datalo) {
108
+ tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
109
break;
110
}
111
}
112
- if (scratch_addend) {
113
- tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
114
- tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
115
+ if (h.index < 0) {
116
+ base = h.base;
117
+ if (datalo == h.base) {
118
+ tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
119
+ base = TCG_REG_TMP;
120
+ }
121
+ } else if (h.index_scratch) {
122
+ tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
123
+ tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
124
+ break;
125
} else {
126
- tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
127
- addend, addrlo, SHIFT_IMM_LSL(0));
128
- tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
129
- tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
130
+ tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
131
+ h.base, h.index, SHIFT_IMM_LSL(0));
132
+ base = TCG_REG_TMP;
133
}
134
+ tcg_out_ld32_12(s, h.cond, datalo, base, 0);
135
+ tcg_out_ld32_12(s, h.cond, datahi, base, 4);
136
break;
137
default:
138
g_assert_not_reached();
27
}
139
}
28
}
140
}
29
#define MAX_RESERVED_VA arm_max_reserved_va
141
30
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
142
-#ifndef CONFIG_SOFTMMU
31
index XXXXXXX..XXXXXXX 100644
143
-static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
32
--- a/linux-user/elfload.c
144
- TCGReg datahi, TCGReg addrlo)
33
+++ b/linux-user/elfload.c
145
-{
34
@@ -XXX,XX +XXX,XX @@ enum {
146
- /* Byte swapping is left to middle-end expansion. */
35
147
- tcg_debug_assert((opc & MO_BSWAP) == 0);
36
static bool init_guest_commpage(void)
148
-
149
- switch (opc & MO_SSIZE) {
150
- case MO_UB:
151
- tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
152
- break;
153
- case MO_SB:
154
- tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
155
- break;
156
- case MO_UW:
157
- tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
158
- break;
159
- case MO_SW:
160
- tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
161
- break;
162
- case MO_UL:
163
- tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
164
- break;
165
- case MO_UQ:
166
- /* We used pair allocation for datalo, so already should be aligned. */
167
- tcg_debug_assert((datalo & 1) == 0);
168
- tcg_debug_assert(datahi == datalo + 1);
169
- /* LDRD requires alignment; double-check that. */
170
- if (get_alignment_bits(opc) >= MO_64) {
171
- tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
172
- } else if (datalo == addrlo) {
173
- tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
174
- tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
175
- } else {
176
- tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
177
- tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
178
- }
179
- break;
180
- default:
181
- g_assert_not_reached();
182
- }
183
-}
184
-#endif
185
-
186
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
187
TCGReg addrlo, TCGReg addrhi,
188
MemOpIdx oi, TCGType data_type)
37
{
189
{
38
- void *want = g2h_untagged(HI_COMMPAGE & -qemu_host_page_size);
190
MemOp opc = get_memop(oi);
39
+ abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
191
+ HostAddress h;
40
+ void *want = g2h_untagged(commpage);
192
41
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
193
#ifdef CONFIG_SOFTMMU
42
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
194
- TCGReg addend= tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);
43
195
+ h.cond = COND_AL;
44
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
196
+ h.base = addrlo;
45
perror("Protecting guest commpage");
197
+ h.index_scratch = true;
46
exit(EXIT_FAILURE);
198
+ h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);
199
200
/*
201
* This a conditional BL only to load a pointer within this opcode into
202
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
203
tcg_insn_unit *label_ptr = s->code_ptr;
204
tcg_out_bl_imm(s, COND_NE, 0);
205
206
- tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
207
+ tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
208
209
add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
210
addrlo, addrhi, s->code_ptr, label_ptr);
211
-#else /* !CONFIG_SOFTMMU */
212
+#else
213
unsigned a_bits = get_alignment_bits(opc);
214
if (a_bits) {
215
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
47
}
216
}
48
+
217
- if (guest_base) {
49
+ page_set_flags(commpage, commpage + qemu_host_page_size,
218
- tcg_out_qemu_ld_index(s, opc, datalo, datahi,
50
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
219
- addrlo, TCG_REG_GUEST_BASE, false);
51
return true;
220
- } else {
52
}
221
- tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
222
- }
223
+
224
+ h.cond = COND_AL;
225
+ h.base = addrlo;
226
+ h.index = guest_base ? TCG_REG_GUEST_BASE : -1;
227
+ h.index_scratch = false;
228
+ tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
229
#endif
230
}
231
232
-static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
233
- TCGReg datalo, TCGReg datahi,
234
- TCGReg addrlo, TCGReg addend,
235
- bool scratch_addend)
236
-{
237
- /* Byte swapping is left to middle-end expansion. */
238
- tcg_debug_assert((opc & MO_BSWAP) == 0);
239
-
240
- switch (opc & MO_SIZE) {
241
- case MO_8:
242
- tcg_out_st8_r(s, cond, datalo, addrlo, addend);
243
- break;
244
- case MO_16:
245
- tcg_out_st16_r(s, cond, datalo, addrlo, addend);
246
- break;
247
- case MO_32:
248
- tcg_out_st32_r(s, cond, datalo, addrlo, addend);
249
- break;
250
- case MO_64:
251
- /* We used pair allocation for datalo, so already should be aligned. */
252
- tcg_debug_assert((datalo & 1) == 0);
253
- tcg_debug_assert(datahi == datalo + 1);
254
- /* STRD requires alignment; double-check that. */
255
- if (get_alignment_bits(opc) >= MO_64) {
256
- tcg_out_strd_r(s, cond, datalo, addrlo, addend);
257
- } else if (scratch_addend) {
258
- tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
259
- tcg_out_st32_12(s, cond, datahi, addend, 4);
260
- } else {
261
- tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
262
- addend, addrlo, SHIFT_IMM_LSL(0));
263
- tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
264
- tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
265
- }
266
- break;
267
- default:
268
- g_assert_not_reached();
269
- }
270
-}
271
-
272
-#ifndef CONFIG_SOFTMMU
273
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
274
- TCGReg datahi, TCGReg addrlo)
275
+ TCGReg datahi, HostAddress h)
276
{
277
/* Byte swapping is left to middle-end expansion. */
278
tcg_debug_assert((opc & MO_BSWAP) == 0);
279
280
switch (opc & MO_SIZE) {
281
case MO_8:
282
- tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
283
+ if (h.index < 0) {
284
+ tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
285
+ } else {
286
+ tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
287
+ }
288
break;
289
case MO_16:
290
- tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
291
+ if (h.index < 0) {
292
+ tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
293
+ } else {
294
+ tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
295
+ }
296
break;
297
case MO_32:
298
- tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
299
+ if (h.index < 0) {
300
+ tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
301
+ } else {
302
+ tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
303
+ }
304
break;
305
case MO_64:
306
/* We used pair allocation for datalo, so already should be aligned. */
307
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
308
tcg_debug_assert(datahi == datalo + 1);
309
/* STRD requires alignment; double-check that. */
310
if (get_alignment_bits(opc) >= MO_64) {
311
- tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
312
+ if (h.index < 0) {
313
+ tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
314
+ } else {
315
+ tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
316
+ }
317
+ } else if (h.index_scratch) {
318
+ tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
319
+ tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
320
} else {
321
- tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
322
- tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
323
+ tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
324
+ h.base, h.index, SHIFT_IMM_LSL(0));
325
+ tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
326
+ tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
327
}
328
break;
329
default:
330
g_assert_not_reached();
331
}
332
}
333
-#endif
334
335
static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
336
TCGReg addrlo, TCGReg addrhi,
337
MemOpIdx oi, TCGType data_type)
338
{
339
MemOp opc = get_memop(oi);
340
+ HostAddress h;
341
342
#ifdef CONFIG_SOFTMMU
343
- TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);
344
-
345
- tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
346
- addrlo, addend, true);
347
+ h.cond = COND_EQ;
348
+ h.base = addrlo;
349
+ h.index_scratch = true;
350
+ h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);
351
+ tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
352
353
/* The conditional call must come last, as we're going to return here. */
354
tcg_insn_unit *label_ptr = s->code_ptr;
355
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
356
357
add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
358
addrlo, addrhi, s->code_ptr, label_ptr);
359
-#else /* !CONFIG_SOFTMMU */
360
+#else
361
unsigned a_bits = get_alignment_bits(opc);
362
+
363
+ h.cond = COND_AL;
364
if (a_bits) {
365
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
366
+ h.cond = COND_EQ;
367
}
368
- if (guest_base) {
369
- tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
370
- addrlo, TCG_REG_GUEST_BASE, false);
371
- } else {
372
- tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
373
- }
374
+
375
+ h.base = addrlo;
376
+ h.index = guest_base ? TCG_REG_GUEST_BASE : -1;
377
+ h.index_scratch = false;
378
+ tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
379
#endif
380
}
53
381
54
--
382
--
55
2.34.1
383
2.34.1
diff view generated by jsdifflib
The function is not used outside of cpu-exec.c. Move it and
its subroutines up in the file, before the first use.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 3 -
accel/tcg/cpu-exec.c | 122 ++++++++++++++--------------
2 files changed, 61 insertions(+), 64 deletions(-)

Interpret the variable argument placement in the caller. Shift some
code around slightly to share more between softmmu and user-only.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.c.inc | 100 +++++++++++++------------------
1 file changed, 42 insertions(+), 58 deletions(-)
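The essence of the loongarch64 rework, condensed from the load path in the
hunks below (a sketch, not the full patch):

    #ifdef CONFIG_SOFTMMU
        tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
        index = TCG_REG_TMP2;
    #else
        index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    #endif
        base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
        tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type);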
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
10
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/exec-all.h
12
--- a/tcg/loongarch64/tcg-target.c.inc
16
+++ b/include/exec/exec-all.h
13
+++ b/tcg/loongarch64/tcg-target.c.inc
17
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
15
}
16
}
17
18
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
19
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
20
+ MemOpIdx oi, TCGType data_type)
21
{
22
- TCGReg addr_regl;
23
- TCGReg data_regl;
24
- MemOpIdx oi;
25
- MemOp opc;
26
-#if defined(CONFIG_SOFTMMU)
27
+ MemOp opc = get_memop(oi);
28
+ TCGReg base, index;
29
+
30
+#ifdef CONFIG_SOFTMMU
31
tcg_insn_unit *label_ptr[1];
32
-#else
33
- unsigned a_bits;
34
-#endif
35
- TCGReg base;
36
37
- data_regl = *args++;
38
- addr_regl = *args++;
39
- oi = *args++;
40
- opc = get_memop(oi);
41
-
42
-#if defined(CONFIG_SOFTMMU)
43
- tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
44
- base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
45
- tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
46
- add_qemu_ldst_label(s, 1, oi, type,
47
- data_regl, addr_regl,
48
- s->code_ptr, label_ptr);
49
+ tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
50
+ index = TCG_REG_TMP2;
51
#else
52
- a_bits = get_alignment_bits(opc);
53
+ unsigned a_bits = get_alignment_bits(opc);
54
if (a_bits) {
55
- tcg_out_test_alignment(s, true, addr_regl, a_bits);
56
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
57
}
58
- base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
59
- TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
60
- tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
61
+ index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
62
+#endif
63
+
64
+ base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
65
+ tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type);
66
+
67
+#ifdef CONFIG_SOFTMMU
68
+ add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
69
+ s->code_ptr, label_ptr);
18
#endif
70
#endif
19
void tb_flush(CPUState *cpu);
20
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
21
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
22
- target_ulong cs_base, uint32_t flags,
23
- uint32_t cflags);
24
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
25
26
/* GETPC is the true target of the return instruction that we'll execute. */
27
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/accel/tcg/cpu-exec.c
30
+++ b/accel/tcg/cpu-exec.c
31
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu)
32
return cflags;
33
}
71
}
34
72
35
+struct tb_desc {
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
36
+ target_ulong pc;
74
}
37
+ target_ulong cs_base;
75
}
38
+ CPUArchState *env;
76
39
+ tb_page_addr_t phys_page1;
77
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
40
+ uint32_t flags;
78
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
41
+ uint32_t cflags;
79
+ MemOpIdx oi, TCGType data_type)
42
+ uint32_t trace_vcpu_dstate;
80
{
43
+};
81
- TCGReg addr_regl;
82
- TCGReg data_regl;
83
- MemOpIdx oi;
84
- MemOp opc;
85
-#if defined(CONFIG_SOFTMMU)
86
+ MemOp opc = get_memop(oi);
87
+ TCGReg base, index;
44
+
88
+
45
+static bool tb_lookup_cmp(const void *p, const void *d)
89
+#ifdef CONFIG_SOFTMMU
46
+{
90
tcg_insn_unit *label_ptr[1];
47
+ const TranslationBlock *tb = p;
91
-#else
48
+ const struct tb_desc *desc = d;
92
- unsigned a_bits;
93
-#endif
94
- TCGReg base;
95
96
- data_regl = *args++;
97
- addr_regl = *args++;
98
- oi = *args++;
99
- opc = get_memop(oi);
100
-
101
-#if defined(CONFIG_SOFTMMU)
102
- tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
103
- base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
104
- tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
105
- add_qemu_ldst_label(s, 0, oi, type,
106
- data_regl, addr_regl,
107
- s->code_ptr, label_ptr);
108
+ tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
109
+ index = TCG_REG_TMP2;
110
#else
111
- a_bits = get_alignment_bits(opc);
112
+ unsigned a_bits = get_alignment_bits(opc);
113
if (a_bits) {
114
- tcg_out_test_alignment(s, false, addr_regl, a_bits);
115
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
116
}
117
- base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
118
- TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
119
- tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
120
+ index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
121
+#endif
49
+
122
+
50
+ if (tb->pc == desc->pc &&
123
+ base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
51
+ tb->page_addr[0] == desc->phys_page1 &&
124
+ tcg_out_qemu_st_indexed(s, data_reg, base, index, opc);
52
+ tb->cs_base == desc->cs_base &&
53
+ tb->flags == desc->flags &&
54
+ tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
55
+ tb_cflags(tb) == desc->cflags) {
56
+ /* check next page if needed */
57
+ if (tb->page_addr[1] == -1) {
58
+ return true;
59
+ } else {
60
+ tb_page_addr_t phys_page2;
61
+ target_ulong virt_page2;
62
+
125
+
63
+ virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
126
+#ifdef CONFIG_SOFTMMU
64
+ phys_page2 = get_page_addr_code(desc->env, virt_page2);
127
+ add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
65
+ if (tb->page_addr[1] == phys_page2) {
128
+ s->code_ptr, label_ptr);
66
+ return true;
129
#endif
67
+ }
68
+ }
69
+ }
70
+ return false;
71
+}
72
+
73
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
74
+ target_ulong cs_base, uint32_t flags,
75
+ uint32_t cflags)
76
+{
77
+ tb_page_addr_t phys_pc;
78
+ struct tb_desc desc;
79
+ uint32_t h;
80
+
81
+ desc.env = cpu->env_ptr;
82
+ desc.cs_base = cs_base;
83
+ desc.flags = flags;
84
+ desc.cflags = cflags;
85
+ desc.trace_vcpu_dstate = *cpu->trace_dstate;
86
+ desc.pc = pc;
87
+ phys_pc = get_page_addr_code(desc.env, pc);
88
+ if (phys_pc == -1) {
89
+ return NULL;
90
+ }
91
+ desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
92
+ h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
93
+ return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
94
+}
95
+
96
/* Might cause an exception, so have a longjmp destination ready */
97
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
98
target_ulong cs_base,
99
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
100
end_exclusive();
101
}
130
}
102
131
103
-struct tb_desc {
132
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
104
- target_ulong pc;
133
break;
105
- target_ulong cs_base;
134
106
- CPUArchState *env;
135
case INDEX_op_qemu_ld_i32:
107
- tb_page_addr_t phys_page1;
136
- tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
108
- uint32_t flags;
137
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
109
- uint32_t cflags;
138
break;
110
- uint32_t trace_vcpu_dstate;
139
case INDEX_op_qemu_ld_i64:
111
-};
140
- tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
112
-
141
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
113
-static bool tb_lookup_cmp(const void *p, const void *d)
142
break;
114
-{
143
case INDEX_op_qemu_st_i32:
115
- const TranslationBlock *tb = p;
144
- tcg_out_qemu_st(s, args, TCG_TYPE_I32);
116
- const struct tb_desc *desc = d;
145
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
117
-
146
break;
118
- if (tb->pc == desc->pc &&
147
case INDEX_op_qemu_st_i64:
119
- tb->page_addr[0] == desc->phys_page1 &&
148
- tcg_out_qemu_st(s, args, TCG_TYPE_I64);
120
- tb->cs_base == desc->cs_base &&
149
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
121
- tb->flags == desc->flags &&
150
break;
122
- tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
151
123
- tb_cflags(tb) == desc->cflags) {
152
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
124
- /* check next page if needed */
125
- if (tb->page_addr[1] == -1) {
126
- return true;
127
- } else {
128
- tb_page_addr_t phys_page2;
129
- target_ulong virt_page2;
130
-
131
- virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
132
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
133
- if (tb->page_addr[1] == phys_page2) {
134
- return true;
135
- }
136
- }
137
- }
138
- return false;
139
-}
140
-
141
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
142
- target_ulong cs_base, uint32_t flags,
143
- uint32_t cflags)
144
-{
145
- tb_page_addr_t phys_pc;
146
- struct tb_desc desc;
147
- uint32_t h;
148
-
149
- desc.env = cpu->env_ptr;
150
- desc.cs_base = cs_base;
151
- desc.flags = flags;
152
- desc.cflags = cflags;
153
- desc.trace_vcpu_dstate = *cpu->trace_dstate;
154
- desc.pc = pc;
155
- phys_pc = get_page_addr_code(desc.env, pc);
156
- if (phys_pc == -1) {
157
- return NULL;
158
- }
159
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
160
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
161
- return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
162
-}
163
-
164
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
165
{
166
if (TCG_TARGET_HAS_direct_jump) {
167
--
2.34.1
We're about to start validating PAGE_EXEC, which means that we've
got to mark page zero executable. We had been special-casing this
entirely within translate.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/elfload.c | 34 +++++++++++++++++++++++++++++++---
1 file changed, 31 insertions(+), 3 deletions(-)

Collect the 2 parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.c.inc | 55 +++++++++++++++++---------------
1 file changed, 30 insertions(+), 25 deletions(-)
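For orientation, the struct the second patch introduces is (copied from the
hunks below):

    typedef struct {
        TCGReg base;
        TCGReg index;
    } HostAddress;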
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
10
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/elfload.c
12
--- a/tcg/loongarch64/tcg-target.c.inc
15
+++ b/linux-user/elfload.c
13
+++ b/tcg/loongarch64/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
14
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
17
regs->gr[31] = infop->entry;
15
return addr;
18
}
16
}
19
17
20
+#define LO_COMMPAGE 0
18
-static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
19
- TCGReg rk, MemOp opc, TCGType type)
20
+typedef struct {
21
+ TCGReg base;
22
+ TCGReg index;
23
+} HostAddress;
21
+
24
+
22
+static bool init_guest_commpage(void)
25
+static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
23
+{
26
+ TCGReg rd, HostAddress h)
24
+ void *want = g2h_untagged(LO_COMMPAGE);
27
{
25
+ void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
28
/* Byte swapping is left to middle-end expansion. */
26
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
29
tcg_debug_assert((opc & MO_BSWAP) == 0);
27
+
30
28
+ if (addr == MAP_FAILED) {
31
switch (opc & MO_SSIZE) {
29
+ perror("Allocating guest commpage");
32
case MO_UB:
30
+ exit(EXIT_FAILURE);
33
- tcg_out_opc_ldx_bu(s, rd, rj, rk);
31
+ }
34
+ tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
32
+ if (addr != want) {
35
break;
33
+ return false;
36
case MO_SB:
34
+ }
37
- tcg_out_opc_ldx_b(s, rd, rj, rk);
35
+
38
+ tcg_out_opc_ldx_b(s, rd, h.base, h.index);
36
+ /*
39
break;
37
+ * On Linux, page zero is normally marked execute only + gateway.
40
case MO_UW:
38
+ * Normal read or write is supposed to fail (thus PROT_NONE above),
41
- tcg_out_opc_ldx_hu(s, rd, rj, rk);
39
+ * but specific offsets have kernel code mapped to raise permissions
42
+ tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
40
+ * and implement syscalls. Here, simply mark the page executable.
43
break;
41
+ * Special case the entry points during translation (see do_page_zero).
44
case MO_SW:
42
+ */
45
- tcg_out_opc_ldx_h(s, rd, rj, rk);
43
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
46
+ tcg_out_opc_ldx_h(s, rd, h.base, h.index);
44
+ PAGE_EXEC | PAGE_VALID);
47
break;
45
+ return true;
48
case MO_UL:
46
+}
49
if (type == TCG_TYPE_I64) {
47
+
50
- tcg_out_opc_ldx_wu(s, rd, rj, rk);
48
#endif /* TARGET_HPPA */
51
+ tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
49
52
break;
50
#ifdef TARGET_XTENSA
53
}
51
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
54
/* fallthrough */
55
case MO_SL:
56
- tcg_out_opc_ldx_w(s, rd, rj, rk);
57
+ tcg_out_opc_ldx_w(s, rd, h.base, h.index);
58
break;
59
case MO_UQ:
60
- tcg_out_opc_ldx_d(s, rd, rj, rk);
61
+ tcg_out_opc_ldx_d(s, rd, h.base, h.index);
62
break;
63
default:
64
g_assert_not_reached();
65
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
66
MemOpIdx oi, TCGType data_type)
67
{
68
MemOp opc = get_memop(oi);
69
- TCGReg base, index;
70
+ HostAddress h;
71
72
#ifdef CONFIG_SOFTMMU
73
tcg_insn_unit *label_ptr[1];
74
75
tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
76
- index = TCG_REG_TMP2;
77
+ h.index = TCG_REG_TMP2;
78
#else
79
unsigned a_bits = get_alignment_bits(opc);
80
if (a_bits) {
81
tcg_out_test_alignment(s, true, addr_reg, a_bits);
82
}
83
- index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
84
+ h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
85
#endif
86
87
- base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
88
- tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type);
89
+ h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
90
+ tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h);
91
92
#ifdef CONFIG_SOFTMMU
93
add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
94
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
95
#endif
52
}
96
}
53
97
54
#if defined(HI_COMMPAGE)
98
-static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
55
-#define LO_COMMPAGE 0
99
- TCGReg rj, TCGReg rk, MemOp opc)
56
+#define LO_COMMPAGE -1
100
+static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
57
#elif defined(LO_COMMPAGE)
101
+ TCGReg rd, HostAddress h)
58
#define HI_COMMPAGE 0
102
{
103
/* Byte swapping is left to middle-end expansion. */
104
tcg_debug_assert((opc & MO_BSWAP) == 0);
105
106
switch (opc & MO_SIZE) {
107
case MO_8:
108
- tcg_out_opc_stx_b(s, data, rj, rk);
109
+ tcg_out_opc_stx_b(s, rd, h.base, h.index);
110
break;
111
case MO_16:
112
- tcg_out_opc_stx_h(s, data, rj, rk);
113
+ tcg_out_opc_stx_h(s, rd, h.base, h.index);
114
break;
115
case MO_32:
116
- tcg_out_opc_stx_w(s, data, rj, rk);
117
+ tcg_out_opc_stx_w(s, rd, h.base, h.index);
118
break;
119
case MO_64:
120
- tcg_out_opc_stx_d(s, data, rj, rk);
121
+ tcg_out_opc_stx_d(s, rd, h.base, h.index);
122
break;
123
default:
124
g_assert_not_reached();
125
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
126
MemOpIdx oi, TCGType data_type)
127
{
128
MemOp opc = get_memop(oi);
129
- TCGReg base, index;
130
+ HostAddress h;
131
132
#ifdef CONFIG_SOFTMMU
133
tcg_insn_unit *label_ptr[1];
134
135
tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
136
- index = TCG_REG_TMP2;
137
+ h.index = TCG_REG_TMP2;
59
#else
138
#else
60
#define HI_COMMPAGE 0
139
unsigned a_bits = get_alignment_bits(opc);
61
-#define LO_COMMPAGE 0
140
if (a_bits) {
62
+#define LO_COMMPAGE -1
141
tcg_out_test_alignment(s, false, addr_reg, a_bits);
63
#define init_guest_commpage() true
142
}
143
- index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
144
+ h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
64
#endif
145
#endif
65
146
66
@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
147
- base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
67
} else {
148
- tcg_out_qemu_st_indexed(s, data_reg, base, index, opc);
68
offset = -(HI_COMMPAGE & -align);
149
+ h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
69
}
150
+ tcg_out_qemu_st_indexed(s, opc, data_reg, h);
70
- } else if (LO_COMMPAGE != 0) {
151
71
+ } else if (LO_COMMPAGE != -1) {
152
#ifdef CONFIG_SOFTMMU
72
loaddr = MIN(loaddr, LO_COMMPAGE & -align);
153
add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
73
}
74
75
--
2.34.1
The only user can easily use translator_lduw and
adjust the type to signed during the return.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/translator.h | 1 -
target/i386/tcg/translate.c | 2 +-
2 files changed, 1 insertion(+), 2 deletions(-)

Interpret the variable argument placement in the caller. There are
several places where we already convert back from bool to type.
Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target.c.inc | 186 +++++++++++++++++++-------------------
1 file changed, 95 insertions(+), 91 deletions(-)
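The first patch's functional change reduces to one line in the x86 translator,
visible in the hunks below; schematically:

    static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
    {
        /* the int16_t return type performs the sign adjustment */
        return translator_lduw(env, &s->base, advance_pc(env, s, 2));
    }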
diff --git a/include/exec/translator.h b/include/exec/translator.h
11
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/translator.h
13
--- a/tcg/mips/tcg-target.c.inc
16
+++ b/include/exec/translator.h
14
+++ b/tcg/mips/tcg-target.c.inc
17
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
15
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
18
16
#endif /* SOFTMMU */
19
#define FOR_EACH_TRANSLATOR_LD(F) \
17
20
F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
18
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
21
- F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
19
- TCGReg base, MemOp opc, bool is_64)
22
F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
20
+ TCGReg base, MemOp opc, TCGType type)
23
F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
24
F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
25
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/i386/tcg/translate.c
28
+++ b/target/i386/tcg/translate.c
29
@@ -XXX,XX +XXX,XX @@ static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
30
31
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
32
{
21
{
33
- return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
22
switch (opc & (MO_SSIZE | MO_BSWAP)) {
34
+ return translator_lduw(env, &s->base, advance_pc(env, s, 2));
23
case MO_UB:
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
25
tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
26
break;
27
case MO_UL | MO_BSWAP:
28
- if (TCG_TARGET_REG_BITS == 64 && is_64) {
29
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
30
if (use_mips32r2_instructions) {
31
tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
32
tcg_out_bswap32(s, lo, lo, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
33
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
34
}
35
break;
36
case MO_UL:
37
- if (TCG_TARGET_REG_BITS == 64 && is_64) {
38
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
39
tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
40
break;
41
}
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
35
}
43
}
36
44
37
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
45
static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
46
- TCGReg base, MemOp opc, bool is_64)
47
+ TCGReg base, MemOp opc, TCGType type)
48
{
49
const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
50
const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
52
case MO_UL:
53
tcg_out_opc_imm(s, lw1, lo, base, 0);
54
tcg_out_opc_imm(s, lw2, lo, base, 3);
55
- if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
56
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn) {
57
tcg_out_ext32u(s, lo, lo);
58
}
59
break;
60
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
61
tcg_out_opc_imm(s, lw1, lo, base, 0);
62
tcg_out_opc_imm(s, lw2, lo, base, 3);
63
tcg_out_bswap32(s, lo, lo,
64
- TCG_TARGET_REG_BITS == 64 && is_64
65
+ TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64
66
? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
67
} else {
68
const tcg_insn_unit *subr =
69
- (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
70
+ (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn
71
? bswap32u_addr : bswap32_addr);
72
73
tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
74
tcg_out_bswap_subr(s, subr);
75
/* delay slot */
76
tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
77
- tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
78
+ tcg_out_mov(s, type, lo, TCG_TMP3);
79
}
80
break;
81
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
83
}
84
}
85
86
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
87
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
88
+ TCGReg addrlo, TCGReg addrhi,
89
+ MemOpIdx oi, TCGType data_type)
90
{
91
- TCGReg addr_regl, addr_regh __attribute__((unused));
92
- TCGReg data_regl, data_regh;
93
- MemOpIdx oi;
94
- MemOp opc;
95
-#if defined(CONFIG_SOFTMMU)
96
- tcg_insn_unit *label_ptr[2];
97
-#else
98
-#endif
99
- unsigned a_bits, s_bits;
100
- TCGReg base = TCG_REG_A0;
101
-
102
- data_regl = *args++;
103
- data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
104
- addr_regl = *args++;
105
- addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
106
- oi = *args++;
107
- opc = get_memop(oi);
108
- a_bits = get_alignment_bits(opc);
109
- s_bits = opc & MO_SIZE;
110
+ MemOp opc = get_memop(oi);
111
+ unsigned a_bits = get_alignment_bits(opc);
112
+ unsigned s_bits = opc & MO_SIZE;
113
+ TCGReg base;
114
115
/*
116
* R6 removes the left/right instructions but requires the
117
* system to support misaligned memory accesses.
118
*/
119
#if defined(CONFIG_SOFTMMU)
120
- tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
121
+ tcg_insn_unit *label_ptr[2];
122
+
123
+ base = TCG_REG_A0;
124
+ tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 1);
125
if (use_mips32r6_instructions || a_bits >= s_bits) {
126
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
127
+ tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
128
} else {
129
- tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
130
+ tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
131
}
132
- add_qemu_ldst_label(s, 1, oi,
133
- (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
134
- data_regl, data_regh, addr_regl, addr_regh,
135
- s->code_ptr, label_ptr);
136
+ add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
137
+ addrlo, addrhi, s->code_ptr, label_ptr);
138
#else
139
+ base = addrlo;
140
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
141
- tcg_out_ext32u(s, base, addr_regl);
142
- addr_regl = base;
143
+ tcg_out_ext32u(s, TCG_REG_A0, base);
144
+ base = TCG_REG_A0;
145
}
146
- if (guest_base == 0 && data_regl != addr_regl) {
147
- base = addr_regl;
148
- } else if (guest_base == (int16_t)guest_base) {
149
- tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
150
- } else {
151
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
152
+ if (guest_base) {
153
+ if (guest_base == (int16_t)guest_base) {
154
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
155
+ } else {
156
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
157
+ TCG_GUEST_BASE_REG);
158
+ }
159
+ base = TCG_REG_A0;
160
}
161
if (use_mips32r6_instructions) {
162
if (a_bits) {
163
- tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
164
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
165
}
166
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
167
+ tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
168
} else {
169
if (a_bits && a_bits != s_bits) {
170
- tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
171
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
172
}
173
if (a_bits >= s_bits) {
174
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
175
+ tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
176
} else {
177
- tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
178
+ tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
179
}
180
}
181
#endif
182
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
183
g_assert_not_reached();
184
}
185
}
186
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
187
-{
188
- TCGReg addr_regl, addr_regh __attribute__((unused));
189
- TCGReg data_regl, data_regh;
190
- MemOpIdx oi;
191
- MemOp opc;
192
-#if defined(CONFIG_SOFTMMU)
193
- tcg_insn_unit *label_ptr[2];
194
-#endif
195
- unsigned a_bits, s_bits;
196
- TCGReg base = TCG_REG_A0;
197
198
- data_regl = *args++;
199
- data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
200
- addr_regl = *args++;
201
- addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
202
- oi = *args++;
203
- opc = get_memop(oi);
204
- a_bits = get_alignment_bits(opc);
205
- s_bits = opc & MO_SIZE;
206
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
207
+ TCGReg addrlo, TCGReg addrhi,
208
+ MemOpIdx oi, TCGType data_type)
209
+{
210
+ MemOp opc = get_memop(oi);
211
+ unsigned a_bits = get_alignment_bits(opc);
212
+ unsigned s_bits = opc & MO_SIZE;
213
+ TCGReg base;
214
215
/*
216
* R6 removes the left/right instructions but requires the
217
* system to support misaligned memory accesses.
218
*/
219
#if defined(CONFIG_SOFTMMU)
220
- tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
221
+ tcg_insn_unit *label_ptr[2];
222
+
223
+ base = TCG_REG_A0;
224
+ tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 0);
225
if (use_mips32r6_instructions || a_bits >= s_bits) {
226
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
227
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
228
} else {
229
- tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
230
+ tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc);
231
}
232
- add_qemu_ldst_label(s, 0, oi,
233
- (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
234
- data_regl, data_regh, addr_regl, addr_regh,
235
- s->code_ptr, label_ptr);
236
+ add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
237
+ addrlo, addrhi, s->code_ptr, label_ptr);
238
#else
239
+ base = addrlo;
240
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
241
- tcg_out_ext32u(s, base, addr_regl);
242
- addr_regl = base;
243
+ tcg_out_ext32u(s, TCG_REG_A0, base);
244
+ base = TCG_REG_A0;
245
}
246
- if (guest_base == 0) {
247
- base = addr_regl;
248
- } else if (guest_base == (int16_t)guest_base) {
249
- tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
250
- } else {
251
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
252
+ if (guest_base) {
253
+ if (guest_base == (int16_t)guest_base) {
254
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
255
+ } else {
256
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
257
+ TCG_GUEST_BASE_REG);
258
+ }
259
+ base = TCG_REG_A0;
260
}
261
if (use_mips32r6_instructions) {
262
if (a_bits) {
263
- tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
264
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
265
}
266
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
267
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
268
} else {
269
if (a_bits && a_bits != s_bits) {
270
- tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
271
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
272
}
273
if (a_bits >= s_bits) {
274
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
275
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
276
} else {
277
- tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
278
+ tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc);
279
}
280
}
281
#endif
282
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
283
break;
284
285
case INDEX_op_qemu_ld_i32:
286
- tcg_out_qemu_ld(s, args, false);
287
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
288
+ tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
289
+ } else {
290
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
291
+ }
292
break;
293
case INDEX_op_qemu_ld_i64:
294
- tcg_out_qemu_ld(s, args, true);
295
+ if (TCG_TARGET_REG_BITS == 64) {
296
+ tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
297
+ } else if (TARGET_LONG_BITS == 32) {
298
+ tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
299
+ } else {
300
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
301
+ }
302
break;
303
case INDEX_op_qemu_st_i32:
304
- tcg_out_qemu_st(s, args, false);
305
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
306
+ tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
307
+ } else {
308
+ tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
309
+ }
310
break;
311
case INDEX_op_qemu_st_i64:
312
- tcg_out_qemu_st(s, args, true);
313
+ if (TCG_TARGET_REG_BITS == 64) {
314
+ tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
315
+ } else if (TARGET_LONG_BITS == 32) {
316
+ tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
317
+ } else {
318
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
319
+ }
320
break;
321
322
case INDEX_op_add2_i32:
38
--
2.34.1
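The x86_ldsw_code hunk above drops translator_ldsw and relies on C's integer conversion instead: returning the unsigned 16-bit load through the function's int16_t return type reinterprets the value as signed. A minimal stand-alone sketch of that conversion (ldsw_via_lduw is a hypothetical name, not a QEMU helper):

#include <stdint.h>
#include <stdio.h>

/* Converting uint16_t to int16_t reinterprets the top bit as the
 * sign, so a separate signed-load helper is unnecessary. */
static int16_t ldsw_via_lduw(uint16_t raw)
{
    return (int16_t)raw;
}

int main(void)
{
    printf("%d\n", ldsw_via_lduw(0xfff0)); /* prints -16 */
    return 0;
}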
Cache the translation from guest to host address, so we may
use direct loads when we hit on the primary translation page.

Look up the second translation page only once, during translation.
This obviates another lookup of the second page within tb_gen_code
after translation.

Fixes a bug: plugin_insn_append must be passed the bytes
in the original memory order, not bswapped by pieces.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/translator.h | 63 +++++++++++--------
 accel/tcg/translate-all.c | 23 +++----
 accel/tcg/translator.c | 126 +++++++++++++++++++++++++++++---------
 3 files changed, 141 insertions(+), 71 deletions(-)

Interpret the variable argument placement in the caller. Pass data_type
instead of is64 -- there are several places where we already convert back
from bool to type. Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 110 +++++++++++++++++++++------------
 1 file changed, 59 insertions(+), 51 deletions(-)
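A minimal sketch of the page test the host-address cache pivots on; it is the same masking that translator_use_goto_tb performs in the hunks below, with page_bits standing in for TARGET_PAGE_BITS:

#include <stdbool.h>
#include <stdint.h>

/* Two addresses are on the same page iff they agree in all bits
 * above the page offset. */
static inline bool same_page_sketch(uint64_t a, uint64_t b, unsigned page_bits)
{
    return ((a ^ b) >> page_bits) == 0;
}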
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
19
20
diff --git a/include/exec/translator.h b/include/exec/translator.h
21
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/translator.h
14
--- a/tcg/ppc/tcg-target.c.inc
23
+++ b/include/exec/translator.h
15
+++ b/tcg/ppc/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
16
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
25
* Architecture-agnostic disassembly context.
17
/* Record the context of a call to the out of line helper code for the slow
26
*/
18
path for a load or store, so that we can later generate the correct
27
typedef struct DisasContextBase {
19
helper code. */
28
- const TranslationBlock *tb;
20
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
29
+ TranslationBlock *tb;
21
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
30
target_ulong pc_first;
22
+ TCGType type, MemOpIdx oi,
31
target_ulong pc_next;
23
TCGReg datalo_reg, TCGReg datahi_reg,
32
DisasJumpType is_jmp;
24
TCGReg addrlo_reg, TCGReg addrhi_reg,
33
int num_insns;
25
tcg_insn_unit *raddr, tcg_insn_unit *lptr)
34
int max_insns;
26
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
35
bool singlestep_enabled;
27
TCGLabelQemuLdst *label = new_ldst_label(s);
36
-#ifdef CONFIG_USER_ONLY
28
37
- /*
29
label->is_ld = is_ld;
38
- * Guest address of the last byte of the last protected page.
30
+ label->type = type;
39
- *
31
label->oi = oi;
40
- * Pages containing the translated instructions are made non-writable in
32
label->datalo_reg = datalo_reg;
41
- * order to achieve consistency in case another thread is modifying the
33
label->datahi_reg = datahi_reg;
42
- * code while translate_insn() fetches the instruction bytes piecemeal.
34
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
43
- * Such writer threads are blocked on mmap_lock() in page_unprotect().
35
44
- */
36
#endif /* SOFTMMU */
45
- target_ulong page_protect_end;
37
38
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
39
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
40
+ TCGReg addrlo, TCGReg addrhi,
41
+ MemOpIdx oi, TCGType data_type)
42
{
43
- TCGReg datalo, datahi, addrlo, rbase;
44
- TCGReg addrhi __attribute__((unused));
45
- MemOpIdx oi;
46
- MemOp opc, s_bits;
47
+ MemOp opc = get_memop(oi);
48
+ MemOp s_bits = opc & MO_SIZE;
49
+ TCGReg rbase;
50
+
51
#ifdef CONFIG_SOFTMMU
52
- int mem_index;
53
tcg_insn_unit *label_ptr;
54
-#else
55
- unsigned a_bits;
46
-#endif
56
-#endif
47
+ void *host_addr[2];
57
48
} DisasContextBase;
58
- datalo = *args++;
49
59
- datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
50
/**
60
- addrlo = *args++;
51
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
61
- addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
52
* the relevant information at translation time.
62
- oi = *args++;
53
*/
63
- opc = get_memop(oi);
54
64
- s_bits = opc & MO_SIZE;
55
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
56
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
57
- abi_ptr pc, bool do_swap); \
58
- static inline type fullname(CPUArchState *env, \
59
- DisasContextBase *dcbase, abi_ptr pc) \
60
- { \
61
- return fullname ## _swap(env, dcbase, pc, false); \
62
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
63
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
64
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
65
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
66
+
67
+static inline uint16_t
68
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
69
+ abi_ptr pc, bool do_swap)
70
+{
71
+ uint16_t ret = translator_lduw(env, db, pc);
72
+ if (do_swap) {
73
+ ret = bswap16(ret);
74
}
75
+ return ret;
76
+}
77
78
-#define FOR_EACH_TRANSLATOR_LD(F) \
79
- F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
80
- F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
81
- F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
82
- F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
83
+static inline uint32_t
84
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
85
+ abi_ptr pc, bool do_swap)
86
+{
87
+ uint32_t ret = translator_ldl(env, db, pc);
88
+ if (do_swap) {
89
+ ret = bswap32(ret);
90
+ }
91
+ return ret;
92
+}
93
94
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
95
-
65
-
96
-#undef GEN_TRANSLATOR_LD
66
-#ifdef CONFIG_SOFTMMU
97
+static inline uint64_t
67
- mem_index = get_mmuidx(oi);
98
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
68
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
99
+ abi_ptr pc, bool do_swap)
69
+ addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
100
+{
70
101
+ uint64_t ret = translator_ldq(env, db, pc);
71
/* Load a pointer into the current opcode w/conditional branch-link. */
102
+ if (do_swap) {
72
label_ptr = s->code_ptr;
103
+ ret = bswap64(ret);
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
104
+ }
74
105
+ return ret;
75
rbase = TCG_REG_R3;
106
+}
76
#else /* !CONFIG_SOFTMMU */
107
77
- a_bits = get_alignment_bits(opc);
108
/*
78
+ unsigned a_bits = get_alignment_bits(opc);
109
* Return whether addr is on the same page as where disassembly started.
79
if (a_bits) {
110
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
80
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
111
index XXXXXXX..XXXXXXX 100644
81
}
112
--- a/accel/tcg/translate-all.c
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
113
+++ b/accel/tcg/translate-all.c
83
}
114
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
84
115
{
85
#ifdef CONFIG_SOFTMMU
116
CPUArchState *env = cpu->env_ptr;
86
- add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
117
TranslationBlock *tb, *existing_tb;
87
- s->code_ptr, label_ptr);
118
- tb_page_addr_t phys_pc, phys_page2;
88
+ add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
119
- target_ulong virt_page2;
89
+ addrlo, addrhi, s->code_ptr, label_ptr);
120
+ tb_page_addr_t phys_pc;
121
tcg_insn_unit *gen_code_buf;
122
int gen_code_size, search_size, max_insns;
123
#ifdef CONFIG_PROFILER
124
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
125
tb->flags = flags;
126
tb->cflags = cflags;
127
tb->trace_vcpu_dstate = *cpu->trace_dstate;
128
+ tb->page_addr[0] = phys_pc;
129
+ tb->page_addr[1] = -1;
130
tcg_ctx->tb_cflags = cflags;
131
tb_overflow:
132
133
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
134
}
135
136
/*
137
- * If the TB is not associated with a physical RAM page then
138
- * it must be a temporary one-insn TB, and we have nothing to do
139
- * except fill in the page_addr[] fields. Return early before
140
- * attempting to link to other TBs or add to the lookup table.
141
+ * If the TB is not associated with a physical RAM page then it must be
142
+ * a temporary one-insn TB, and we have nothing left to do. Return early
143
+ * before attempting to link to other TBs or add to the lookup table.
144
*/
145
- if (phys_pc == -1) {
146
- tb->page_addr[0] = tb->page_addr[1] = -1;
147
+ if (tb->page_addr[0] == -1) {
148
return tb;
149
}
150
151
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
152
*/
153
tcg_tb_insert(tb);
154
155
- /* check next page if needed */
156
- virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
157
- phys_page2 = -1;
158
- if ((pc & TARGET_PAGE_MASK) != virt_page2) {
159
- phys_page2 = get_page_addr_code(env, virt_page2);
160
- }
161
/*
162
* No explicit memory barrier is required -- tb_link_page() makes the
163
* TB visible in a consistent state.
164
*/
165
- existing_tb = tb_link_page(tb, phys_pc, phys_page2);
166
+ existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
167
/* if the TB already exists, discard what we just translated */
168
if (unlikely(existing_tb != tb)) {
169
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
170
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/accel/tcg/translator.c
173
+++ b/accel/tcg/translator.c
174
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
175
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
176
}
177
178
-static inline void translator_page_protect(DisasContextBase *dcbase,
179
- target_ulong pc)
180
-{
181
-#ifdef CONFIG_USER_ONLY
182
- dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
183
- page_protect(pc);
184
-#endif
185
-}
186
-
187
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
188
target_ulong pc, void *host_pc,
189
const TranslatorOps *ops, DisasContextBase *db)
190
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
191
db->num_insns = 0;
192
db->max_insns = max_insns;
193
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
194
- translator_page_protect(db, db->pc_next);
195
+ db->host_addr[0] = host_pc;
196
+ db->host_addr[1] = NULL;
197
+
198
+#ifdef CONFIG_USER_ONLY
199
+ page_protect(pc);
200
+#endif
201
202
ops->init_disas_context(db, cpu);
203
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
204
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
205
#endif
90
#endif
206
}
91
}
207
92
208
-static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
93
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
209
- target_ulong pc, size_t len)
94
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
210
+static void *translator_access(CPUArchState *env, DisasContextBase *db,
95
+ TCGReg addrlo, TCGReg addrhi,
211
+ target_ulong pc, size_t len)
96
+ MemOpIdx oi, TCGType data_type)
212
{
97
{
213
-#ifdef CONFIG_USER_ONLY
98
- TCGReg datalo, datahi, addrlo, rbase;
214
- target_ulong end = pc + len - 1;
99
- TCGReg addrhi __attribute__((unused));
215
+ void *host;
100
- MemOpIdx oi;
216
+ target_ulong base, end;
101
- MemOp opc, s_bits;
217
+ TranslationBlock *tb;
102
+ MemOp opc = get_memop(oi);
218
103
+ MemOp s_bits = opc & MO_SIZE;
219
- if (end > dcbase->page_protect_end) {
104
+ TCGReg rbase;
220
- translator_page_protect(dcbase, end);
221
+ tb = db->tb;
222
+
105
+
223
+ /* Use slow path if first page is MMIO. */
106
#ifdef CONFIG_SOFTMMU
224
+ if (unlikely(tb->page_addr[0] == -1)) {
107
- int mem_index;
225
+ return NULL;
108
tcg_insn_unit *label_ptr;
226
}
109
-#else
227
+
110
- unsigned a_bits;
228
+ end = pc + len - 1;
111
-#endif
229
+ if (likely(is_same_page(db, end))) {
112
230
+ host = db->host_addr[0];
113
- datalo = *args++;
231
+ base = db->pc_first;
114
- datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
232
+ } else {
115
- addrlo = *args++;
233
+ host = db->host_addr[1];
116
- addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
234
+ base = TARGET_PAGE_ALIGN(db->pc_first);
117
- oi = *args++;
235
+ if (host == NULL) {
118
- opc = get_memop(oi);
236
+ tb->page_addr[1] =
119
- s_bits = opc & MO_SIZE;
237
+ get_page_addr_code_hostp(env, base, &db->host_addr[1]);
120
-
238
+#ifdef CONFIG_USER_ONLY
121
-#ifdef CONFIG_SOFTMMU
239
+ page_protect(end);
122
- mem_index = get_mmuidx(oi);
123
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
124
+ addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
125
126
/* Load a pointer into the current opcode w/conditional branch-link. */
127
label_ptr = s->code_ptr;
128
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
129
130
rbase = TCG_REG_R3;
131
#else /* !CONFIG_SOFTMMU */
132
- a_bits = get_alignment_bits(opc);
133
+ unsigned a_bits = get_alignment_bits(opc);
134
if (a_bits) {
135
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
136
}
137
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
138
}
139
140
#ifdef CONFIG_SOFTMMU
141
- add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
142
- s->code_ptr, label_ptr);
143
+ add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
144
+ addrlo, addrhi, s->code_ptr, label_ptr);
240
#endif
145
#endif
241
+ /* We cannot handle MMIO as second page. */
242
+ assert(tb->page_addr[1] != -1);
243
+ host = db->host_addr[1];
244
+ }
245
+
246
+ /* Use slow path when crossing pages. */
247
+ if (is_same_page(db, pc)) {
248
+ return NULL;
249
+ }
250
+ }
251
+
252
+ tcg_debug_assert(pc >= base);
253
+ return host + (pc - base);
254
}
146
}
255
147
256
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
148
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
257
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
149
break;
258
- abi_ptr pc, bool do_swap) \
150
259
- { \
151
case INDEX_op_qemu_ld_i32:
260
- translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
152
- tcg_out_qemu_ld(s, args, false);
261
- type ret = load_fn(env, pc); \
153
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
262
- if (do_swap) { \
154
+ tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
263
- ret = swap_fn(ret); \
155
+ args[2], TCG_TYPE_I32);
264
- } \
156
+ } else {
265
- plugin_insn_append(pc, &ret, sizeof(ret)); \
157
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
266
- return ret; \
158
+ args[3], TCG_TYPE_I32);
267
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
159
+ }
268
+{
160
break;
269
+ uint8_t ret;
161
case INDEX_op_qemu_ld_i64:
270
+ void *p = translator_access(env, db, pc, sizeof(ret));
162
- tcg_out_qemu_ld(s, args, true);
271
+
163
+ if (TCG_TARGET_REG_BITS == 64) {
272
+ if (p) {
164
+ tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
273
+ plugin_insn_append(pc, p, sizeof(ret));
165
+ args[2], TCG_TYPE_I64);
274
+ return ldub_p(p);
166
+ } else if (TARGET_LONG_BITS == 32) {
275
}
167
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
276
+ ret = cpu_ldub_code(env, pc);
168
+ args[3], TCG_TYPE_I64);
277
+ plugin_insn_append(pc, &ret, sizeof(ret));
169
+ } else {
278
+ return ret;
170
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
279
+}
171
+ args[4], TCG_TYPE_I64);
280
172
+ }
281
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
173
break;
282
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
174
case INDEX_op_qemu_st_i32:
283
+{
175
- tcg_out_qemu_st(s, args, false);
284
+ uint16_t ret, plug;
176
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
285
+ void *p = translator_access(env, db, pc, sizeof(ret));
177
+ tcg_out_qemu_st(s, args[0], -1, args[1], -1,
286
178
+ args[2], TCG_TYPE_I32);
287
-#undef GEN_TRANSLATOR_LD
179
+ } else {
288
+ if (p) {
180
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
289
+ plugin_insn_append(pc, p, sizeof(ret));
181
+ args[3], TCG_TYPE_I32);
290
+ return lduw_p(p);
182
+ }
291
+ }
183
break;
292
+ ret = cpu_lduw_code(env, pc);
184
case INDEX_op_qemu_st_i64:
293
+ plug = tswap16(ret);
185
- tcg_out_qemu_st(s, args, true);
294
+ plugin_insn_append(pc, &plug, sizeof(ret));
186
+ if (TCG_TARGET_REG_BITS == 64) {
295
+ return ret;
187
+ tcg_out_qemu_st(s, args[0], -1, args[1], -1,
296
+}
188
+ args[2], TCG_TYPE_I64);
297
+
189
+ } else if (TARGET_LONG_BITS == 32) {
298
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
190
+ tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
299
+{
191
+ args[3], TCG_TYPE_I64);
300
+ uint32_t ret, plug;
192
+ } else {
301
+ void *p = translator_access(env, db, pc, sizeof(ret));
193
+ tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
302
+
194
+ args[4], TCG_TYPE_I64);
303
+ if (p) {
195
+ }
304
+ plugin_insn_append(pc, p, sizeof(ret));
196
break;
305
+ return ldl_p(p);
197
306
+ }
198
case INDEX_op_setcond_i32:
307
+ ret = cpu_ldl_code(env, pc);
308
+ plug = tswap32(ret);
309
+ plugin_insn_append(pc, &plug, sizeof(ret));
310
+ return ret;
311
+}
312
+
313
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
314
+{
315
+ uint64_t ret, plug;
316
+ void *p = translator_access(env, db, pc, sizeof(ret));
317
+
318
+ if (p) {
319
+ plugin_insn_append(pc, p, sizeof(ret));
320
+ return ldq_p(p);
321
+ }
322
+ ret = cpu_ldq_code(env, pc);
323
+ plug = tswap64(ret);
324
+ plugin_insn_append(pc, &plug, sizeof(ret));
325
+ return ret;
326
+}
327
--
2.34.1
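On the slow path the translator_ld* helpers above receive the value from cpu_ld*_code in host order, so it is tswapped before plugin_insn_append; plugins must always see the bytes as they sit in guest memory. A sketch of that invariant, assuming a little-endian host and using bswap16_sketch as a stand-in for tswap16:

#include <stdint.h>

static uint16_t bswap16_sketch(uint16_t x)
{
    return (uint16_t)((x << 8) | (x >> 8));
}

/* Shape of the translator_lduw slow path: the caller gets the value
 * in host order, while the bytes recorded for plugins must match
 * guest memory, hence the swap before appending. */
static uint16_t slow_lduw_sketch(uint16_t host_order, uint8_t plugin_bytes[2],
                                 int guest_endian_differs)
{
    uint16_t plug = guest_endian_differs ? bswap16_sketch(host_order)
                                         : host_order;
    plugin_bytes[0] = (uint8_t)(plug & 0xff); /* plugin_insn_append analogue */
    plugin_bytes[1] = (uint8_t)(plug >> 8);
    return host_order;
}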
Map the stack executable if required by default or on demand.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/elf.h | 1 +
 linux-user/qemu.h | 1 +
 linux-user/elfload.c | 19 ++++++++++++++++++-
 3 files changed, 20 insertions(+), 1 deletion(-)

Collect the parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st} to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 90 +++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 43 deletions(-)
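The elfload.c hunk below boils down to a default-plus-override scan of the program headers. A self-contained sketch with simplified types (PhdrSketch and the exstack_default parameter are illustrative; the PT_GNU_STACK and PF_X values are the standard ELF ones):

#include <stdbool.h>
#include <stddef.h>

#define PT_GNU_STACK 0x6474e551
#define PF_X 0x1

typedef struct {
    unsigned p_type;
    unsigned p_flags;
} PhdrSketch;

/* Start from the per-architecture default (EXSTACK_DEFAULT) and let
 * a PT_GNU_STACK header override it, mirroring load_elf_image. */
static bool stack_is_executable(const PhdrSketch *phdr, size_t phnum,
                                bool exstack_default)
{
    bool exec_stack = exstack_default;
    for (size_t i = 0; i < phnum; i++) {
        if (phdr[i].p_type == PT_GNU_STACK) {
            exec_stack = (phdr[i].p_flags & PF_X) != 0;
        }
    }
    return exec_stack;
}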
12
diff --git a/include/elf.h b/include/elf.h
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/include/elf.h
12
--- a/tcg/ppc/tcg-target.c.inc
15
+++ b/include/elf.h
13
+++ b/tcg/ppc/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@ typedef int64_t Elf64_Sxword;
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
17
#define PT_LOPROC 0x70000000
18
#define PT_HIPROC 0x7fffffff
19
20
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
21
#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
22
23
#define PT_MIPS_REGINFO 0x70000000
24
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/linux-user/qemu.h
27
+++ b/linux-user/qemu.h
28
@@ -XXX,XX +XXX,XX @@ struct image_info {
29
uint32_t elf_flags;
30
int personality;
31
abi_ulong alignment;
32
+ bool exec_stack;
33
34
/* Generic semihosting knows about these pointers. */
35
abi_ulong arg_strings; /* strings for argv */
36
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/linux-user/elfload.c
39
+++ b/linux-user/elfload.c
40
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
41
#define ELF_ARCH EM_386
42
43
#define ELF_PLATFORM get_elf_platform()
44
+#define EXSTACK_DEFAULT true
45
46
static const char *get_elf_platform(void)
47
{
15
{
48
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
16
return tcg_out_fail_alignment(s, l);
49
17
}
50
#define ELF_ARCH EM_ARM
18
-
51
#define ELF_CLASS ELFCLASS32
19
#endif /* SOFTMMU */
52
+#define EXSTACK_DEFAULT true
20
53
21
+typedef struct {
54
static inline void init_thread(struct target_pt_regs *regs,
22
+ TCGReg base;
55
struct image_info *infop)
23
+ TCGReg index;
56
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
24
+} HostAddress;
57
#else
25
+
58
26
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
59
#define ELF_CLASS ELFCLASS32
27
TCGReg addrlo, TCGReg addrhi,
60
+#define EXSTACK_DEFAULT true
28
MemOpIdx oi, TCGType data_type)
61
29
{
30
MemOp opc = get_memop(oi);
31
MemOp s_bits = opc & MO_SIZE;
32
- TCGReg rbase;
33
+ HostAddress h;
34
35
#ifdef CONFIG_SOFTMMU
36
tcg_insn_unit *label_ptr;
37
38
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
39
+ h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
40
+ h.base = TCG_REG_R3;
41
42
/* Load a pointer into the current opcode w/conditional branch-link. */
43
label_ptr = s->code_ptr;
44
tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
45
-
46
- rbase = TCG_REG_R3;
47
#else /* !CONFIG_SOFTMMU */
48
unsigned a_bits = get_alignment_bits(opc);
49
if (a_bits) {
50
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
51
}
52
- rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
53
+ h.base = guest_base ? TCG_GUEST_BASE_REG : 0;
54
+ h.index = addrlo;
55
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
56
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
57
- addrlo = TCG_REG_TMP1;
58
+ h.index = TCG_REG_TMP1;
59
}
62
#endif
60
#endif
63
61
64
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
62
if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
65
63
if (opc & MO_BSWAP) {
66
#define ELF_CLASS ELFCLASS64
64
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
67
#define ELF_ARCH EM_LOONGARCH
65
- tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
68
+#define EXSTACK_DEFAULT true
66
- tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
69
67
- } else if (rbase != 0) {
70
#define elf_check_arch(x) ((x) == EM_LOONGARCH)
68
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
71
69
- tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
72
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
70
- tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
73
#define ELF_CLASS ELFCLASS32
71
- } else if (addrlo == datahi) {
72
- tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
73
- tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
74
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
75
+ tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
76
+ tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
77
+ } else if (h.base != 0) {
78
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
79
+ tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
80
+ tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
81
+ } else if (h.index == datahi) {
82
+ tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
83
+ tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
84
} else {
85
- tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
86
- tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
87
+ tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
88
+ tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
89
}
90
} else {
91
uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
92
if (!have_isa_2_06 && insn == LDBRX) {
93
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
94
- tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
95
- tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
96
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
97
+ tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
98
+ tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
99
tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
100
} else if (insn) {
101
- tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
102
+ tcg_out32(s, insn | TAB(datalo, h.base, h.index));
103
} else {
104
insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
105
- tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
106
+ tcg_out32(s, insn | TAB(datalo, h.base, h.index));
107
tcg_out_movext(s, TCG_TYPE_REG, datalo,
108
TCG_TYPE_REG, opc & MO_SSIZE, datalo);
109
}
110
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
111
{
112
MemOp opc = get_memop(oi);
113
MemOp s_bits = opc & MO_SIZE;
114
- TCGReg rbase;
115
+ HostAddress h;
116
117
#ifdef CONFIG_SOFTMMU
118
tcg_insn_unit *label_ptr;
119
120
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
121
+ h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
122
+ h.base = TCG_REG_R3;
123
124
/* Load a pointer into the current opcode w/conditional branch-link. */
125
label_ptr = s->code_ptr;
126
tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
127
-
128
- rbase = TCG_REG_R3;
129
#else /* !CONFIG_SOFTMMU */
130
unsigned a_bits = get_alignment_bits(opc);
131
if (a_bits) {
132
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
133
}
134
- rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
135
+ h.base = guest_base ? TCG_GUEST_BASE_REG : 0;
136
+ h.index = addrlo;
137
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
138
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
139
- addrlo = TCG_REG_TMP1;
140
+ h.index = TCG_REG_TMP1;
141
}
74
#endif
142
#endif
75
#define ELF_ARCH EM_MIPS
143
76
+#define EXSTACK_DEFAULT true
144
if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
77
145
if (opc & MO_BSWAP) {
78
#ifdef TARGET_ABI_MIPSN32
146
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
79
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
147
- tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
80
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
148
- tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
81
#define bswaptls(ptr) bswap32s(ptr)
149
- } else if (rbase != 0) {
82
#endif
150
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
83
151
- tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
84
+#ifndef EXSTACK_DEFAULT
152
- tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
85
+#define EXSTACK_DEFAULT false
153
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
86
+#endif
154
+ tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
87
+
155
+ tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
88
#include "elf.h"
156
+ } else if (h.base != 0) {
89
157
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
90
/* We must delay the following stanzas until after "elf.h". */
158
+ tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
91
@@ -XXX,XX +XXX,XX @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
159
+ tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
92
struct image_info *info)
160
} else {
93
{
161
- tcg_out32(s, STW | TAI(datahi, addrlo, 0));
94
abi_ulong size, error, guard;
162
- tcg_out32(s, STW | TAI(datalo, addrlo, 4));
95
+ int prot;
163
+ tcg_out32(s, STW | TAI(datahi, h.index, 0));
96
164
+ tcg_out32(s, STW | TAI(datalo, h.index, 4));
97
size = guest_stack_size;
165
}
98
if (size < STACK_LOWER_LIMIT) {
166
} else {
99
@@ -XXX,XX +XXX,XX @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
167
uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
100
guard = qemu_real_host_page_size();
168
if (!have_isa_2_06 && insn == STDBRX) {
101
}
169
- tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
102
170
- tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
103
- error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
171
+ tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
104
+ prot = PROT_READ | PROT_WRITE;
172
+ tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
105
+ if (info->exec_stack) {
173
tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
106
+ prot |= PROT_EXEC;
174
- tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
107
+ }
175
+ tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
108
+ error = target_mmap(0, size + guard, prot,
176
} else {
109
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
177
- tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
110
if (error == -1) {
178
+ tcg_out32(s, insn | SAB(datalo, h.base, h.index));
111
perror("mmap stack");
112
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
113
*/
114
loaddr = -1, hiaddr = 0;
115
info->alignment = 0;
116
+ info->exec_stack = EXSTACK_DEFAULT;
117
for (i = 0; i < ehdr->e_phnum; ++i) {
118
struct elf_phdr *eppnt = phdr + i;
119
if (eppnt->p_type == PT_LOAD) {
120
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
121
if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
122
goto exit_errmsg;
123
}
124
+ } else if (eppnt->p_type == PT_GNU_STACK) {
125
+ info->exec_stack = eppnt->p_flags & PF_X;
126
}
179
}
127
}
180
}
128
181
129
--
2.34.1
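The HostAddress change above replaces the parallel rbase/addrlo locals with one struct, so the softmmu path (base from the TLB read) and the user-only path (guest_base register or 0) feed the same emitters. A compilable sketch of the shape, with int standing in for TCGReg (hypothetical names):

/* Both pieces of the host effective address travel together; an
 * emitter then combines them uniformly, e.g. LWZX data,h.base,h.index. */
typedef int RegSketch; /* stand-in for TCGReg */

typedef struct {
    RegSketch base;  /* region base register, or 0 */
    RegSketch index; /* (possibly zero-extended) guest address */
} HostAddressSketch;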
New patch

The port currently does not support "oversize" guests, which
means riscv32 can only target 32-bit guests. We will soon be
building TCG once for all guests. This implies that we can
only support riscv64.

Since all Linux distributions target riscv64, not riscv32,
this is not much of a restriction and simplifies the code.

The brcond2 and setcond2 opcodes are exclusive to 32-bit hosts,
so we can and should remove the stubs.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h | 8 --
 tcg/riscv/tcg-target.h | 22 ++--
 tcg/riscv/tcg-target.c.inc | 232 +++++++++------------------
 3 files changed, 72 insertions(+), 190 deletions(-)
23
--- a/tcg/riscv/tcg-target-con-set.h
24
+++ b/tcg/riscv/tcg-target-con-set.h
25
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
26
C_O0_I2(LZ, L)
27
C_O0_I2(rZ, r)
28
C_O0_I2(rZ, rZ)
29
-C_O0_I3(LZ, L, L)
30
-C_O0_I3(LZ, LZ, L)
31
-C_O0_I4(LZ, LZ, L, L)
32
-C_O0_I4(rZ, rZ, rZ, rZ)
33
C_O1_I1(r, L)
34
C_O1_I1(r, r)
35
-C_O1_I2(r, L, L)
36
C_O1_I2(r, r, ri)
37
C_O1_I2(r, r, rI)
38
C_O1_I2(r, rZ, rN)
39
C_O1_I2(r, rZ, rZ)
40
-C_O1_I4(r, rZ, rZ, rZ, rZ)
41
-C_O2_I1(r, r, L)
42
-C_O2_I2(r, r, L, L)
43
C_O2_I4(r, r, rZ, rZ, rM, rM)
44
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/riscv/tcg-target.h
47
+++ b/tcg/riscv/tcg-target.h
48
@@ -XXX,XX +XXX,XX @@
49
#ifndef RISCV_TCG_TARGET_H
50
#define RISCV_TCG_TARGET_H
51
52
-#if __riscv_xlen == 32
53
-# define TCG_TARGET_REG_BITS 32
54
-#elif __riscv_xlen == 64
55
-# define TCG_TARGET_REG_BITS 64
56
+/*
57
+ * We don't support oversize guests.
58
+ * Since we will only build tcg once, this in turn requires a 64-bit host.
59
+ */
60
+#if __riscv_xlen != 64
61
+#error "unsupported code generation mode"
62
#endif
63
+#define TCG_TARGET_REG_BITS 64
64
65
#define TCG_TARGET_INSN_UNIT_SIZE 4
66
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 20
67
@@ -XXX,XX +XXX,XX @@ typedef enum {
68
#define TCG_TARGET_STACK_ALIGN 16
69
#define TCG_TARGET_CALL_STACK_OFFSET 0
70
#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
71
-#if TCG_TARGET_REG_BITS == 32
72
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
73
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
74
-#else
75
#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
76
#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
77
-#endif
78
#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
79
80
/* optional instructions */
81
@@ -XXX,XX +XXX,XX @@ typedef enum {
82
#define TCG_TARGET_HAS_sub2_i32 1
83
#define TCG_TARGET_HAS_mulu2_i32 0
84
#define TCG_TARGET_HAS_muls2_i32 0
85
-#define TCG_TARGET_HAS_muluh_i32 (TCG_TARGET_REG_BITS == 32)
86
-#define TCG_TARGET_HAS_mulsh_i32 (TCG_TARGET_REG_BITS == 32)
87
+#define TCG_TARGET_HAS_muluh_i32 0
88
+#define TCG_TARGET_HAS_mulsh_i32 0
89
#define TCG_TARGET_HAS_ext8s_i32 1
90
#define TCG_TARGET_HAS_ext16s_i32 1
91
#define TCG_TARGET_HAS_ext8u_i32 1
92
@@ -XXX,XX +XXX,XX @@ typedef enum {
93
#define TCG_TARGET_HAS_setcond2 1
94
#define TCG_TARGET_HAS_qemu_st8_i32 0
95
96
-#if TCG_TARGET_REG_BITS == 64
97
#define TCG_TARGET_HAS_movcond_i64 0
98
#define TCG_TARGET_HAS_div_i64 1
99
#define TCG_TARGET_HAS_rem_i64 1
100
@@ -XXX,XX +XXX,XX @@ typedef enum {
101
#define TCG_TARGET_HAS_muls2_i64 0
102
#define TCG_TARGET_HAS_muluh_i64 1
103
#define TCG_TARGET_HAS_mulsh_i64 1
104
-#endif
105
106
#define TCG_TARGET_DEFAULT_MO (0)
107
108
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
109
index XXXXXXX..XXXXXXX 100644
110
--- a/tcg/riscv/tcg-target.c.inc
111
+++ b/tcg/riscv/tcg-target.c.inc
112
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
113
#define SOFTMMU_RESERVE_REGS 0
114
#endif
115
116
-
117
-static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
118
-{
119
- if (TCG_TARGET_REG_BITS == 32) {
120
- return sextract32(val, pos, len);
121
- } else {
122
- return sextract64(val, pos, len);
123
- }
124
-}
125
+#define sextreg sextract64
126
127
/* test if a constant matches the constraint */
128
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
129
@@ -XXX,XX +XXX,XX @@ typedef enum {
130
OPC_XOR = 0x4033,
131
OPC_XORI = 0x4013,
132
133
-#if TCG_TARGET_REG_BITS == 64
134
OPC_ADDIW = 0x1b,
135
OPC_ADDW = 0x3b,
136
OPC_DIVUW = 0x200503b,
137
@@ -XXX,XX +XXX,XX @@ typedef enum {
138
OPC_SRLIW = 0x501b,
139
OPC_SRLW = 0x503b,
140
OPC_SUBW = 0x4000003b,
141
-#else
142
- /* Simplify code throughout by defining aliases for RV32. */
143
- OPC_ADDIW = OPC_ADDI,
144
- OPC_ADDW = OPC_ADD,
145
- OPC_DIVUW = OPC_DIVU,
146
- OPC_DIVW = OPC_DIV,
147
- OPC_MULW = OPC_MUL,
148
- OPC_REMUW = OPC_REMU,
149
- OPC_REMW = OPC_REM,
150
- OPC_SLLIW = OPC_SLLI,
151
- OPC_SLLW = OPC_SLL,
152
- OPC_SRAIW = OPC_SRAI,
153
- OPC_SRAW = OPC_SRA,
154
- OPC_SRLIW = OPC_SRLI,
155
- OPC_SRLW = OPC_SRL,
156
- OPC_SUBW = OPC_SUB,
157
-#endif
158
159
OPC_FENCE = 0x0000000f,
160
OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
161
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
162
tcg_target_long lo, hi, tmp;
163
int shift, ret;
164
165
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
166
+ if (type == TCG_TYPE_I32) {
167
val = (int32_t)val;
168
}
169
170
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
171
}
172
173
hi = val - lo;
174
- if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
175
+ if (val == (int32_t)val) {
176
tcg_out_opc_upper(s, OPC_LUI, rd, hi);
177
if (lo != 0) {
178
tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
179
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
180
return;
181
}
182
183
- /* We can only be here if TCG_TARGET_REG_BITS != 32 */
184
tmp = tcg_pcrel_diff(s, (void *)val);
185
if (tmp == (int32_t)tmp) {
186
tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
187
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
188
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
189
TCGReg arg1, intptr_t arg2)
190
{
191
- bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
192
- tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
193
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
194
+ tcg_out_ldst(s, insn, arg, arg1, arg2);
195
}
196
197
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
198
TCGReg arg1, intptr_t arg2)
199
{
200
- bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
201
- tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
202
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
203
+ tcg_out_ldst(s, insn, arg, arg1, arg2);
204
}
205
206
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
207
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
208
}
209
}
210
211
-static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
212
- TCGReg bl, TCGReg bh, TCGLabel *l)
213
-{
214
- /* todo */
215
- g_assert_not_reached();
216
-}
217
-
218
-static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
219
- TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
220
-{
221
- /* todo */
222
- g_assert_not_reached();
223
-}
224
-
225
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
226
{
227
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
228
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
229
if (offset == sextreg(offset, 0, 20)) {
230
/* short jump: -2097150 to 2097152 */
231
tcg_out_opc_jump(s, OPC_JAL, link, offset);
232
- } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
233
+ } else if (offset == (int32_t)offset) {
234
/* long jump: -2147483646 to 2147483648 */
235
tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
236
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
237
ret = reloc_call(s->code_ptr - 2, arg);
238
tcg_debug_assert(ret == true);
239
- } else if (TCG_TARGET_REG_BITS == 64) {
240
+ } else {
241
/* far jump: 64-bit */
242
tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
243
tcg_target_long base = (tcg_target_long)arg - imm;
244
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
245
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
246
- } else {
247
- g_assert_not_reached();
248
}
249
}
250
251
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
252
#endif
253
};
254
255
-/* We don't support oversize guests */
256
-QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
257
-
258
/* We expect to use a 12-bit negative offset from ENV. */
259
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
260
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
261
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
262
tcg_debug_assert(ok);
263
}
264
265
-static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
266
- TCGReg addrh, MemOpIdx oi,
267
+static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi,
268
tcg_insn_unit **label_ptr, bool is_load)
269
{
270
MemOp opc = get_memop(oi);
271
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
272
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
273
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
274
275
- tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
276
+ tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr,
277
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
278
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
279
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
280
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
281
/* Clear the non-page, non-alignment bits from the address. */
282
compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
283
if (compare_mask == sextreg(compare_mask, 0, 12)) {
284
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
285
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask);
286
} else {
287
tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
288
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
289
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr);
290
}
291
292
/* Compare masked address with the TLB entry. */
293
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
294
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
295
296
/* TLB Hit - translate address using addend. */
297
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
298
- tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
299
- addrl = TCG_REG_TMP0;
300
+ if (TARGET_LONG_BITS == 32) {
301
+ tcg_out_ext32u(s, TCG_REG_TMP0, addr);
302
+ addr = TCG_REG_TMP0;
303
}
304
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
305
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr);
306
return TCG_REG_TMP0;
307
}
308
309
static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
310
- TCGType ext,
311
- TCGReg datalo, TCGReg datahi,
312
- TCGReg addrlo, TCGReg addrhi,
313
- void *raddr, tcg_insn_unit **label_ptr)
314
+ TCGType data_type, TCGReg data_reg,
315
+ TCGReg addr_reg, void *raddr,
316
+ tcg_insn_unit **label_ptr)
317
{
318
TCGLabelQemuLdst *label = new_ldst_label(s);
319
320
label->is_ld = is_ld;
321
label->oi = oi;
322
- label->type = ext;
323
- label->datalo_reg = datalo;
324
- label->datahi_reg = datahi;
325
- label->addrlo_reg = addrlo;
326
- label->addrhi_reg = addrhi;
327
+ label->type = data_type;
328
+ label->datalo_reg = data_reg;
329
+ label->addrlo_reg = addr_reg;
330
label->raddr = tcg_splitwx_to_rx(raddr);
331
label->label_ptr[0] = label_ptr[0];
332
}
333
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
334
TCGReg a2 = tcg_target_call_iarg_regs[2];
335
TCGReg a3 = tcg_target_call_iarg_regs[3];
336
337
- /* We don't support oversize guests */
338
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
339
- g_assert_not_reached();
340
- }
341
-
342
/* resolve label address */
343
if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
344
return false;
345
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
346
TCGReg a3 = tcg_target_call_iarg_regs[3];
347
TCGReg a4 = tcg_target_call_iarg_regs[4];
348
349
- /* We don't support oversize guests */
350
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
351
- g_assert_not_reached();
352
- }
353
-
354
/* resolve label address */
355
if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
356
return false;
357
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
358
359
#endif /* CONFIG_SOFTMMU */
360
361
-static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
362
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
363
TCGReg base, MemOp opc, bool is_64)
364
{
365
/* Byte swapping is left to middle-end expansion. */
366
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
367
368
switch (opc & (MO_SSIZE)) {
369
case MO_UB:
370
- tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
371
+ tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
372
break;
373
case MO_SB:
374
- tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
375
+ tcg_out_opc_imm(s, OPC_LB, val, base, 0);
376
break;
377
case MO_UW:
378
- tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
379
+ tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
380
break;
381
case MO_SW:
382
- tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
383
+ tcg_out_opc_imm(s, OPC_LH, val, base, 0);
384
break;
385
case MO_UL:
386
- if (TCG_TARGET_REG_BITS == 64 && is_64) {
- tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
+ if (is_64) {
+ tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
break;
}
/* FALLTHRU */
case MO_SL:
- tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
+ tcg_out_opc_imm(s, OPC_LW, val, base, 0);
break;
case MO_UQ:
- /* Prefer to load from offset 0 first, but allow for overlap. */
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
- } else if (lo != base) {
- tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
- tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
- } else {
- tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
- tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
- }
+ tcg_out_opc_imm(s, OPC_LD, val, base, 0);
break;
default:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
- TCGReg addr_regl, addr_regh __attribute__((unused));
- TCGReg data_regl, data_regh;
+ TCGReg addr_reg, data_reg;
MemOpIdx oi;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
#endif
TCGReg base;

- data_regl = *args++;
- data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
- addr_regl = *args++;
- addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
+ data_reg = *args++;
+ addr_reg = *args++;
oi = *args++;
opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
- base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
- add_qemu_ldst_label(s, 1, oi,
- (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- data_regl, data_regh, addr_regl, addr_regh,
- s->code_ptr, label_ptr);
+ base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
+ tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
+ add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ data_reg, addr_reg, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
if (a_bits) {
- tcg_out_test_alignment(s, true, addr_regl, a_bits);
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
- base = addr_regl;
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ base = addr_reg;
+ if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_TMP0, base);
base = TCG_REG_TMP0;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
base = TCG_REG_TMP0;
}
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+ tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
#endif
}

-static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
TCGReg base, MemOp opc)
{
/* Byte swapping is left to middle-end expansion. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,

switch (opc & (MO_SSIZE)) {
case MO_8:
- tcg_out_opc_store(s, OPC_SB, base, lo, 0);
+ tcg_out_opc_store(s, OPC_SB, base, val, 0);
break;
case MO_16:
- tcg_out_opc_store(s, OPC_SH, base, lo, 0);
+ tcg_out_opc_store(s, OPC_SH, base, val, 0);
break;
case MO_32:
- tcg_out_opc_store(s, OPC_SW, base, lo, 0);
+ tcg_out_opc_store(s, OPC_SW, base, val, 0);
break;
case MO_64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_opc_store(s, OPC_SD, base, lo, 0);
- } else {
- tcg_out_opc_store(s, OPC_SW, base, lo, 0);
- tcg_out_opc_store(s, OPC_SW, base, hi, 4);
- }
+ tcg_out_opc_store(s, OPC_SD, base, val, 0);
break;
default:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
- TCGReg addr_regl, addr_regh __attribute__((unused));
- TCGReg data_regl, data_regh;
+ TCGReg addr_reg, data_reg;
MemOpIdx oi;
MemOp opc;
#if defined(CONFIG_SOFTMMU)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
#endif
TCGReg base;

- data_regl = *args++;
- data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
- addr_regl = *args++;
- addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
+ data_reg = *args++;
+ addr_reg = *args++;
oi = *args++;
opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
- base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
- add_qemu_ldst_label(s, 0, oi,
- (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- data_regl, data_regh, addr_regl, addr_regh,
- s->code_ptr, label_ptr);
+ base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
+ tcg_out_qemu_st_direct(s, data_reg, base, opc);
+ add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ data_reg, addr_reg, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
if (a_bits) {
- tcg_out_test_alignment(s, false, addr_regl, a_bits);
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
- base = addr_regl;
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ base = addr_reg;
+ if (TARGET_LONG_BITS == 32) {
tcg_out_ext32u(s, TCG_REG_TMP0, base);
base = TCG_REG_TMP0;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
base = TCG_REG_TMP0;
}
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+ tcg_out_qemu_st_direct(s, data_reg, base, opc);
#endif
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_brcond_i64:
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
break;
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
- break;

case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
tcg_out_setcond(s, args[3], a0, a1, a2);
break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
- break;

case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args, false);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sub2_i64:
return C_O2_I4(r, r, rZ, rZ, rM, rM);

- case INDEX_op_brcond2_i32:
- return C_O0_I4(rZ, rZ, rZ, rZ);
-
- case INDEX_op_setcond2_i32:
- return C_O1_I4(r, rZ, rZ, rZ, rZ);
-
case INDEX_op_qemu_ld_i32:
- return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
- ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
- case INDEX_op_qemu_st_i32:
- return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
- ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
case INDEX_op_qemu_ld_i64:
- return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
- : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
- : C_O2_I2(r, r, L, L));
+ return C_O1_I1(r, L);
+ case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st_i64:
- return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
- : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
- : C_O0_I4(LZ, LZ, L, L));
+ return C_O0_I2(LZ, L);

default:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
static void tcg_target_init(TCGContext *s)
{
tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
- }
+ tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

tcg_target_call_clobber_regs = -1u;
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
--
2.34.1
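A note on the simplification above: once 32-bit hosts are gone, a 64-bit guest value always fits in one host register, so the lo/hi register pairing disappears along with brcond2/setcond2. A minimal illustration of the pairing that a 32-bit host had to maintain (plain C sketch, not QEMU code):

    #include <stdint.h>

    /* What a 32-bit host keeps for one 64-bit guest value. */
    typedef struct {
        uint32_t lo, hi;
    } Pair32;

    static Pair32 split64(uint64_t v)
    {
        return (Pair32){ .lo = (uint32_t)v, .hi = (uint32_t)(v >> 32) };
    }

    /* On a 64-bit host the pair collapses back into a single register. */
    static uint64_t join64(Pair32 p)
    {
        return ((uint64_t)p.hi << 32) | p.lo;
    }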
Right now the translator stops right *after* the end of a page, which
breaks reporting of fault locations when the last instruction of a
multi-insn translation block crosses a page boundary.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1155
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/riscv/translate.c | 17 +++++--
tests/tcg/riscv64/noexec.c | 79 +++++++++++++++++++++++++++++++
tests/tcg/riscv64/Makefile.target | 1 +
3 files changed, 93 insertions(+), 4 deletions(-)
create mode 100644 tests/tcg/riscv64/noexec.c

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}
ctx->nftemp = 0;

+ /* Only the first insn within a TB is allowed to cross a page boundary. */
if (ctx->base.is_jmp == DISAS_NEXT) {
- target_ulong page_start;
-
- page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
- if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
+ if (!is_same_page(&ctx->base, ctx->base.pc_next)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
+ } else {
+ unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
+
+ if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
+ uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
+ int len = insn_len(next_insn);
+
+ if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) {
+ ctx->base.is_jmp = DISAS_TOO_MANY;
+ }
+ }
}
}
}
diff --git a/tests/tcg/riscv64/noexec.c b/tests/tcg/riscv64/noexec.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/riscv64/noexec.c
@@ -XXX,XX +XXX,XX @@
+#include "../multiarch/noexec.c.inc"
+
+static void *arch_mcontext_pc(const mcontext_t *ctx)
+{
+ return (void *)ctx->__gregs[REG_PC];
+}
+
+static int arch_mcontext_arg(const mcontext_t *ctx)
+{
+ return ctx->__gregs[REG_A0];
+}
+
+static void arch_flush(void *p, int len)
+{
+ __builtin___clear_cache(p, p + len);
+}
+
+extern char noexec_1[];
+extern char noexec_2[];
+extern char noexec_end[];
+
+asm(".option push\n"
+ ".option norvc\n"
+ "noexec_1:\n"
+ " li a0,1\n" /* a0 is 0 on entry, set 1. */
+ "noexec_2:\n"
+ " li a0,2\n" /* a0 is 0/1; set 2. */
+ " ret\n"
+ "noexec_end:\n"
+ ".option pop");
+
+int main(void)
+{
+ struct noexec_test noexec_tests[] = {
+ {
+ .name = "fallthrough",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2,
+ .entry_ofs = noexec_1 - noexec_2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = 0,
+ .expected_arg = 1,
+ },
+ {
+ .name = "jump",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2,
+ .entry_ofs = 0,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = 0,
+ .expected_arg = 0,
+ },
+ {
+ .name = "fallthrough [cross]",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2 - 2,
+ .entry_ofs = noexec_1 - noexec_2 - 2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = -2,
+ .expected_arg = 1,
+ },
+ {
+ .name = "jump [cross]",
+ .test_code = noexec_1,
+ .test_len = noexec_end - noexec_1,
+ .page_ofs = noexec_1 - noexec_2 - 2,
+ .entry_ofs = -2,
+ .expected_si_ofs = 0,
+ .expected_pc_ofs = -2,
+ .expected_arg = 0,
+ },
+ };
+
+ return test_noexec(noexec_tests,
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
+}
diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/riscv64/Makefile.target
+++ b/tests/tcg/riscv64/Makefile.target
@@ -XXX,XX +XXX,XX @@

VPATH += $(SRC_PATH)/tests/tcg/riscv64
TESTS += test-div
+TESTS += noexec
--
2.34.1

Interpret the variable argument placement in the caller. Pass data_type
instead of is64 -- there are several places where we already convert back
from bool to type. Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/riscv/tcg-target.c.inc | 66 ++++++++++++++------------------------
1 file changed, 24 insertions(+), 42 deletions(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
- TCGReg base, MemOp opc, bool is_64)
+ TCGReg base, MemOp opc, TCGType type)
{
/* Byte swapping is left to middle-end expansion. */
tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
tcg_out_opc_imm(s, OPC_LH, val, base, 0);
break;
case MO_UL:
- if (is_64) {
+ if (type == TCG_TYPE_I64) {
tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
break;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
}
}

-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg addr_reg, data_reg;
- MemOpIdx oi;
- MemOp opc;
-#if defined(CONFIG_SOFTMMU)
- tcg_insn_unit *label_ptr[1];
-#else
- unsigned a_bits;
-#endif
+ MemOp opc = get_memop(oi);
TCGReg base;

- data_reg = *args++;
- addr_reg = *args++;
- oi = *args++;
- opc = get_memop(oi);
-
#if defined(CONFIG_SOFTMMU)
+ tcg_insn_unit *label_ptr[1];
+
base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
- tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
- add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- data_reg, addr_reg, s->code_ptr, label_ptr);
+ tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
+ add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+ s->code_ptr, label_ptr);
#else
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
base = TCG_REG_TMP0;
}
- tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
+ tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
#endif
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
}
}

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg addr_reg, data_reg;
- MemOpIdx oi;
- MemOp opc;
-#if defined(CONFIG_SOFTMMU)
- tcg_insn_unit *label_ptr[1];
-#else
- unsigned a_bits;
-#endif
+ MemOp opc = get_memop(oi);
TCGReg base;

- data_reg = *args++;
- addr_reg = *args++;
- oi = *args++;
- opc = get_memop(oi);
-
#if defined(CONFIG_SOFTMMU)
+ tcg_insn_unit *label_ptr[1];
+
base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
tcg_out_qemu_st_direct(s, data_reg, base, opc);
- add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
- data_reg, addr_reg, s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+ s->code_ptr, label_ptr);
#else
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, false);
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, true);
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args, false);
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, true);
+ tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;

case INDEX_op_extrh_i64_i32:
--
2.34.1
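The page-offset arithmetic in the target/riscv translate patch above can be exercised in isolation. A minimal user-space sketch, assuming a 4 KiB page and mirroring the two-step check (cheap offset filter first, exact check once the real instruction length is known); names here are illustrative, not the QEMU API:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))
    #define MAX_INSN_LEN 4

    /* Same test as is_same_page(): all bits above the page offset agree. */
    static bool same_page(uint64_t a, uint64_t b)
    {
        return ((a ^ b) & PAGE_MASK) == 0;
    }

    /* Translation may continue only while the next insn stays on the page. */
    static bool next_insn_fits(uint64_t pc_next, int len)
    {
        unsigned page_ofs = pc_next & ~PAGE_MASK;

        if (page_ofs <= PAGE_SIZE - MAX_INSN_LEN) {
            return true;   /* even a maximum-size insn cannot leave the page */
        }
        return same_page(pc_next, pc_next + len);
    }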
We cannot deliver two interrupts simultaneously;
the first interrupt handler must execute first.

Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/avr/helper.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- bool ret = false;
AVRCPU *cpu = AVR_CPU(cs);
CPUAVRState *env = &cpu->env;

@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
avr_cpu_do_interrupt(cs);

cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
-
- ret = true;
+ return true;
}
}
if (interrupt_request & CPU_INTERRUPT_HARD) {
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (!env->intsrc) {
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
}
-
- ret = true;
+ return true;
}
}
- return ret;
+ return false;
}

void avr_cpu_do_interrupt(CPUState *cs)
--
2.34.1

We need to set this in TCGLabelQemuLdst, so plumb this
all the way through from tcg_out_op.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target.c.inc | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,

static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
- TCGReg data, TCGReg addr,
+ TCGType type, TCGReg data, TCGReg addr,
tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
TCGLabelQemuLdst *label = new_ldst_label(s);

label->is_ld = is_ld;
label->oi = oi;
+ label->type = type;
label->datalo_reg = data;
label->addrlo_reg = addr;
label->raddr = tcg_splitwx_to_rx(raddr);
@@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi)
+ MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,

tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

- add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+ s->code_ptr, label_ptr);
#else
TCGReg index_reg;
tcg_target_long disp;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
}

static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi)
+ MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,

tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

- add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+ s->code_ptr, label_ptr);
#else
TCGReg index_reg;
tcg_target_long disp;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

case INDEX_op_qemu_ld_i32:
- /* ??? Technically we can use a non-extending instruction. */
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
+ break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2]);
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
break;
case INDEX_op_qemu_st_i32:
+ tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
+ break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2]);
+ tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
break;

case INDEX_op_ld16s_i64:
--
2.34.1
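The control-flow change in the avr patch above is the usual "deliver at most one interrupt per call" shape: returning as soon as one interrupt has been taken guarantees its handler runs before any further source is considered. A condensed sketch of the pattern (illustrative only, not the QEMU code):

    #include <stdbool.h>

    /* Deliver at most one pending interrupt, highest priority first. */
    static bool deliver_one(unsigned *pending)
    {
        if (*pending & 1u) {             /* highest-priority source */
            *pending &= ~1u;
            /* ... enter its handler ... */
            return true;
        }
        if (*pending) {                  /* any other source */
            *pending &= *pending - 1;    /* clear lowest set bit */
            /* ... enter its handler ... */
            return true;
        }
        return false;                    /* nothing delivered */
    }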
While there are no target-specific nonfaulting probes,
generic code may grow some uses at some point.

Note that the attrs argument was incorrect -- it should have
been MEMTXATTRS_UNSPECIFIED. Just use the simpler interface.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/avr/helper.c | 46 ++++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 17 deletions(-)

diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
{
- int prot = 0;
- MemTxAttrs attrs = {};
+ int prot, page_size = TARGET_PAGE_SIZE;
uint32_t paddr;

address &= TARGET_PAGE_MASK;

if (mmu_idx == MMU_CODE_IDX) {
- /* access to code in flash */
+ /* Access to code in flash. */
paddr = OFFSET_CODE + address;
prot = PAGE_READ | PAGE_EXEC;
- if (paddr + TARGET_PAGE_SIZE > OFFSET_DATA) {
+ if (paddr >= OFFSET_DATA) {
+ /*
+ * This should not be possible via any architectural operations.
+ * There is certainly not an exception that we can deliver.
+ * Accept probing that might come from generic code.
+ */
+ if (probe) {
+ return false;
+ }
error_report("execution left flash memory");
abort();
}
- } else if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
- /*
- * access to CPU registers, exit and rebuilt this TB to use full access
- * incase it touches specially handled registers like SREG or SP
- */
- AVRCPU *cpu = AVR_CPU(cs);
- CPUAVRState *env = &cpu->env;
- env->fullacc = 1;
- cpu_loop_exit_restore(cs, retaddr);
} else {
- /* access to memory. nothing special */
+ /* Access to memory. */
paddr = OFFSET_DATA + address;
prot = PAGE_READ | PAGE_WRITE;
+ if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
+ /*
+ * Access to CPU registers, exit and rebuilt this TB to use
+ * full access in case it touches specially handled registers
+ * like SREG or SP. For probing, set page_size = 1, in order
+ * to force tlb_fill to be called for the next access.
+ */
+ if (probe) {
+ page_size = 1;
+ } else {
+ AVRCPU *cpu = AVR_CPU(cs);
+ CPUAVRState *env = &cpu->env;
+ env->fullacc = 1;
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+ }
}

- tlb_set_page_with_attrs(cs, address, paddr, attrs, prot,
- mmu_idx, TARGET_PAGE_SIZE);
-
+ tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
return true;
}
--
2.34.1

Collect the 3 potential parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target.c.inc | 109 ++++++++++++++++++++-----------------
1 file changed, 60 insertions(+), 49 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
tcg_out_call_int(s, dest);
}

+typedef struct {
+ TCGReg base;
+ TCGReg index;
+ int disp;
+} HostAddress;
+
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
- TCGReg base, TCGReg index, int disp)
+ HostAddress h)
{
switch (opc & (MO_SSIZE | MO_BSWAP)) {
case MO_UB:
- tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
+ tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
break;
case MO_SB:
- tcg_out_insn(s, RXY, LGB, data, base, index, disp);
+ tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
break;

case MO_UW | MO_BSWAP:
/* swapped unsigned halfword load with upper bits zeroed */
- tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
tcg_out_ext16u(s, data, data);
break;
case MO_UW:
- tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
break;

case MO_SW | MO_BSWAP:
/* swapped sign-extended halfword load */
- tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
break;
case MO_SW:
- tcg_out_insn(s, RXY, LGH, data, base, index, disp);
+ tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
break;

case MO_UL | MO_BSWAP:
/* swapped unsigned int load with upper bits zeroed */
- tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
tcg_out_ext32u(s, data, data);
break;
case MO_UL:
- tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
+ tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
break;

case MO_SL | MO_BSWAP:
/* swapped sign-extended int load */
- tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
tcg_out_ext32s(s, data, data);
break;
case MO_SL:
- tcg_out_insn(s, RXY, LGF, data, base, index, disp);
+ tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
break;

case MO_UQ | MO_BSWAP:
- tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
+ tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
break;
case MO_UQ:
- tcg_out_insn(s, RXY, LG, data, base, index, disp);
+ tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
break;

default:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
}

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
- TCGReg base, TCGReg index, int disp)
+ HostAddress h)
{
switch (opc & (MO_SIZE | MO_BSWAP)) {
case MO_UB:
- if (disp >= 0 && disp < 0x1000) {
- tcg_out_insn(s, RX, STC, data, base, index, disp);
+ if (h.disp >= 0 && h.disp < 0x1000) {
+ tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
} else {
- tcg_out_insn(s, RXY, STCY, data, base, index, disp);
+ tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
}
break;

case MO_UW | MO_BSWAP:
- tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
+ tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
break;
case MO_UW:
- if (disp >= 0 && disp < 0x1000) {
- tcg_out_insn(s, RX, STH, data, base, index, disp);
+ if (h.disp >= 0 && h.disp < 0x1000) {
+ tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
} else {
- tcg_out_insn(s, RXY, STHY, data, base, index, disp);
+ tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
}
break;

case MO_UL | MO_BSWAP:
- tcg_out_insn(s, RXY, STRV, data, base, index, disp);
+ tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
break;
case MO_UL:
- if (disp >= 0 && disp < 0x1000) {
- tcg_out_insn(s, RX, ST, data, base, index, disp);
+ if (h.disp >= 0 && h.disp < 0x1000) {
+ tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
} else {
- tcg_out_insn(s, RXY, STY, data, base, index, disp);
+ tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
}
break;

case MO_UQ | MO_BSWAP:
- tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
+ tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
break;
case MO_UQ:
- tcg_out_insn(s, RXY, STG, data, base, index, disp);
+ tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
break;

default:
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
return tcg_out_fail_alignment(s, l);
}

-static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
- TCGReg *index_reg, tcg_target_long *disp)
+static HostAddress tcg_prepare_user_ldst(TCGContext *s, TCGReg addr_reg)
{
+ TCGReg index;
+ int disp;
+
if (TARGET_LONG_BITS == 32) {
- tcg_out_ext32u(s, TCG_TMP0, *addr_reg);
- *addr_reg = TCG_TMP0;
+ tcg_out_ext32u(s, TCG_TMP0, addr_reg);
+ addr_reg = TCG_TMP0;
}
if (guest_base < 0x80000) {
- *index_reg = TCG_REG_NONE;
- *disp = guest_base;
+ index = TCG_REG_NONE;
+ disp = guest_base;
} else {
- *index_reg = TCG_GUEST_BASE_REG;
- *disp = 0;
+ index = TCG_GUEST_BASE_REG;
+ disp = 0;
}
+ return (HostAddress){ .base = addr_reg, .index = index, .disp = disp };
}
#endif /* CONFIG_SOFTMMU */

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
+ HostAddress h;
+
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;
- TCGReg base_reg;

- base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+ h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+ h.index = TCG_REG_R2;
+ h.disp = 0;

tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
label_ptr = s->code_ptr;
s->code_ptr += 1;

- tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+ tcg_out_qemu_ld_direct(s, opc, data_reg, h);

add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else
- TCGReg index_reg;
- tcg_target_long disp;
unsigned a_bits = get_alignment_bits(opc);

if (a_bits) {
tcg_out_test_alignment(s, true, addr_reg, a_bits);
}
- tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
- tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+ h = tcg_prepare_user_ldst(s, addr_reg);
+ tcg_out_qemu_ld_direct(s, opc, data_reg, h);
#endif
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
+ HostAddress h;
+
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
tcg_insn_unit *label_ptr;
- TCGReg base_reg;

- base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+ h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+ h.index = TCG_REG_R2;
+ h.disp = 0;

tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
label_ptr = s->code_ptr;
s->code_ptr += 1;

- tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+ tcg_out_qemu_st_direct(s, opc, data_reg, h);

add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else
- TCGReg index_reg;
- tcg_target_long disp;
unsigned a_bits = get_alignment_bits(opc);

if (a_bits) {
tcg_out_test_alignment(s, false, addr_reg, a_bits);
}
- tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
- tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+ h = tcg_prepare_user_ldst(s, addr_reg);
+ tcg_out_qemu_st_direct(s, opc, data_reg, h);
#endif
}
--
2.34.1
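Bundling base, index, and displacement into one value, as the s390x HostAddress patch above does, keeps the pieces of an addressing mode together instead of threading three parameters through every call. A generic sketch of the idea (field names mirror the patch; this is not a drop-in for any backend):

    #include <stdint.h>

    typedef struct {
        uintptr_t base;    /* value of the base register */
        uintptr_t index;   /* value of the index register, or 0 */
        int       disp;    /* signed displacement */
    } HostAddressExample;

    /* The effective address an RX/RXY-style instruction would form. */
    static uintptr_t effective_addr(HostAddressExample h)
    {
        return h.base + h.index + (intptr_t)h.disp;
    }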
In tcg_canonicalize_memop, we remove MO_SIGN from MO_32 operations
with TCG_TYPE_I32. Thus this is never set. We already have an
identical test just above which does not include is_64.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/sparc64/tcg-target.c.inc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

/* We let the helper sign-extend SB and SW, but leave SL for here. */
- if (is_64 && (memop & MO_SSIZE) == MO_SL) {
+ if ((memop & MO_SSIZE) == MO_SL) {
tcg_out_ext32s(s, data, TCG_REG_O0);
} else {
tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
--
2.34.1
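The MO_SSIZE test above relies on how MemOp packs size and signedness into one value. With simplified stand-in values (the real definitions live in include/exec/memop.h; treat these as illustrative):

    /* Simplified stand-ins for the MemOp bits. */
    enum {
        XMO_32    = 2,                    /* log2 of a 4-byte access */
        XMO_SIZE  = 3,                    /* mask for the size field */
        XMO_SIGN  = 4,                    /* sign-extending load flag */
        XMO_SSIZE = XMO_SIZE | XMO_SIGN,  /* size plus signedness */
        XMO_SL    = XMO_SIGN | XMO_32,    /* sign-extended 32-bit load */
    };

    /* (memop & XMO_SSIZE) == XMO_SL matches only sign-extended 32-bit
     * loads. Since canonicalization strips XMO_SIGN from 32-bit ops of
     * 32-bit type, such a value already implies 64-bit data, which is
     * why the is_64 half of the old test was redundant. */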
We need to set this in TCGLabelQemuLdst, so plumb this
all the way through from tcg_out_op.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/sparc64/tcg-target.c.inc | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, bool is_64)
+ MemOpIdx oi, TCGType data_type)
{
MemOp memop = get_memop(oi);
tcg_insn_unit *label_ptr;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, false);
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, true);
+ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
--
2.34.1
There is no need to go through cc->tcg_ops when
we know what value it must have.

Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/avr/helper.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
bool ret = false;
- CPUClass *cc = CPU_GET_CLASS(cs);
AVRCPU *cpu = AVR_CPU(cs);
CPUAVRState *env = &cpu->env;

if (interrupt_request & CPU_INTERRUPT_RESET) {
if (cpu_interrupts_enabled(env)) {
cs->exception_index = EXCP_RESET;
- cc->tcg_ops->do_interrupt(cs);
+ avr_cpu_do_interrupt(cs);

cs->interrupt_request &= ~CPU_INTERRUPT_RESET;

@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
int index = ctz32(env->intsrc);
cs->exception_index = EXCP_INT(index);
- cc->tcg_ops->do_interrupt(cs);
+ avr_cpu_do_interrupt(cs);

env->intsrc &= env->intsrc - 1; /* clear the interrupt */
if (!env->intsrc) {
--
2.34.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 13 +++++++++++++
tcg/tcg-ldst.c.inc | 14 --------------
2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct QEMU_PACKED {
DebugFrameFDEHeader fde;
} DebugFrameHeader;

+typedef struct TCGLabelQemuLdst {
+ bool is_ld; /* qemu_ld: true, qemu_st: false */
+ MemOpIdx oi;
+ TCGType type; /* result type of a load */
+ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
+ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
+ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
+ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
+ const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
+ tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
+ QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
+} TCGLabelQemuLdst;
+
static void tcg_register_jit_int(const void *buf, size_t size,
const void *debug_frame,
size_t debug_frame_size)
diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-ldst.c.inc
+++ b/tcg/tcg-ldst.c.inc
@@ -XXX,XX +XXX,XX @@
 * THE SOFTWARE.
 */

-typedef struct TCGLabelQemuLdst {
- bool is_ld; /* qemu_ld: true, qemu_st: false */
- MemOpIdx oi;
- TCGType type; /* result type of a load */
- TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
- TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
- TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
- TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
- const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
- tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
- QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
-} TCGLabelQemuLdst;
-
-
/*
 * Generate TB finalization at the end of block
 */
--
2.34.1
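Calling avr_cpu_do_interrupt() directly instead of going through cc->tcg_ops->do_interrupt, as the avr patch above does, is plain devirtualization: when the concrete type is known at the call site, the function-pointer hop buys nothing. A generic sketch (names are hypothetical):

    typedef struct Ops {
        void (*do_interrupt)(void *cpu);
    } Ops;

    void avr_do_interrupt_example(void *cpu);   /* hypothetical handler */

    static void deliver(void *cpu, const Ops *ops)
    {
        (void)ops;                    /* ops->do_interrupt(cpu) would also
                                         work, for any CPU type... */
        avr_do_interrupt_example(cpu); /* ...but here the target is known
                                          statically, so call it directly */
    }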
From: Ilya Leoshkevich <iii@linux.ibm.com>

Introduce a function that checks whether a given address is on the same
page as where disassembly started. Having it improves readability of
the following patches.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20220811095534.241224-3-iii@linux.ibm.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
[rth: Make the DisasContextBase parameter const.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/translator.h | 10 ++++++++++
1 file changed, 10 insertions(+)

diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@ FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)

#undef GEN_TRANSLATOR_LD

+/*
+ * Return whether addr is on the same page as where disassembly started.
+ * Translators can use this to enforce the rule that only single-insn
+ * translation blocks are allowed to cross page boundaries.
+ */
+static inline bool is_same_page(const DisasContextBase *db, target_ulong addr)
+{
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
+}
+
#endif /* EXEC__TRANSLATOR_H */
--
2.34.1

An inline function is safer than a macro, and REG_P
was rather too generic.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-internal.h | 4 ----
tcg/tcg.c | 16 +++++++++++++---
2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@ typedef struct TCGCallArgumentLoc {
unsigned tmp_subindex : 2;
} TCGCallArgumentLoc;

-/* Avoid "unsigned < 0 is always false" Werror, when iarg_regs is empty. */
-#define REG_P(L) \
- ((int)(L)->arg_slot < (int)ARRAY_SIZE(tcg_target_call_iarg_regs))
-
typedef struct TCGHelperInfo {
void *func;
const char *name;
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void init_ffi_layouts(void)
}
#endif /* CONFIG_TCG_INTERPRETER */

+static inline bool arg_slot_reg_p(unsigned arg_slot)
+{
+ /*
+ * Split the sizeof away from the comparison to avoid Werror from
+ * "unsigned < 0 is always false", when iarg_regs is empty.
+ */
+ unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
+ return arg_slot < nreg;
+}
+
typedef struct TCGCumulativeArgs {
int arg_idx; /* tcg_gen_callN args[] */
int info_in_idx; /* TCGHelperInfo in[] */
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
case TCG_CALL_ARG_NORMAL:
case TCG_CALL_ARG_EXTEND_U:
case TCG_CALL_ARG_EXTEND_S:
- if (REG_P(loc)) {
+ if (arg_slot_reg_p(loc->arg_slot)) {
*la_temp_pref(ts) = 0;
break;
}
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
case TCG_CALL_ARG_NORMAL:
case TCG_CALL_ARG_EXTEND_U:
case TCG_CALL_ARG_EXTEND_S:
- if (REG_P(loc)) {
+ if (arg_slot_reg_p(loc->arg_slot)) {
tcg_regset_set_reg(*la_temp_pref(ts),
tcg_target_call_iarg_regs[loc->arg_slot]);
}
@@ -XXX,XX +XXX,XX @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
TCGTemp *ts, TCGRegSet *allocated_regs)
{
- if (REG_P(l)) {
+ if (arg_slot_reg_p(l->arg_slot)) {
TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
load_arg_reg(s, reg, ts, *allocated_regs);
tcg_regset_set_reg(*allocated_regs, reg);
--
2.34.1
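The XOR-and-mask form used by is_same_page() above is worth spelling out: two addresses are on the same page exactly when they agree in every bit above the page offset, so XOR-ing them leaves ones only where they differ. A standalone sketch with a 4 KiB page (values illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_BITS 12
    #define PAGE_MASK (~((uint64_t)(1u << PAGE_BITS) - 1))

    static bool same_page(uint64_t a, uint64_t b)
    {
        /* a ^ b has a 1 wherever the addresses differ; masking off the
         * low PAGE_BITS ignores differences within the page. */
        return ((a ^ b) & PAGE_MASK) == 0;
    }

    int main(void)
    {
        assert(same_page(0x1000, 0x1fff));
        assert(!same_page(0x1fff, 0x2000));
        return 0;
    }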
These will be useful in properly ending the TB.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/riscv/translate.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
/* Include decoders for factored-out extensions */
#include "decode-XVentanaCondOps.c.inc"

+/* The specification allows for longer insns, but not supported by qemu. */
+#define MAX_INSN_LEN 4
+
+static inline int insn_len(uint16_t first_word)
+{
+ return (first_word & 3) == 3 ? 4 : 2;
+}
+
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{
/*
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
};

/* Check for compressed insn */
- if (extract16(opcode, 0, 2) != 3) {
+ if (insn_len(opcode) == 2) {
if (!has_ext(ctx, RVC)) {
gen_exception_illegal(ctx);
} else {
--
2.34.1

Unify all computation of argument stack offset in one function.
This requires that we adjust ref_slot to be in the same units,
by adding max_reg_slots during init_call_layout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static inline bool arg_slot_reg_p(unsigned arg_slot)
return arg_slot < nreg;
}

+static inline int arg_slot_stk_ofs(unsigned arg_slot)
+{
+ unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
+ unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
+
+ tcg_debug_assert(stk_slot < max);
+ return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
+}
+
typedef struct TCGCumulativeArgs {
int arg_idx; /* tcg_gen_callN args[] */
int info_in_idx; /* TCGHelperInfo in[] */
@@ -XXX,XX +XXX,XX @@ static void init_call_layout(TCGHelperInfo *info)
}
}
assert(ref_base + cum.ref_slot <= max_stk_slots);
+ ref_base += max_reg_slots;

if (ref_base != 0) {
for (int i = cum.info_in_idx - 1; i >= 0; --i) {
@@ -XXX,XX +XXX,XX @@ static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
}
}

-static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
+static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
TCGRegSet allocated_regs)
{
/*
@@ -XXX,XX +XXX,XX @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
 */
temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET +
- stk_slot * sizeof(tcg_target_long));
+ arg_slot_stk_ofs(arg_slot));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
@@ -XXX,XX +XXX,XX @@ static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
load_arg_reg(s, reg, ts, *allocated_regs);
tcg_regset_set_reg(*allocated_regs, reg);
} else {
- load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
- ts, *allocated_regs);
+ load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
}
}

-static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
+static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
intptr_t ref_off, TCGRegSet *allocated_regs)
{
TCGReg reg;
- int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

- if (stk_slot < 0) {
+ if (arg_slot_reg_p(arg_slot)) {
reg = tcg_target_call_iarg_regs[arg_slot];
tcg_reg_free(s, reg, *allocated_regs);
tcg_out_addi_ptr(s, reg, ref_base, ref_off);
@@ -XXX,XX +XXX,XX @@ static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
 *allocated_regs, 0, false);
tcg_out_addi_ptr(s, reg, ref_base, ref_off);
tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET
- + stk_slot * sizeof(tcg_target_long));
+ arg_slot_stk_ofs(arg_slot));
}
}

@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
case TCG_CALL_ARG_BY_REF:
load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET
- + loc->ref_slot * sizeof(tcg_target_long),
+ arg_slot_stk_ofs(loc->ref_slot),
&allocated_regs);
break;
case TCG_CALL_ARG_BY_REF_N:
--
2.34.1
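The slot-to-offset mapping that arg_slot_stk_ofs() centralizes above is easy to check in isolation: the first NUM_ARG_REGS slots live in registers, and each later slot is one machine word further up the outgoing-argument area. A self-contained sketch with made-up constants:

    #include <assert.h>

    enum { NUM_ARG_REGS = 8, STACK_OFFSET = 16, SLOT_SIZE = 8 };

    /* Argument slots 0..NUM_ARG_REGS-1 are register slots; higher slots
     * map to stack bytes at STACK_OFFSET + n * SLOT_SIZE. */
    static int slot_stk_ofs(unsigned arg_slot)
    {
        unsigned stk_slot = arg_slot - NUM_ARG_REGS;
        return STACK_OFFSET + stk_slot * SLOT_SIZE;
    }

    int main(void)
    {
        assert(slot_stk_ofs(8) == 16);    /* first stack slot */
        assert(slot_stk_ofs(10) == 32);
        return 0;
    }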
The base qemu_ram_addr_from_host function is already in
softmmu/physmem.c; move the nofail version to be adjacent.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-common.h | 1 +
accel/tcg/cputlb.c | 12 ------------
softmmu/physmem.c | 12 ++++++++++++
3 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -XXX,XX +XXX,XX @@ typedef uintptr_t ram_addr_t;
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
ram_addr_t *offset);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
prot, mmu_idx, size);
}

-static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
-{
- ram_addr_t ram_addr;
-
- ram_addr = qemu_ram_addr_from_host(ptr);
- if (ram_addr == RAM_ADDR_INVALID) {
- error_report("Bad ram pointer %p", ptr);
- abort();
- }
- return ram_addr;
-}
-
/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
return block->offset + offset;
}

+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
+{
+ ram_addr_t ram_addr;
+
+ ram_addr = qemu_ram_addr_from_host(ptr);
+ if (ram_addr == RAM_ADDR_INVALID) {
+ error_report("Bad ram pointer %p", ptr);
+ abort();
+ }
+ return ram_addr;
+}
+
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, void *buf, hwaddr len);
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
--
2.34.1

While the old type was correct in the ideal sense, some ABIs require
the argument to be zero-extended. Using uint32_t for all such values
is a decent compromise.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-ldst.h | 10 +++++++---
accel/tcg/cputlb.c | 6 +++---
2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-ldst.h
+++ b/include/tcg/tcg-ldst.h
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);

-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+/*
+ * Value extended to at least uint32_t, so that some ABIs do not require
+ * zero-extension from uint8_t or uint16_t.
+ */
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_UB);
}

-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_stb_mmu(env, addr, val, oi, retaddr);
@@ -XXX,XX +XXX,XX @@ static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_le_stw_mmu(env, addr, val, oi, retaddr);
@@ -XXX,XX +XXX,XX @@ static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_be_stw_mmu(env, addr, val, oi, retaddr);
--
2.34.1
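The uint32_t widening above matters because calling conventions disagree about sub-word arguments: some leave the upper register bits unspecified, others require the caller to extend them. Declaring the parameter uint32_t makes the extension part of the C-level contract. A sketch of the hazard (names hypothetical):

    #include <stdint.h>

    /* With a uint8_t parameter, whether bits 8..63 of the argument
     * register are zero depends on the ABI; callee-side code that reads
     * the full register cannot rely on it. */
    void store_b_old(uint8_t val);

    /* With uint32_t, the value is a full 32-bit quantity and the zero-
     * extension from the narrower type happens visibly in the caller. */
    void store_b_new(uint32_t val);

    void caller(uint16_t x)
    {
        store_b_new((uint32_t)(uint8_t)x);  /* explicit, ABI-independent */
    }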