V2 replaces the tcg const temp overflow patch.
v2: Fix target/loongarch printf formats for vaddr
Include two more reviewed patches.

r~
The following changes since commit 0e32462630687a18039464511bd0447ada5709c3:

  Merge remote-tracking branch 'remotes/vivier2/tags/linux-user-for-6.0-pull-request' into staging (2021-01-22 10:35:55 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20210124

for you to fetch changes up to ae30e86661b0f48562cd95918d37cbeec5d02262:

  tcg: Restart code generation when we run out of temps (2021-01-24 08:03:27 -1000)

----------------------------------------------------------------

Fix tcg constant temp overflow.
Fix running during atomic single-step.
Partial support for Apple Silicon.
Cleanups for accel/tcg.

----------------------------------------------------------------

Douglas Crosher (1):
      tcg: update the cpu running flag in cpu_exec_step_atomic

Philippe Mathieu-Daudé (4):
      accel/tcg: Make cpu_gen_init() static
      accel/tcg: Restrict tb_gen_code() from other accelerators
      accel/tcg: Declare missing cpu_loop_exit*() stubs
      accel/tcg: Restrict cpu_io_recompile() from other accelerators

Richard Henderson (4):
      qemu/compiler: Split out qemu_build_not_reached_always
      tcg: Optimize inline dup_const for MO_64
      accel/tcg: Move tb_flush_jmp_cache() to cputlb.c
      tcg: Restart code generation when we run out of temps

Roman Bolshakov (1):
      tcg: Toggle page execution for Apple Silicon

 accel/tcg/internal.h      | 20 ++++++++++++++++++++
 include/exec/exec-all.h   | 11 -----------
 include/qemu/compiler.h   |  5 +++--
 include/qemu/osdep.h      | 28 ++++++++++++++++++++++++++++
 include/tcg/tcg.h         |  6 +++++-
 accel/stubs/tcg-stub.c    | 10 ++++++++++
 accel/tcg/cpu-exec.c      |  7 +++++++
 accel/tcg/cputlb.c        | 19 +++++++++++++++++++
 accel/tcg/translate-all.c | 38 +++++++++++++++++++-------------------
 tcg/tcg.c                 | 12 +++++++++---
 10 files changed, 120 insertions(+), 36 deletions(-)
 create mode 100644 accel/tcg/internal.h
New patch

These should have been removed with the rest. There are
a couple of hosts which can emit guest_base into the
constant pool: aarch64, mips64, ppc64, riscv64.
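
As context for the hunks below, a minimal sketch of how a backend can
defer a constant such as guest_base to the pool. new_pool_label() and
tcg_out_pool_finalize() are the real pool API; the pc-relative load
helper and relocation type here are hypothetical placeholders, not the
literal backend code:

    /*
     * Sketch only: record a pool entry for 'val' plus a relocation
     * against the load just emitted.  tcg_out_pool_finalize() later
     * writes out the pool and patches the load, which is why the
     * finalize step after the prologue must now be unconditional.
     */
    static void emit_movi_via_pool(TCGContext *s, TCGReg rd,
                                   tcg_target_ulong val)
    {
        tcg_out_pcrel_load(s, rd, s->code_ptr);   /* hypothetical helper */
        new_pool_label(s, val, R_HOST_PCREL, s->code_ptr, 0);
    }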

Fixes: a417ef835058 ("tcg: Remove TCG_TARGET_NEED_LDST_LABELS and TCG_TARGET_NEED_POOL_LABELS")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 tcg/tci/tcg-target.h | 1 -
 tcg/tcg.c            | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
} TCGReg;

#define HAVE_TCG_QEMU_TB_EXEC
-#define TCG_TARGET_NEED_POOL_LABELS

#endif /* TCG_TARGET_H */
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(void)
tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

-#ifdef TCG_TARGET_NEED_POOL_LABELS
s->pool_labels = NULL;
-#endif

qemu_thread_jit_write();
/* Generate the prologue. */
tcg_target_qemu_prologue(s);

-#ifdef TCG_TARGET_NEED_POOL_LABELS
/* Allow the prologue to put e.g. guest_base into a pool entry. */
{
int result = tcg_out_pool_finalize(s);
tcg_debug_assert(result == 0);
}
-#endif

prologue_size = tcg_current_code_size(s);
perf_report_prologue(s->code_gen_ptr, prologue_size);
--
2.43.0
New patch

Oversized guests (64-bit guests on 32-bit hosts) are now prohibited
in configuration, so TCG_OVERSIZED_GUEST and its special cases can
be removed.
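
As a worked illustration (a minimal sketch, not code from this patch)
of why the oversized case was special: on an ILP32 host a plain 64-bit
store typically compiles to two 32-bit stores, so a racing reader can
observe a torn value, and a 64-bit qatomic_cmpxchg() needs a real
64-bit CAS instruction.

    #include <stdint.h>

    /*
     * On a 32-bit host this store may be split into two 32-bit
     * stores; a concurrent reader can then see half-old, half-new
     * bits.  This is why 64-bit guests on 32-bit hosts could not use
     * atomics for TLB and page-table updates, and why MTTCG was
     * disabled for them.
     */
    void store_pte(uint64_t *pte, uint64_t val)
    {
        *pte = val;
    }

With such configurations gone, every remaining host/guest pairing can
use qatomic_read()/qatomic_cmpxchg() on guest page-table entries, which
is what the hunks below rely on.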

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/atomic.h           | 18 +++--------------
 include/tcg/oversized-guest.h   | 23 ----------------------
 accel/tcg/cputlb.c              |  7 -------
 accel/tcg/tcg-all.c             |  9 ++++-----
 target/arm/ptw.c                | 34 ---------------------------------
 target/riscv/cpu_helper.c       | 13 +------------
 docs/devel/multi-thread-tcg.rst |  1 -
 7 files changed, 8 insertions(+), 97 deletions(-)
 delete mode 100644 include/tcg/oversized-guest.h

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -XXX,XX +XXX,XX @@
*/
#define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)

-/* Sanity check that the size of an atomic operation isn't "overly large".
+/*
+ * Sanity check that the size of an atomic operation isn't "overly large".
* Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
* want to use them because we ought not need them, and this lets us do a
* bit of sanity checking that other 32-bit hosts might build.
- *
- * That said, we have a problem on 64-bit ILP32 hosts in that in order to
- * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
- * We'd prefer not want to pull in everything else TCG related, so handle
- * those few cases by hand.
- *
- * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
- * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
- * n64 (LP64) ABIs are both detected using __mips64.
*/
-#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
-# define ATOMIC_REG_SIZE 8
-#else
-# define ATOMIC_REG_SIZE sizeof(void *)
-#endif
+#define ATOMIC_REG_SIZE sizeof(void *)

/* Weak atomic operations prevent the compiler moving other
* loads/stores past the atomic operation load/store. However there is
diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h
deleted file mode 100644
index XXXXXXX..XXXXXXX
--- a/include/tcg/oversized-guest.h
+++ /dev/null
@@ -XXX,XX +XXX,XX @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Define TCG_OVERSIZED_GUEST
- * Copyright (c) 2008 Fabrice Bellard
- */
-
-#ifndef EXEC_TCG_OVERSIZED_GUEST_H
-#define EXEC_TCG_OVERSIZED_GUEST_H
-
-#include "tcg-target-reg-bits.h"
-#include "cpu-param.h"
-
-/*
- * Oversized TCG guests make things like MTTCG hard
- * as we can't use atomics for cputlb updates.
- */
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-#define TCG_OVERSIZED_GUEST 1
-#else
-#define TCG_OVERSIZED_GUEST 0
-#endif
-
-#endif
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
-#include "tcg/oversized-guest.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -XXX,XX +XXX,XX @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
return qatomic_read(ptr);
#else
const uint64_t *ptr = &entry->addr_idx[access_type];
-# if TCG_OVERSIZED_GUEST
- return *ptr;
-# else
/* ofs might correspond to .addr_write, so use qatomic_read */
return qatomic_read(ptr);
-# endif
#endif
}

@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
ptr_write += HOST_BIG_ENDIAN;
qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#elif TCG_OVERSIZED_GUEST
- tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
qatomic_set(&tlb_entry->addr_write,
tlb_entry->addr_write | TLB_NOTDIRTY);
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -XXX,XX +XXX,XX @@
#include "exec/replay-core.h"
#include "system/cpu-timers.h"
#include "tcg/startup.h"
-#include "tcg/oversized-guest.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/accel.h"
@@ -XXX,XX +XXX,XX @@
#include "hw/boards.h"
#endif
#include "internal-common.h"
+#include "cpu-param.h"
+

struct TCGState {
AccelState parent_obj;
@@ -XXX,XX +XXX,XX @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,

static bool default_mttcg_enabled(void)
{
- if (icount_enabled() || TCG_OVERSIZED_GUEST) {
+ if (icount_enabled()) {
return false;
}
#ifdef TARGET_SUPPORTS_MTTCG
@@ -XXX,XX +XXX,XX @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
TCGState *s = TCG_STATE(obj);

if (strcmp(value, "multi") == 0) {
- if (TCG_OVERSIZED_GUEST) {
- error_setg(errp, "No MTTCG when guest word size > hosts");
- } else if (icount_enabled()) {
+ if (icount_enabled()) {
error_setg(errp, "No MTTCG when icount is enabled");
} else {
#ifndef TARGET_SUPPORTS_MTTCG
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -XXX,XX +XXX,XX @@
#include "internals.h"
#include "cpu-features.h"
#include "idau.h"
-#ifdef CONFIG_TCG
-# include "tcg/oversized-guest.h"
-#endif

typedef struct S1Translate {
/*
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
ptw->out_rw = true;
}

-#ifdef CONFIG_ATOMIC64
if (ptw->out_be) {
old_val = cpu_to_be64(old_val);
new_val = cpu_to_be64(new_val);
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
cur_val = le64_to_cpu(cur_val);
}
-#else
- /*
- * We can't support the full 64-bit atomic cmpxchg on the host.
- * Because this is only used for FEAT_HAFDBS, which is only for AA64,
- * we know that TCG_OVERSIZED_GUEST is set, which means that we are
- * running in round-robin mode and could only race with dma i/o.
- */
-#if !TCG_OVERSIZED_GUEST
-# error "Unexpected configuration"
-#endif
- bool locked = bql_locked();
- if (!locked) {
- bql_lock();
- }
- if (ptw->out_be) {
- cur_val = ldq_be_p(host);
- if (cur_val == old_val) {
- stq_be_p(host, new_val);
- }
- } else {
- cur_val = ldq_le_p(host);
- if (cur_val == old_val) {
- stq_le_p(host, new_val);
- }
- }
- if (!locked) {
- bql_unlock();
- }
-#endif
-
return cur_val;
#else
/* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -XXX,XX +XXX,XX @@
#include "system/cpu-timers.h"
#include "cpu_bits.h"
#include "debug.h"
-#include "tcg/oversized-guest.h"
#include "pmp.h"

int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
hwaddr pte_addr;
int i;

-#if !TCG_OVERSIZED_GUEST
-restart:
-#endif
+ restart:
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
target_ulong idx;
if (i == 0) {
@@ -XXX,XX +XXX,XX @@ restart:
false, MEMTXATTRS_UNSPECIFIED);
if (memory_region_is_ram(mr)) {
target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
-#if TCG_OVERSIZED_GUEST
- /*
- * MTTCG is not enabled on oversized TCG guests so
- * page table updates do not need to be atomic
- */
- *pte_pa = pte = updated_pte;
-#else
target_ulong old_pte;
if (riscv_cpu_sxl(env) == MXL_RV32) {
old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte);
@@ -XXX,XX +XXX,XX @@ restart:
goto restart;
}
pte = updated_pte;
-#endif
} else {
/*
* Misconfigured PTE in ROM (AD bits are not preset) or
diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/multi-thread-tcg.rst
+++ b/docs/devel/multi-thread-tcg.rst
@@ -XXX,XX +XXX,XX @@ if:

* forced by --accel tcg,thread=single
* enabling --icount mode
-* 64 bit guests on 32 bit hosts (TCG_OVERSIZED_GUEST)

In the general case of running translated code there should be no
inter-vCPU dependencies and all vCPUs should be able to run at full
--
2.43.0
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op-ldst.c | 21 +++------------------
 tcg/tcg.c         |  4 +---
 2 files changed, 4 insertions(+), 21 deletions(-)

diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
TCGTemp *addr, MemOpIdx oi)
{
- if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
- if (vh) {
- tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
- temp_arg(addr), oi);
- } else {
- tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
- }
+ if (vh) {
+ tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
} else {
- /* See TCGV_LOW/HIGH. */
- TCGTemp *al = addr + HOST_BIG_ENDIAN;
- TCGTemp *ah = addr + !HOST_BIG_ENDIAN;
-
- if (vh) {
- tcg_gen_op5(opc, type, temp_arg(vl), temp_arg(vh),
- temp_arg(al), temp_arg(ah), oi);
- } else {
- tcg_gen_op4(opc, type, temp_arg(vl),
- temp_arg(al), temp_arg(ah), oi);
- }
+ tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
}
}

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
s->emit_before_op = NULL;
QSIMPLEQ_INIT(&s->labels);

- tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
- s->addr_type == TCG_TYPE_I64);
-
+ tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
tcg_debug_assert(s->insn_start_words > 0);
}

--
2.43.0
New patch

Since 64-on-32 is now unsupported, guest addresses always
fit in one host register. Drop the replication of opcodes.
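
To make the mechanical change concrete, this is the shape of the code
being removed (paraphrasing the tcg-op-ldst.c hunks below, not new
functionality). Before, each generator picked an opcode by address
width:

    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
        opc = INDEX_op_qemu_ld_a32_i32;
    } else {
        opc = INDEX_op_qemu_ld_a64_i32;
    }
    gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);

After, one opcode per access size suffices, since the address always
fits one host register and the MemOpIdx still describes the access:

    gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
             tcgv_i32_temp(val), NULL, addr, oi);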

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h            |  28 ++------
 tcg/optimize.c                   |  21 ++----
 tcg/tcg-op-ldst.c                |  82 +++++----------------
 tcg/tcg.c                        |  42 ++++-------
 tcg/tci.c                        | 119 ++++++-------------------------
 tcg/aarch64/tcg-target.c.inc     |  36 ++++------
 tcg/arm/tcg-target.c.inc         |  40 +++--------
 tcg/i386/tcg-target.c.inc        |  69 ++++--------------
 tcg/loongarch64/tcg-target.c.inc |  36 ++++------
 tcg/mips/tcg-target.c.inc        |  51 +++----------
 tcg/ppc/tcg-target.c.inc         |  68 ++++--------------
 tcg/riscv/tcg-target.c.inc       |  24 +++----
 tcg/s390x/tcg-target.c.inc       |  36 ++++------
 tcg/sparc64/tcg-target.c.inc     |  24 +++----
 tcg/tci/tcg-target.c.inc         |  60 ++++------------
 15 files changed, 177 insertions(+), 559 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT)

-/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
-DEF(qemu_ld_a32_i32, 1, 1, 1,
+DEF(qemu_ld_i32, 1, 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
+DEF(qemu_st_i32, 0, 1 + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
+DEF(qemu_ld_i64, DATA64_ARGS, 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-
-DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
+DEF(qemu_st_i64, 0, DATA64_ARGS + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)

/* Only used by i386 to cope with stupid register constraints. */
-DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
+DEF(qemu_st8_i32, 0, 1 + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)

/* Only for 64-bit hosts at the moment. */
-DEF(qemu_ld_a32_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_a64_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a32_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_a64_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_st_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)

/* Host vector support. */

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(orc):
done = fold_orc(&ctx, op);
break;
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_ld_i32:
done = fold_qemu_ld_1reg(&ctx, op);
break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
done = fold_qemu_ld_1reg(&ctx, op);
break;
}
QEMU_FALLTHROUGH;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
done = fold_qemu_ld_2reg(&ctx, op);
break;
- case INDEX_op_qemu_st8_a32_i32:
- case INDEX_op_qemu_st8_a64_i32:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st8_i32:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st_i128:
done = fold_qemu_st(&ctx, op);
break;
CASE_OP_32_64(rem):
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
MemOp orig_memop;
MemOpIdx orig_oi, oi;
TCGv_i64 copy_addr;
- TCGOpcode opc;

tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
}

copy_addr = plugin_maybe_preserve_addr(addr);
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i32;
- } else {
- opc = INDEX_op_qemu_ld_a64_i32;
- }
- gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
+ gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
+ tcgv_i32_temp(val), NULL, addr, oi);
plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
QEMU_PLUGIN_MEM_R);

@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
}

if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st8_a32_i32;
- } else {
- opc = INDEX_op_qemu_st8_a64_i32;
- }
+ opc = INDEX_op_qemu_st8_i32;
} else {
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i32;
- } else {
- opc = INDEX_op_qemu_st_a64_i32;
- }
+ opc = INDEX_op_qemu_st_i32;
}
gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
MemOp orig_memop;
MemOpIdx orig_oi, oi;
TCGv_i64 copy_addr;
- TCGOpcode opc;

if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
}

copy_addr = plugin_maybe_preserve_addr(addr);
- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i64;
- } else {
- opc = INDEX_op_qemu_ld_a64_i64;
- }
- gen_ldst_i64(opc, val, addr, oi);
+ gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
QEMU_PLUGIN_MEM_R);

@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
{
TCGv_i64 swap = NULL;
MemOpIdx orig_oi, oi;
- TCGOpcode opc;

if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}

- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i64;
- } else {
- opc = INDEX_op_qemu_st_a64_i64;
- }
- gen_ldst_i64(opc, val, addr, oi);
+ gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

if (swap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
{
MemOpIdx orig_oi;
TCGv_i64 ext_addr = NULL;
- TCGOpcode opc;

check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}

- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i128;
- } else {
- opc = INDEX_op_qemu_ld_a64_i128;
- }
- gen_ldst(opc, TCG_TYPE_I128, tcgv_i64_temp(lo),
+ gen_ldst(INDEX_op_qemu_ld_i128, TCG_TYPE_I128, tcgv_i64_temp(lo),
tcgv_i64_temp(hi), addr, oi);

if (need_bswap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
canonicalize_memop_i128_as_i64(mop, memop);
need_bswap = (mop[0] ^ memop) & MO_BSWAP;

- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_ld_a32_i64;
- } else {
- opc = INDEX_op_qemu_ld_a64_i64;
- }
-
/*
* Since there are no global TCGv_i128, there is no visible state
* changed if the second load faults. Load directly into the two
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
y = TCGV128_LOW(val);
}

- gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
+ gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
+ make_memop_idx(mop[0], idx));

if (need_bswap) {
tcg_gen_bswap64_i64(x, x);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
addr_p8 = tcgv_i64_temp(t);
}

- gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
+ gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
+ make_memop_idx(mop[1], idx));
tcg_temp_free_internal(addr_p8);

if (need_bswap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
{
MemOpIdx orig_oi;
TCGv_i64 ext_addr = NULL;
- TCGOpcode opc;

check_max_alignment(memop_alignment_bits(memop));
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}

- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i128;
- } else {
- opc = INDEX_op_qemu_st_a64_i128;
- }
- gen_ldst(opc, TCG_TYPE_I128, tcgv_i64_temp(lo),
- tcgv_i64_temp(hi), addr, oi);
+ gen_ldst(INDEX_op_qemu_st_i128, TCG_TYPE_I128,
+ tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

if (need_bswap) {
tcg_temp_free_i64(lo);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,

canonicalize_memop_i128_as_i64(mop, memop);

- if (tcg_ctx->addr_type == TCG_TYPE_I32) {
- opc = INDEX_op_qemu_st_a32_i64;
- } else {
- opc = INDEX_op_qemu_st_a64_i64;
- }
-
if ((memop & MO_BSWAP) == MO_LE) {
x = TCGV128_LOW(val);
y = TCGV128_HIGH(val);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
x = b;
}

- gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
+ gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
+ make_memop_idx(mop[0], idx));

if (tcg_ctx->addr_type == TCG_TYPE_I32) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,

if (b) {
tcg_gen_bswap64_i64(b, y);
- gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
+ gen_ldst_i64(INDEX_op_qemu_st_i64, b, addr_p8,
+ make_memop_idx(mop[1], idx));
tcg_temp_free_i64(b);
} else {
- gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
+ gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
+ make_memop_idx(mop[1], idx));
}
tcg_temp_free_internal(addr_p8);
} else {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_exit_tb:
case INDEX_op_goto_tb:
case INDEX_op_goto_ptr:
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
return true;

- case INDEX_op_qemu_st8_a32_i32:
- case INDEX_op_qemu_st8_a64_i32:
+ case INDEX_op_qemu_st8_i32:
return TCG_TARGET_HAS_qemu_st8_i32;

- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_st_i128:
return TCG_TARGET_HAS_qemu_ldst_i128;

case INDEX_op_mov_i32:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
}
i = 1;
break;
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st8_a32_i32:
- case INDEX_op_qemu_st8_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st8_i32:
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_st_i128:
{
const char *s_al, *s_op, *s_at;
MemOpIdx oi = op->args[k++];
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
*i4 = extract32(insn, 26, 6);
}

-static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
- TCGReg *r2, TCGReg *r3, TCGReg *r4)
-{
- *r0 = extract32(insn, 8, 4);
- *r1 = extract32(insn, 12, 4);
- *r2 = extract32(insn, 16, 4);
- *r3 = extract32(insn, 20, 4);
- *r4 = extract32(insn, 24, 4);
-}
-
static void tci_args_rrrr(uint32_t insn,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tb_ptr = ptr;
break;

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = (uint32_t)regs[r1];
- goto do_ld_i32;
- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = tci_uint64(regs[r2], regs[r1]);
- oi = regs[r3];
- }
- do_ld_i32:
+ taddr = regs[r1];
regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
break;

- case INDEX_op_qemu_ld_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = (uint32_t)regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = (uint32_t)regs[r2];
- oi = regs[r3];
- }
- goto do_ld_i64;
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
tci_args_rrm(insn, &r0, &r1, &oi);
taddr = regs[r1];
} else {
- tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
- taddr = tci_uint64(regs[r3], regs[r2]);
- oi = regs[r4];
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ taddr = regs[r2];
+ oi = regs[r3];
}
- do_ld_i64:
tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
if (TCG_TARGET_REG_BITS == 32) {
tci_write_reg64(regs, r1, r0, tmp64);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
}
break;

- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = (uint32_t)regs[r1];
- goto do_st_i32;
- case INDEX_op_qemu_st_a64_i32:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = tci_uint64(regs[r2], regs[r1]);
- oi = regs[r3];
- }
- do_st_i32:
+ taddr = regs[r1];
tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
break;

- case INDEX_op_qemu_st_a32_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- tmp64 = regs[r0];
- taddr = (uint32_t)regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- tmp64 = tci_uint64(regs[r1], regs[r0]);
- taddr = (uint32_t)regs[r2];
- oi = regs[r3];
- }
- goto do_st_i64;
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
tci_args_rrm(insn, &r0, &r1, &oi);
tmp64 = regs[r0];
taddr = regs[r1];
} else {
- tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
tmp64 = tci_uint64(regs[r1], regs[r0]);
- taddr = tci_uint64(regs[r3], regs[r2]);
- oi = regs[r4];
+ taddr = regs[r2];
+ oi = regs[r3];
}
- do_st_i64:
tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
break;

@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
str_r(r3), str_r(r4), str_r(r5));
break;

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_st_a32_i32:
- len = 1 + 1;
- goto do_qemu_ldst;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_st_a64_i32:
- len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
- goto do_qemu_ldst;
- case INDEX_op_qemu_ld_a64_i64:
- case INDEX_op_qemu_st_a64_i64:
- len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
- goto do_qemu_ldst;
- do_qemu_ldst:
- switch (len) {
- case 2:
- tci_args_rrm(insn, &r0, &r1, &oi);
- info->fprintf_func(info->stream, "%-12s %s, %s, %x",
- op_name, str_r(r0), str_r(r1), oi);
- break;
- case 3:
+ case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_st_i64:
+ if (TCG_TARGET_REG_BITS == 32) {
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
op_name, str_r(r0), str_r(r1),
str_r(r2), str_r(r3));
break;
- case 4:
- tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
- info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
- op_name, str_r(r0), str_r(r1),
- str_r(r2), str_r(r3), str_r(r4));
- break;
- default:
- g_assert_not_reached();
}
+ /* fall through */
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_st_i32:
+ tci_args_rrm(insn, &r0, &r1, &oi);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %x",
+ op_name, str_r(r0), str_r(r1), oi);
break;

case 0:
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
break;

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, ext);
break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
break;

@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_movcond_i64:
return C_O1_I4(r, r, rC, rZ, rZ);

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
return C_O2_I1(r, r, r);
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
return C_O0_I2(rZ, r);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
return C_O0_I3(rZ, rZ, r);

case INDEX_op_deposit_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
ARITH_MOV, args[0], 0, 0);
break;

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a64_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
args[3], TCG_TYPE_I64);
break;
- case INDEX_op_qemu_ld_a64_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- break;

- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a64_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
args[3], TCG_TYPE_I64);
break;
- case INDEX_op_qemu_st_a64_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- break;

case INDEX_op_bswap16_i32:
tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond2_i32:
return C_O1_I4(r, r, r, rI, rI);

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, q);
- case INDEX_op_qemu_ld_a64_i32:
- return C_O1_I2(r, q, q);
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
return C_O2_I1(e, p, q);
- case INDEX_op_qemu_ld_a64_i64:
- return C_O2_I2(e, p, q, q);
- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
return C_O0_I2(q, q);
- case INDEX_op_qemu_st_a64_i32:
- return C_O0_I3(q, q, q);
- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
return C_O0_I3(Q, p, q);
- case INDEX_op_qemu_st_a64_i64:
- return C_O0_I4(Q, p, q, q);

case INDEX_op_st_vec:
return C_O0_I2(w, r);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
break;

- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
} else {
tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
}
break;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
break;

- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st8_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st8_a32_i32:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st8_i32:
tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
} else {
tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
}
break;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_clz_i64:
return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, L);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O1_I2(r, L, L);

- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
return C_O0_I2(L, L);
- case INDEX_op_qemu_st_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
- case INDEX_op_qemu_st8_a32_i32:
+ case INDEX_op_qemu_st8_i32:
return C_O0_I2(s, L);
- case INDEX_op_qemu_st8_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(s, L) : C_O0_I3(s, L, L);

- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I2(r, r, L, L);

- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
- case INDEX_op_qemu_st_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I4(L, L, L, L);

- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
return C_O2_I1(r, r, L);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
return C_O0_I3(L, L, L);

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
break;

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
break;

@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st32_i64:
case INDEX_op_st_i32:
case INDEX_op_st_i64:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
return C_O0_I2(rZ, r);

- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
return C_N2_I1(r, r, r);

- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
return C_O0_I3(r, r, r);

case INDEX_op_brcond_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32u_i64:
case INDEX_op_ld_i32:
case INDEX_op_ld_i64:
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);

case INDEX_op_andc_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
break;

- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
} else {
tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
}
break;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
- }
- break;

- case INDEX_op_qemu_st_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
} else {
tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
}
break;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
- }
- break;

case INDEX_op_add2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_brcond2_i32:
return C_O0_I4(rZ, rZ, rZ, rZ);

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
return C_O0_I2(rZ, r);
- case INDEX_op_qemu_st_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, r, r);
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r);
- case INDEX_op_qemu_st_a64_i64:
- return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r)
- : C_O0_I4(rZ, rZ, r, r));

default:
return C_NotImplemented;
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
break;

- case INDEX_op_qemu_ld_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
args[2], TCG_TYPE_I64);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[3], TCG_TYPE_I64);
}
break;
- case INDEX_op_qemu_ld_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
break;

- case INDEX_op_qemu_st_a64_i32:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
- args[3], TCG_TYPE_I32);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_qemu_st(s, args[0], -1, args[1], -1,
args[2], TCG_TYPE_I64);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[3], TCG_TYPE_I64);
}
break;
- case INDEX_op_qemu_st_a64_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
- args[4], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub2_i32:
return C_O2_I4(r, r, rI, rZM, r, r);

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);

- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
return C_O0_I2(r, r);
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);

- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
return C_N1O1_I1(o, m, r);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
return C_O0_I3(o, m, r);

case INDEX_op_add_vec:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[3], const_args[3], args[4], const_args[4]);
break;

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;

@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub2_i64:
return C_O2_I4(r, r, rZ, rZ, rM, rM);

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
return C_O0_I2(rZ, r);

case INDEX_op_st_vec:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[2], const_args[2], args[3], const_args[3], args[4]);
break;

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
break;
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
break;
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ctpop_i64:
return C_O1_I1(r, r);

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st_i32:
return C_O0_I2(r, r);
- case INDEX_op_qemu_ld_a32_i128:
- case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_ld_i128:
return C_O2_I1(o, m, r);
- case INDEX_op_qemu_st_a32_i128:
- case INDEX_op_qemu_st_a64_i128:
+ case INDEX_op_qemu_st_i128:
return C_O0_I3(o, m, r);

case INDEX_op_deposit_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
break;

- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;

@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extu_i32_i64:
case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
- case INDEX_op_qemu_ld_a32_i32:
- case INDEX_op_qemu_ld_a64_i32:
- case INDEX_op_qemu_ld_a32_i64:
- case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i32:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
- case INDEX_op_qemu_st_a32_i32:
- case INDEX_op_qemu_st_a64_i32:
- case INDEX_op_qemu_st_a32_i64:
- case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
return C_O0_I2(rZ, r);

case INDEX_op_add_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond2_i32:
return C_O1_I4(r, r, r, r, r);

- case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_a64_i32:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
- case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_ld_a64_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
- case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_i32:
return C_O0_I2(r, r);
- case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a32_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
- case INDEX_op_qemu_st_a64_i64:
1294
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
1295
1296
default:
1297
return C_NotImplemented;
1298
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
1299
tcg_out32(s, insn);
1300
}
1301
1302
-static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0,
1303
- TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4)
1304
-{
1305
- tcg_insn_unit insn = 0;
1306
-
1307
- insn = deposit32(insn, 0, 8, op);
1308
- insn = deposit32(insn, 8, 4, r0);
1309
- insn = deposit32(insn, 12, 4, r1);
1310
- insn = deposit32(insn, 16, 4, r2);
1311
- insn = deposit32(insn, 20, 4, r3);
1312
- insn = deposit32(insn, 24, 4, r4);
1313
- tcg_out32(s, insn);
1314
-}
1315
-
1316
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
1317
TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
1318
{
1319
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1320
tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
1321
break;
1322
1323
- case INDEX_op_qemu_ld_a32_i32:
1324
- case INDEX_op_qemu_st_a32_i32:
1325
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1326
- break;
1327
- case INDEX_op_qemu_ld_a64_i32:
1328
- case INDEX_op_qemu_st_a64_i32:
1329
- case INDEX_op_qemu_ld_a32_i64:
1330
- case INDEX_op_qemu_st_a32_i64:
1331
- if (TCG_TARGET_REG_BITS == 64) {
1332
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1333
- } else {
1334
+ case INDEX_op_qemu_ld_i64:
1335
+ case INDEX_op_qemu_st_i64:
1336
+ if (TCG_TARGET_REG_BITS == 32) {
1337
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
1338
tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
1339
+ break;
1340
}
1341
- break;
1342
- case INDEX_op_qemu_ld_a64_i64:
1343
- case INDEX_op_qemu_st_a64_i64:
1344
- if (TCG_TARGET_REG_BITS == 64) {
1345
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1346
+ /* fall through */
1347
+ case INDEX_op_qemu_ld_i32:
1348
+ case INDEX_op_qemu_st_i32:
1349
+ if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
1350
+ tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
1351
+ tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
1352
} else {
1353
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]);
1354
- tcg_out_op_rrrrr(s, opc, args[0], args[1],
1355
- args[2], args[3], TCG_REG_TMP);
1356
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
1357
}
1358
break;
1359
1360
--
1361
2.43.0
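
[Editor's note] The pattern in every backend above is the same: the address-width
variants of the qemu_ld/st opcodes collapse into one opcode, and any remaining
width handling becomes a runtime check of s->addr_type, as the tci hunk shows.
A minimal sketch of the resulting backend shape; emit_qemu_ld() is a hypothetical
wrapper for illustration, not code from any one target in this series:

    /* One opcode; the guest address width is decided by s->addr_type. */
    static void emit_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                             MemOpIdx oi, TCGType data_type)
    {
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* Zero-extend a 32-bit guest address for the 64-bit host. */
            tcg_out_ext32u(s, TCG_REG_TMP, addr);
            addr = TCG_REG_TMP;
        }
        tcg_out_qemu_ld(s, data, addr, oi, data_type);
    }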
The guest address will now always be TCG_TYPE_I32.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/arm/tcg-target.c.inc | 73 +++++++++++++---------------------------
1 file changed, 23 insertions(+), 50 deletions(-)

diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

-static void __attribute__((unused))
-tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
-{
- tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
-}
-
-static void __attribute__((unused))
-tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
+static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
+ TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
#define MIN_TLB_MASK_TABLE_OFS -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
if (tcg_use_softmmu) {
*h = (HostAddress){
.cond = COND_AL,
- .base = addrlo,
+ .base = addr,
.index = TCG_REG_R1,
.index_scratch = true,
};
} else {
*h = (HostAddress){
.cond = COND_AL,
- .base = addrlo,
+ .base = addr,
.index = guest_base ? TCG_REG_GUEST_BASE : -1,
.index_scratch = false,
};
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

/* Extract the tlb index from the address into R0. */
- tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
+ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr,
SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

/*
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
- * Load the tlb comparator into R2/R3 and the fast path addend into R1.
+ * Load the tlb comparator into R2 and the fast path addend into R1.
*/
QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
if (cmp_off == 0) {
- if (s->addr_type == TCG_TYPE_I32) {
- tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
- TCG_REG_R1, TCG_REG_R0);
- } else {
- tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
- TCG_REG_R1, TCG_REG_R0);
- }
+ tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
} else {
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
- if (s->addr_type == TCG_TYPE_I32) {
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
- } else {
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
- }
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
}

/* Load the tlb addend. */
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* This leaves the least significant alignment bits unchanged, and of
* course must be zero.
*/
- t_addr = addrlo;
+ t_addr = addr;
if (a_mask < s_mask) {
t_addr = TCG_REG_R0;
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
- addrlo, s_mask - a_mask);
+ addr, s_mask - a_mask);
}
if (use_armv7_instructions && s->page_bits <= 16) {
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
} else {
if (a_mask) {
tcg_debug_assert(a_mask <= 0xff);
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
}
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
SHIFT_IMM_LSR(s->page_bits));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
0, TCG_REG_R2, TCG_REG_TMP,
SHIFT_IMM_LSL(s->page_bits));
}
-
- if (s->addr_type != TCG_TYPE_I32) {
- tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
- }
} else if (a_mask) {
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* We are expecting alignment to max out at 7 */
tcg_debug_assert(a_mask <= 0xff);
/* tst addr, #mask */
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
}

return ldst;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
if (ldst) {
ldst->type = data_type;
ldst->datalo_reg = datalo;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
if (ldst) {
ldst->type = data_type;
ldst->datalo_reg = datalo;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
- args[3], TCG_TYPE_I64);
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;

case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
- args[3], TCG_TYPE_I64);
+ tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;

case INDEX_op_bswap16_i32:
--
2.43.0
The guest address will now always fit in one register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 56 ++++++++++++++-------------------------
1 file changed, 20 insertions(+), 36 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
* is required and fill in @h with the host address for the fast path.
*/
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
} else {
*h = x86_guest_base;
}
- h->base = addrlo;
+ h->base = addr;
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

if (TCG_TARGET_REG_BITS == 64) {
ttype = s->addr_type;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
}
}

- tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addr);
tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
s->page_bits - CPU_TLB_ENTRY_BITS);

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* check that we don't cross pages for the complete access.
*/
if (a_mask >= s_mask) {
- tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
+ tcg_out_mov(s, ttype, TCG_REG_L1, addr);
} else {
tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
- addrlo, s_mask - a_mask);
+ addr, s_mask - a_mask);
}
tlb_mask = s->page_mask | a_mask;
tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;

- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I64) {
- /* cmp 4(TCG_REG_L0), addrhi */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi,
- TCG_REG_L0, cmp_ofs + 4);
-
- /* jne slow_path */
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
- ldst->label_ptr[1] = s->code_ptr;
- s->code_ptr += 4;
- }
-
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
offsetof(CPUTLBEntry, addend));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* jne slow_path */
- jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false);
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addr, a_mask, true, false);
tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0);
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi));

if (ldst) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
tcg_out_qemu_st_direct(s, datalo, datahi, h, get_memop(oi));

if (ldst) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I64);
} else {
- tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
}
break;
case INDEX_op_qemu_ld_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I128);
break;

case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+ tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+ tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I64);
} else {
- tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
}
break;
case INDEX_op_qemu_st_i128:
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
break;

OP_32_64(mulu2):
--
2.43.0
The guest address will now always fit in one register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target.c.inc | 62 ++++++++++++++-------------------------
1 file changed, 22 insertions(+), 40 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
* is required and fill in @h with the host address for the fast path.
*/
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGType addr_type = s->addr_type;
TCGLabelQemuLdst *ldst = NULL;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

/* Extract the TLB index from the address into TMP3. */
if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addr,
s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
- tcg_out_dsrl(s, TCG_TMP3, addrlo,
- s->page_bits - CPU_TLB_ENTRY_BITS);
+ tcg_out_dsrl(s, TCG_TMP3, addr, s->page_bits - CPU_TLB_ENTRY_BITS);
}
tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32
|| addr_type == TCG_TYPE_I32
? OPC_ADDIU : OPC_DADDIU),
- TCG_TMP2, addrlo, s_mask - a_mask);
+ TCG_TMP2, addr, s_mask - a_mask);
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
} else {
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addr);
}

/* Zero extend a 32-bit guest address for a 64-bit host. */
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_TMP2, addrlo);
- addrlo = TCG_TMP2;
+ tcg_out_ext32u(s, TCG_TMP2, addr);
+ addr = TCG_TMP2;
}

ldst->label_ptr[0] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);

- /* Load and test the high half tlb comparator. */
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
- /* delay slot */
- tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
-
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
-
- ldst->label_ptr[1] = s->code_ptr;
- tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
- }
-
/* delay slot */
base = TCG_TMP3;
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addr);
} else {
if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
ldst = new_ldst_label(s);

ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* We are expecting a_bits to max out at 7, much lower than ANDI. */
tcg_debug_assert(a_bits < 16);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addr, a_mask);

ldst->label_ptr[0] = s->code_ptr;
if (use_mips32r6_instructions) {
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
}
}

- base = addrlo;
+ base = addr;
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
tcg_out_ext32u(s, TCG_REG_A0, base);
base = TCG_REG_A0;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);

if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);

if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I64);
} else {
- tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
}
break;

case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
+ tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
+ tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I64);
} else {
- tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
}
break;

--
2.43.0
The guest address will now always fit in one register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target.c.inc | 75 ++++++++++++----------------------------
1 file changed, 23 insertions(+), 52 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
* is required and fill in @h with the host address for the fast path.
*/
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, bool is_ld)
+ TCGReg addr, MemOpIdx oi, bool is_ld)
{
TCGType addr_type = s->addr_type;
TCGLabelQemuLdst *ldst = NULL;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

/* Extract the page index, shifted into place for tlb index. */
if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_shri32(s, TCG_REG_R0, addrlo,
+ tcg_out_shri32(s, TCG_REG_R0, addr,
s->page_bits - CPU_TLB_ENTRY_BITS);
} else {
- tcg_out_shri64(s, TCG_REG_R0, addrlo,
+ tcg_out_shri64(s, TCG_REG_R0, addr,
s->page_bits - CPU_TLB_ENTRY_BITS);
}
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
if (a_bits < s_bits) {
a_bits = s_bits;
}
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr, 0,
(32 - a_bits) & 31, 31 - s->page_bits);
} else {
- TCGReg t = addrlo;
+ TCGReg t = addr;

/*
* If the access is unaligned, we need to make sure we fail if we
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
}
}

- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
- /* Low part comparison into cr7. */
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
- 0, 7, TCG_TYPE_I32);
-
- /* Load the high part TLB comparator into TMP2. */
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
- cmp_off + 4 * !HOST_BIG_ENDIAN);
-
- /* Load addend, deferred for this case. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
- offsetof(CPUTLBEntry, addend));
-
- /* High part comparison into cr6. */
- tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
- 0, 6, TCG_TYPE_I32);
-
- /* Combine comparisons into cr0. */
- tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
- } else {
- /* Full comparison into cr0. */
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
- 0, 0, addr_type);
- }
+ /* Full comparison into cr0. */
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 0, addr_type);

/* Load a pointer into the current opcode w/conditional branch-link. */
ldst->label_ptr[0] = s->code_ptr;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addrlo;
- ldst->addrhi_reg = addrhi;
+ ldst->addrlo_reg = addr;

/* We are expecting a_bits to max out at 7, much lower than ANDI. */
tcg_debug_assert(a_bits < 16);
- tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
+ tcg_out32(s, ANDI | SAI(addr, TCG_REG_R0, (1 << a_bits) - 1));

ldst->label_ptr[0] = s->code_ptr;
tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
/* Zero-extend the guest address for use in the host address. */
- tcg_out_ext32u(s, TCG_REG_TMP2, addrlo);
+ tcg_out_ext32u(s, TCG_REG_TMP2, addr);
h->index = TCG_REG_TMP2;
} else {
- h->index = addrlo;
+ h->index = addr;
}

return ldst;
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);

if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
if (opc & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addrlo, TCGReg addrhi,
- MemOpIdx oi, TCGType data_type)
+ TCGReg addr, MemOpIdx oi, TCGType data_type)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
HostAddress h;

- ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);

if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
if (opc & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
uint32_t insn;
TCGReg index;

- ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
+ ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

/* Compose the final address, as LQ/STQ have no indexing. */
index = h.index;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
} else {
- tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
+ tcg_out_qemu_ld(s, args[0], args[1], args[2],
args[3], TCG_TYPE_I64);
}
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;

case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, args[0], -1, args[1], -1,
- args[2], TCG_TYPE_I64);
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
} else {
- tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
+ tcg_out_qemu_st(s, args[0], args[1], args[2],
args[3], TCG_TYPE_I64);
}
break;
--
2.43.0
There is now always only one guest address register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 18 +++++++++---------
tcg/aarch64/tcg-target.c.inc | 4 ++--
tcg/arm/tcg-target.c.inc | 4 ++--
tcg/i386/tcg-target.c.inc | 4 ++--
tcg/loongarch64/tcg-target.c.inc | 4 ++--
tcg/mips/tcg-target.c.inc | 4 ++--
tcg/ppc/tcg-target.c.inc | 4 ++--
tcg/riscv/tcg-target.c.inc | 4 ++--
tcg/s390x/tcg-target.c.inc | 4 ++--
tcg/sparc64/tcg-target.c.inc | 4 ++--
10 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ struct TCGLabelQemuLdst {
bool is_ld; /* qemu_ld: true, qemu_st: false */
MemOpIdx oi;
TCGType type; /* result type of a load */
- TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
- TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
+ TCGReg addr_reg; /* reg index for guest virtual addr */
TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
*/
tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
TCG_TYPE_I32, TCG_TYPE_I32,
- ldst->addrlo_reg, -1);
+ ldst->addr_reg, -1);
tcg_out_helper_load_slots(s, 1, mov, parm);

tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
next_arg += 2;
} else {
nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
- ldst->addrlo_reg, ldst->addrhi_reg);
+ ldst->addr_reg, -1);
tcg_out_helper_load_slots(s, nmov, mov, parm);
next_arg += nmov;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,

/* Handle addr argument. */
loc = &info->in[next_arg];
- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ tcg_debug_assert(s->addr_type <= TCG_TYPE_REG);
+ if (TCG_TARGET_REG_BITS == 32) {
/*
- * 32-bit host with 32-bit guest: zero-extend the guest address
+ * 32-bit host (and thus 32-bit guest): zero-extend the guest address
* to 64-bits for the helper by storing the low part. Later,
* after we have processed the register inputs, we will load a
* zero for the high part.
*/
tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
TCG_TYPE_I32, TCG_TYPE_I32,
- ldst->addrlo_reg, -1);
+ ldst->addr_reg, -1);
next_arg += 2;
nmov += 1;
} else {
n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
- ldst->addrlo_reg, ldst->addrhi_reg);
+ ldst->addr_reg, -1);
next_arg += n;
nmov += n;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
g_assert_not_reached();
}

- if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ if (TCG_TARGET_REG_BITS == 32) {
/* Zero extend the address by loading a zero for the high part. */
loc = &info->in[1 + !HOST_BIG_ENDIAN];
tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
? TCG_TYPE_I64 : TCG_TYPE_I32);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

/* tst addr, #mask */
tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* We are expecting alignment to max out at 7 */
tcg_debug_assert(a_mask <= 0xff);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

if (TCG_TARGET_REG_BITS == 64) {
ttype = s->addr_type;
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* jne slow_path */
jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addr, a_mask, true, false);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

/*
* Without micro-architecture details, we don't know which of
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,

ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* We are expecting a_bits to max out at 7, much lower than ANDI. */
tcg_debug_assert(a_bits < 16);
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr;
+ ldst->addr_reg = addr;

/* We are expecting a_bits to max out at 7, much lower than ANDI. */
tcg_debug_assert(a_bits < 16);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

init_setting_vtype(s);

@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

init_setting_vtype(s);

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
s->page_bits - CPU_TLB_ENTRY_BITS);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;

tcg_debug_assert(a_mask <= 0xffff);
tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
ldst->label_ptr[0] = s->code_ptr;

/* bne,pn %[xi]cc, label0 */
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
- ldst->addrlo_reg = addr_reg;
+ ldst->addr_reg = addr_reg;
ldst->label_ptr[0] = s->code_ptr;

/* bne,pn %icc, label0 */
--
2.43.0
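
[Editor's note] With every backend now taking a single address register, the
ldst-label bookkeeping reduces to one field. A sketch of the call pattern
after this patch, using the names from the diff above:

    /* Record the (single) guest address register for the slow path. */
    TCGLabelQemuLdst *ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addr_reg = addr;    /* replaces the addrlo_reg/addrhi_reg pair */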
The declaration uses uint64_t for addr.

Fixes: 595cd9ce2ec ("plugins: add plugin API to read guest memory")
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
plugins/api.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/api.c b/plugins/api.c
index XXXXXXX..XXXXXXX 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -XXX,XX +XXX,XX @@ GArray *qemu_plugin_get_registers(void)
return create_register_handles(regs);
}

-bool qemu_plugin_read_memory_vaddr(vaddr addr, GByteArray *data, size_t len)
+bool qemu_plugin_read_memory_vaddr(uint64_t addr, GByteArray *data, size_t len)
{
g_assert(current_cpu);

--
2.43.0
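
[Editor's note] The bug being fixed is a plain declaration/definition type
mismatch: the public header declares the parameter as uint64_t, so defining
it with vaddr breaks as soon as vaddr stops being uint64_t (which happens
two patches below). A self-contained illustration, not QEMU code:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uintptr_t vaddr;          /* as after the vaddr patch below */

    bool read_memory(uint64_t addr);  /* declaration, as in the header */

    /* On a 32-bit host this definition has a conflicting type with the
     * declaration above and compilation fails; on an LP64 host the two
     * types merely happen to coincide. */
    bool read_memory(vaddr addr)
    {
        return addr != 0;
    }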
The declarations use vaddr for size.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,

void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, uint64_t size)
+ int mmu_idx, vaddr size)
{
CPUTLBEntryFull full = {
.phys_addr = paddr,
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,

void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
- int mmu_idx, uint64_t size)
+ int mmu_idx, vaddr size)
{
tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
prot, mmu_idx, size);
--
2.43.0
DisasContextBase.pc_next has type vaddr; use the correct log format.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/loongarch/tcg/translate.c | 2 +-
target/loongarch/tcg/insn_trans/trans_atomic.c.inc | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)

if (!decode(ctx, ctx->opcode)) {
qemu_log_mask(LOG_UNIMP, "Error: unknown opcode. "
- TARGET_FMT_lx ": 0x%x\n",
+ "0x%" VADDR_PRIx ": 0x%x\n",
ctx->base.pc_next, ctx->opcode);
generate_exception(ctx, EXCCODE_INE);
}
diff --git a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc
+++ b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc
@@ -XXX,XX +XXX,XX @@ static bool gen_am(DisasContext *ctx, arg_rrr *a,
if (a->rd != 0 && (a->rj == a->rd || a->rk == a->rd)) {
qemu_log_mask(LOG_GUEST_ERROR,
"Warning: source register overlaps destination register"
- "in atomic insn at pc=0x" TARGET_FMT_lx "\n",
+ "in atomic insn at pc=0x%" VADDR_PRIx "\n",
ctx->base.pc_next - 4);
return false;
}
--
2.43.0
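
[Editor's note] TARGET_FMT_lx assumes the value is a target_ulong; once
pc_next is a vaddr, the matching VADDR_PRIx macro must be used instead.
A self-contained sketch of the corrected pattern (log_unknown_opcode is a
made-up name for illustration):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t vaddr;          /* per the vaddr patch below */
    #define VADDR_PRIx PRIxPTR

    /* Print a guest virtual address at whatever width vaddr actually has. */
    void log_unknown_opcode(vaddr pc, uint32_t insn)
    {
        fprintf(stderr, "Error: unknown opcode. 0x%" VADDR_PRIx ": 0x%x\n",
                pc, insn);
    }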
Since we no longer support 64-bit guests on 32-bit hosts,
we can use a 32-bit type on a 32-bit host.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/vaddr.h | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/include/exec/vaddr.h b/include/exec/vaddr.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/vaddr.h
+++ b/include/exec/vaddr.h
@@ -XXX,XX +XXX,XX @@
/**
* vaddr:
* Type wide enough to contain any #target_ulong virtual address.
+ * We do not support 64-bit guest on 32-host and detect at configure time.
+ * Therefore, a host pointer width will always fit a guest pointer.
*/
-typedef uint64_t vaddr;
-#define VADDR_PRId PRId64
-#define VADDR_PRIu PRIu64
-#define VADDR_PRIo PRIo64
-#define VADDR_PRIx PRIx64
-#define VADDR_PRIX PRIX64
-#define VADDR_MAX UINT64_MAX
+typedef uintptr_t vaddr;
+#define VADDR_PRId PRIdPTR
+#define VADDR_PRIu PRIuPTR
+#define VADDR_PRIo PRIoPTR
+#define VADDR_PRIx PRIxPTR
+#define VADDR_PRIX PRIXPTR
+#define VADDR_MAX UINTPTR_MAX

#endif
--
2.43.0
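
[Editor's note] The effect of the new definition can be checked directly: a
64-bit host keeps an 8-byte vaddr, while a 32-bit host now gets a 4-byte
type, which is safe only because configure rejects 64-bit guests there.
A small self-contained demonstration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t vaddr;
    #define VADDR_PRIx PRIxPTR
    #define VADDR_MAX  UINTPTR_MAX

    int main(void)
    {
        /* Prints 8/ffffffffffffffff on a 64-bit host, 4/ffffffff on a
         * 32-bit host -- the wider uint64_t bought nothing on the latter. */
        printf("sizeof(vaddr)=%zu, VADDR_MAX=%" VADDR_PRIx "\n",
               sizeof(vaddr), (vaddr)VADDR_MAX);
        return 0;
    }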
Since we no longer support 64-bit guests on 32-bit hosts,
we can use a 32-bit type on a 32-bit host. This shrinks
the size of the structure to 16 bytes on a 32-bit host.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/tlb-common.h | 10 +++++-----
accel/tcg/cputlb.c | 21 ++++-----------------
tcg/arm/tcg-target.c.inc | 1 -
tcg/mips/tcg-target.c.inc | 12 +++++-------
tcg/ppc/tcg-target.c.inc | 21 +++++----------------
5 files changed, 19 insertions(+), 46 deletions(-)

diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/tlb-common.h
+++ b/include/exec/tlb-common.h
@@ -XXX,XX +XXX,XX @@
#ifndef EXEC_TLB_COMMON_H
#define EXEC_TLB_COMMON_H 1

-#define CPU_TLB_ENTRY_BITS 5
+#define CPU_TLB_ENTRY_BITS (HOST_LONG_BITS == 32 ? 4 : 5)

/* Minimalized TLB entry for use by TCG fast path. */
typedef union CPUTLBEntry {
struct {
- uint64_t addr_read;
- uint64_t addr_write;
- uint64_t addr_code;
+ uintptr_t addr_read;
+ uintptr_t addr_write;
+ uintptr_t addr_code;
/*
* Addend to virtual address to get host address. IO accesses
* use the corresponding iotlb value.
@@ -XXX,XX +XXX,XX @@ typedef union CPUTLBEntry {
* Padding to get a power of two size, as well as index
* access to addr_{read,write,code}.
*/
- uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
+ uintptr_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uintptr_t)];
} CPUTLBEntry;

QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
{
/* Do not rearrange the CPUTLBEntry structure members. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
- MMU_DATA_LOAD * sizeof(uint64_t));
+ MMU_DATA_LOAD * sizeof(uintptr_t));
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
- MMU_DATA_STORE * sizeof(uint64_t));
+ MMU_DATA_STORE * sizeof(uintptr_t));
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
- MMU_INST_FETCH * sizeof(uint64_t));
+ MMU_INST_FETCH * sizeof(uintptr_t));

-#if TARGET_LONG_BITS == 32
- /* Use qatomic_read, in case of addr_write; only care about low bits. */
- const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
- ptr += HOST_BIG_ENDIAN;
- return qatomic_read(ptr);
-#else
- const uint64_t *ptr = &entry->addr_idx[access_type];
+ const uintptr_t *ptr = &entry->addr_idx[access_type];
/* ofs might correspond to .addr_write, so use qatomic_read */
return qatomic_read(ptr);
-#endif
}

static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
addr &= TARGET_PAGE_MASK;
addr += tlb_entry->addend;
if ((addr - start) < length) {
-#if TARGET_LONG_BITS == 32
- uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
- ptr_write += HOST_BIG_ENDIAN;
- qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#else
qatomic_set(&tlb_entry->addr_write,
tlb_entry->addr_write | TLB_NOTDIRTY);
-#endif
}
}
}
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
* Load the tlb comparator into R2 and the fast path addend into R1.
*/
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
if (cmp_off == 0) {
tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
} else {
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
/* Add the tlb_table pointer, creating the CPUTLBEntry address. */
tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);

- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
- /* Load the (low half) tlb comparator. */
+ /* Load the tlb comparator. */
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
cmp_off + HOST_BIG_ENDIAN * 4);
} else {
- tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
+ tcg_out_ld(s, TCG_TYPE_REG, TCG_TMP0, TCG_TMP3, cmp_off);
}

- if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
- /* Load the tlb addend for the fast path. */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
- }
+ /* Load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);

/*
* Mask the page bits, keeping the alignment bits to compare against.
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));

/*
- * Load the (low part) TLB comparator into TMP2.
+ * Load the TLB comparator into TMP2.
* For 64-bit host, always load the entire 64-bit slot for simplicity.
* We will ignore the high bits with tcg_out_cmp(..., addr_type).
*/
- if (TCG_TARGET_REG_BITS == 64) {
- if (cmp_off == 0) {
- tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
- TCG_REG_TMP1, TCG_REG_TMP2));
- } else {
- tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
- TCG_REG_TMP1, TCG_REG_TMP2));
- tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
- TCG_REG_TMP1, cmp_off);
- }
- } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
- tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
- TCG_REG_TMP1, TCG_REG_TMP2));
+ if (cmp_off == 0) {
+ tcg_out32(s, (TCG_TARGET_REG_BITS == 64 ? LDUX : LWZUX)
+ | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
} else {
tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
- cmp_off + 4 * HOST_BIG_ENDIAN);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
}

/*
--
2.43.0
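
[Editor's note] The arithmetic behind the new CPU_TLB_ENTRY_BITS value: four
pointer-sized members make the entry 4 * 4 = 16 bytes (2^4) on a 32-bit host
and 4 * 8 = 32 bytes (2^5) on a 64-bit host. A compile-time restatement in
plain C11 that mirrors, but is not, the QEMU source:

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_HOST_LONG_BITS (sizeof(void *) * 8)
    #define SKETCH_TLB_ENTRY_BITS (SKETCH_HOST_LONG_BITS == 32 ? 4 : 5)

    typedef union SketchTLBEntry {
        struct {
            uintptr_t addr_read, addr_write, addr_code;
            uintptr_t addend;
        };
        uintptr_t addr_idx[4];   /* index access to addr_{read,write,code} */
    } SketchTLBEntry;

    /* The entry must stay a power of two for the TLB index arithmetic. */
    static_assert(sizeof(SketchTLBEntry) == (1u << SKETCH_TLB_ENTRY_BITS),
                  "TLB entry size must be 2^CPU_TLB_ENTRY_BITS");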
Some large translation blocks can generate so many unique
constants that we run out of temps to hold them. In this
case, longjmp back to the start of code generation and
restart with a smaller translation block.

Buglink: https://bugs.launchpad.net/bugs/1912065
Tested-by: BALATON Zoltan <balaton@eik.bme.hu>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg.h | 3 +++
accel/tcg/translate-all.c | 15 ++++++++++++++-
tcg/tcg.c | 11 ++++++++---
3 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ struct TCGContext {

uint16_t gen_insn_end_off[TCG_MAX_INSNS];
target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
+
+ /* Exit to translator on overflow. */
+ sigjmp_buf jmp_trans;
};

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
ti = profile_getclock();

For loongarch, mips, riscv and sparc, a zero register is
available all the time. For aarch64, register index 31
depends on context: sometimes it is the stack pointer,
and sometimes it is the zero register.

Introduce a new general-purpose constraint which maps 0
to TCG_REG_ZERO, if defined. This differs from existing
constant constraints in that const_arg[*] is recorded as
false, indicating that the value is in a register.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg.h | 3 ++-
tcg/aarch64/tcg-target.h | 2 ++
tcg/loongarch64/tcg-target.h | 2 ++
tcg/mips/tcg-target.h | 2 ++
tcg/riscv/tcg-target.h | 2 ++
tcg/sparc64/tcg-target.h | 3 ++-
tcg/tcg.c | 29 ++++++++++++++++++++++-------
docs/devel/tcg-ops.rst | 4 +++-
8 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *, int,

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

-#define TCG_CT_CONST 1 /* any constant of register size */
+#define TCG_CT_CONST 1 /* any constant of register size */
+#define TCG_CT_REG_ZERO 2 /* zero, in TCG_REG_ZERO */

typedef struct TCGArgConstraint {
unsigned ct : 16;
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_AREG0 = TCG_REG_X19,
} TCGReg;

+#define TCG_REG_ZERO TCG_REG_XZR
+
#define TCG_TARGET_NB_REGS 64

#endif /* AARCH64_TCG_TARGET_H */
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
TCG_VEC_TMP0 = TCG_REG_V23,
} TCGReg;

+#define TCG_REG_ZERO TCG_REG_ZERO
+
#endif /* LOONGARCH_TCG_TARGET_H */
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
63
index XXXXXXX..XXXXXXX 100644
64
--- a/tcg/mips/tcg-target.h
65
+++ b/tcg/mips/tcg-target.h
66
@@ -XXX,XX +XXX,XX @@ typedef enum {
67
TCG_AREG0 = TCG_REG_S8,
68
} TCGReg;
69
70
+#define TCG_REG_ZERO TCG_REG_ZERO
71
+
36
#endif
72
#endif
37
73
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
38
+ gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
74
index XXXXXXX..XXXXXXX 100644
39
+ if (unlikely(gen_code_size != 0)) {
75
--- a/tcg/riscv/tcg-target.h
40
+ goto error_return;
76
+++ b/tcg/riscv/tcg-target.h
41
+ }
77
@@ -XXX,XX +XXX,XX @@ typedef enum {
78
TCG_REG_TMP2 = TCG_REG_T4,
79
} TCGReg;
80
81
+#define TCG_REG_ZERO TCG_REG_ZERO
42
+
82
+
43
tcg_func_start(tcg_ctx);
83
#endif
44
84
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
45
tcg_ctx->cpu = env_cpu(env);
85
index XXXXXXX..XXXXXXX 100644
46
gen_intermediate_code(cpu, tb, max_insns);
86
--- a/tcg/sparc64/tcg-target.h
47
tcg_ctx->cpu = NULL;
87
+++ b/tcg/sparc64/tcg-target.h
48
+ max_insns = tb->icount;
88
@@ -XXX,XX +XXX,XX @@ typedef enum {
49
89
TCG_REG_I7,
50
trace_translate_block(tb, tb->pc, tb->tc.ptr);
90
} TCGReg;
51
91
52
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
92
-#define TCG_AREG0 TCG_REG_I0
53
93
+#define TCG_AREG0 TCG_REG_I0
54
gen_code_size = tcg_gen_code(tcg_ctx, tb);
94
+#define TCG_REG_ZERO TCG_REG_G0
55
if (unlikely(gen_code_size < 0)) {
95
56
+ error_return:
96
#endif
57
switch (gen_code_size) {
58
case -1:
59
/*
60
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
61
* flush the TBs, allocate a new TB, re-initialize it per
62
* above, and re-do the actual code generation.
63
*/
64
+ qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
65
+ "Restarting code generation for "
66
+ "code_gen_buffer overflow\n");
67
goto buffer_overflow;
68
69
case -2:
70
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
71
* Try again with half as many insns as we attempted this time.
72
* If a single insn overflows, there's a bug somewhere...
73
*/
74
- max_insns = tb->icount;
75
assert(max_insns > 1);
76
max_insns /= 2;
77
+ qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
78
+ "Restarting code generation with "
79
+ "smaller translation block (max %d insns)\n",
80
+ max_insns);
81
goto tb_overflow;
82
83
default:
84
diff --git a/tcg/tcg.c b/tcg/tcg.c
97
diff --git a/tcg/tcg.c b/tcg/tcg.c
85
index XXXXXXX..XXXXXXX 100644
98
index XXXXXXX..XXXXXXX 100644
86
--- a/tcg/tcg.c
99
--- a/tcg/tcg.c
87
+++ b/tcg/tcg.c
100
+++ b/tcg/tcg.c
88
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
101
@@ -XXX,XX +XXX,XX @@ static void process_constraint_sets(void)
89
QSIMPLEQ_INIT(&s->labels);
102
case 'i':
90
}
103
args_ct[i].ct |= TCG_CT_CONST;
91
104
break;
92
-static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
105
+#ifdef TCG_REG_ZERO
93
+static TCGTemp *tcg_temp_alloc(TCGContext *s)
106
+ case 'z':
94
{
107
+ args_ct[i].ct |= TCG_CT_REG_ZERO;
95
int n = s->nb_temps++;
108
+ break;
96
- tcg_debug_assert(n < TCG_MAX_TEMPS);
109
+#endif
110
111
/* Include all of the target-specific constraints. */
112
113
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
114
arg_ct = &args_ct[i];
115
ts = arg_temp(arg);
116
117
- if (ts->val_type == TEMP_VAL_CONST
118
- && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
119
- op_cond, TCGOP_VECE(op))) {
120
- /* constant is OK for instruction */
121
- const_args[i] = 1;
122
- new_args[i] = ts->val;
123
- continue;
124
+ if (ts->val_type == TEMP_VAL_CONST) {
125
+#ifdef TCG_REG_ZERO
126
+ if (ts->val == 0 && (arg_ct->ct & TCG_CT_REG_ZERO)) {
127
+ /* Hardware zero register: indicate register via non-const. */
128
+ const_args[i] = 0;
129
+ new_args[i] = TCG_REG_ZERO;
130
+ continue;
131
+ }
132
+#endif
97
+
133
+
98
+ if (n >= TCG_MAX_TEMPS) {
134
+ if (tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
99
+ /* Signal overflow, starting over with fewer guest insns. */
135
+ op_cond, TCGOP_VECE(op))) {
100
+ siglongjmp(s->jmp_trans, -2);
136
+ /* constant is OK for instruction */
101
+ }
137
+ const_args[i] = 1;
102
return memset(&s->temps[n], 0, sizeof(TCGTemp));
138
+ new_args[i] = ts->val;
103
}
139
+ continue;
104
140
+ }
105
-static inline TCGTemp *tcg_global_alloc(TCGContext *s)
141
}
106
+static TCGTemp *tcg_global_alloc(TCGContext *s)
142
107
{
143
reg = ts->reg;
108
TCGTemp *ts;
144
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
109
145
index XXXXXXX..XXXXXXX 100644
110
tcg_debug_assert(s->nb_globals == s->nb_temps);
146
--- a/docs/devel/tcg-ops.rst
111
+ tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
147
+++ b/docs/devel/tcg-ops.rst
112
s->nb_globals++;
148
@@ -XXX,XX +XXX,XX @@ operation uses a constant input constraint which does not allow all
113
ts = tcg_temp_alloc(s);
149
constants, it must also accept registers in order to have a fallback.
114
ts->kind = TEMP_GLOBAL;
150
The constraint '``i``' is defined generically to accept any constant.
151
The constraint '``r``' is not defined generically, but is consistently
152
-used by each backend to indicate all registers.
153
+used by each backend to indicate all registers. If ``TCG_REG_ZERO``
154
+is defined by the backend, the constraint '``z``' is defined generically
155
+to map constant 0 to the hardware zero register.
156
157
The movi_i32 and movi_i64 operations must accept any constants.
158
115
--
159
--
116
2.25.1
160
2.43.0
117
161
118
162
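For a backend, the visible effect is that a store of constant 0 now arrives
in tcg_out_op() as a plain register operand. A minimal sketch of a backend
case under the new scheme (the opcode chosen and the tcg_out_st() call are
illustrative assumptions, not code from this series):

    case INDEX_op_st_i64:
        /* With the 'z' constraint, a0 is already TCG_REG_ZERO when the
         * guest stored constant 0, so no const_args[] test is needed. */
        tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
        break;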
New patch
Note that 'Z' is still used for addsub2.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-con-set.h | 12 ++++-----
 tcg/aarch64/tcg-target.c.inc     | 46 ++++++++++++++------------------
 2 files changed, 26 insertions(+), 32 deletions(-)

diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 */
C_O0_I1(r)
C_O0_I2(r, rC)
-C_O0_I2(rZ, r)
+C_O0_I2(rz, r)
C_O0_I2(w, r)
-C_O0_I3(rZ, rZ, r)
+C_O0_I3(rz, rz, r)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
C_O1_I1(w, wr)
-C_O1_I2(r, 0, rZ)
+C_O1_I2(r, 0, rz)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rA)
C_O1_I2(r, r, rAL)
C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, rz, rz)
C_O1_I2(w, 0, w)
C_O1_I2(w, w, w)
C_O1_I2(w, w, wN)
C_O1_I2(w, w, wO)
C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, r, rC, rZ, rZ)
+C_O1_I4(r, r, rC, rz, rz)
C_O2_I1(r, r, r)
-C_O2_I4(r, r, rZ, rZ, rA, rMZ)
+C_O2_I4(r, r, rz, rz, rA, rMZ)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
     TCGArg a2 = args[2];
     int c2 = const_args[2];

-    /* Some operands are defined with "rZ" constraint, a register or
-       the zero register. These need not actually test args[I] == 0. */
-#define REG0(I)  (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
-
     switch (opc) {
     case INDEX_op_goto_ptr:
         tcg_out_insn(s, 3207, BR, a0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,

     case INDEX_op_st8_i32:
     case INDEX_op_st8_i64:
-        tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2, 0);
+        tcg_out_ldst(s, I3312_STRB, a0, a1, a2, 0);
         break;
     case INDEX_op_st16_i32:
     case INDEX_op_st16_i64:
-        tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2, 1);
+        tcg_out_ldst(s, I3312_STRH, a0, a1, a2, 1);
         break;
     case INDEX_op_st_i32:
     case INDEX_op_st32_i64:
-        tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2, 2);
+        tcg_out_ldst(s, I3312_STRW, a0, a1, a2, 2);
         break;
     case INDEX_op_st_i64:
-        tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2, 3);
+        tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;

     case INDEX_op_add_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         /* FALLTHRU */
     case INDEX_op_movcond_i64:
         tcg_out_cmp(s, ext, args[5], a1, a2, c2);
-        tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
+        tcg_out_insn(s, 3506, CSEL, ext, a0, args[3], args[4], args[5]);
         break;

     case INDEX_op_qemu_ld_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
+        tcg_out_qemu_st(s, a0, a1, a2, ext);
         break;
     case INDEX_op_qemu_ld_i128:
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
         break;
     case INDEX_op_qemu_st_i128:
-        tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
+        tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
         break;

     case INDEX_op_bswap64_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,

     case INDEX_op_deposit_i64:
     case INDEX_op_deposit_i32:
-        tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
+        tcg_out_dep(s, ext, a0, a2, args[3], args[4]);
         break;

     case INDEX_op_extract_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,

     case INDEX_op_extract2_i64:
     case INDEX_op_extract2_i32:
-        tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
+        tcg_out_extr(s, ext, a0, a2, a1, args[3]);
         break;

     case INDEX_op_add2_i32:
-        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
+        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
                         (int32_t)args[4], args[5], const_args[4],
                         const_args[5], false);
         break;
     case INDEX_op_add2_i64:
-        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
+        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, a2, args[3], args[4],
                         args[5], const_args[4], const_args[5], false);
         break;
     case INDEX_op_sub2_i32:
-        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
+        tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
                         (int32_t)args[4], args[5], const_args[4],
                         const_args[5], true);
         break;
     case INDEX_op_sub2_i64:
-        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
+        tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, a2, args[3], args[4],
                         args[5], const_args[4], const_args[5], true);
         break;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
     default:
         g_assert_not_reached();
     }
-
-#undef REG0
 }

 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);

     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return C_O1_I4(r, r, rC, rZ, rZ);
+        return C_O1_I4(r, r, rC, rz, rz);

     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
         return C_O2_I1(r, r, r);
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
     case INDEX_op_qemu_st_i128:
-        return C_O0_I3(rZ, rZ, r);
+        return C_O0_I3(rz, rz, r);

     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
-        return C_O1_I2(r, 0, rZ);
+        return C_O1_I2(r, 0, rz);

     case INDEX_op_extract2_i32:
     case INDEX_op_extract2_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);

     case INDEX_op_add2_i32:
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
     case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
+        return C_O2_I4(r, r, rz, rz, rA, rMZ);

     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
--
2.43.0
New patch
Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h | 15 ++++++-------
 tcg/loongarch64/tcg-target-con-str.h |  1 -
 tcg/loongarch64/tcg-target.c.inc     | 32 ++++++++++++----------------
 3 files changed, 21 insertions(+), 27 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 * tcg-target-con-str.h; the constraint combination is inclusive or.
 */
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rz)
C_O0_I2(w, r)
C_O0_I3(r, r, r)
C_O1_I1(r, r)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rU)
C_O1_I2(r, r, rW)
-C_O1_I2(r, r, rZ)
-C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rZ, ri)
-C_O1_I2(r, rZ, rJ)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, 0, rz)
+C_O1_I2(r, rz, ri)
+C_O1_I2(r, rz, rJ)
+C_O1_I2(r, rz, rz)
C_O1_I2(w, w, w)
C_O1_I2(w, w, wM)
C_O1_I2(w, w, wA)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O1_I4(r, rz, rJ, rz, rz)
C_N2_I1(r, r, r)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('w', ALL_VECTOR_REGS)
CONST('I', TCG_CT_CONST_S12)
CONST('J', TCG_CT_CONST_S32)
CONST('U', TCG_CT_CONST_U12)
-CONST('Z', TCG_CT_CONST_ZERO)
CONST('C', TCG_CT_CONST_C12)
CONST('W', TCG_CT_CONST_WSZ)
CONST('M', TCG_CT_CONST_VCMP)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)

 #define TCG_GUEST_BASE_REG TCG_REG_S1

-#define TCG_CT_CONST_ZERO  0x100
-#define TCG_CT_CONST_S12   0x200
-#define TCG_CT_CONST_S32   0x400
-#define TCG_CT_CONST_U12   0x800
-#define TCG_CT_CONST_C12   0x1000
-#define TCG_CT_CONST_WSZ   0x2000
-#define TCG_CT_CONST_VCMP  0x4000
-#define TCG_CT_CONST_VADD  0x8000
+#define TCG_CT_CONST_S12   0x100
+#define TCG_CT_CONST_S32   0x200
+#define TCG_CT_CONST_U12   0x400
+#define TCG_CT_CONST_C12   0x800
+#define TCG_CT_CONST_WSZ   0x1000
+#define TCG_CT_CONST_VCMP  0x2000
+#define TCG_CT_CONST_VADD  0x4000

 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
     if (ct & TCG_CT_CONST) {
         return true;
     }
-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return true;
-    }
     if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);

     case INDEX_op_qemu_ld_i128:
         return C_N2_I1(r, r, r);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rZ);
+        return C_O0_I2(rz, rz);

     case INDEX_op_ext8s_i32:
     case INDEX_op_ext8s_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
         /* Must deposit into the same register as input */
-        return C_O1_I2(r, 0, rZ);
+        return C_O1_I2(r, 0, rz);

     case INDEX_op_sub_i32:
     case INDEX_op_setcond_i32:
-        return C_O1_I2(r, rZ, ri);
+        return C_O1_I2(r, rz, ri);
     case INDEX_op_sub_i64:
     case INDEX_op_setcond_i64:
-        return C_O1_I2(r, rZ, rJ);
+        return C_O1_I2(r, rz, rJ);

     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i32:
     case INDEX_op_remu_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);

     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return C_O1_I4(r, rZ, rJ, rZ, rZ);
+        return C_O1_I4(r, rz, rJ, rz, rz);

     case INDEX_op_ld_vec:
     case INDEX_op_dupm_vec:
--
2.43.0
New patch
Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target-con-set.h | 26 ++++++++++-----------
 tcg/mips/tcg-target-con-str.h |  1 -
 tcg/mips/tcg-target.c.inc     | 44 ++++++++++++++---------------------
 3 files changed, 31 insertions(+), 40 deletions(-)

diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 * tcg-target-con-str.h; the constraint combination is inclusive or.
 */
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
-C_O0_I3(rZ, r, r)
-C_O0_I3(rZ, rZ, r)
-C_O0_I4(rZ, rZ, rZ, rZ)
-C_O0_I4(rZ, rZ, r, r)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rz)
+C_O0_I3(rz, r, r)
+C_O0_I3(rz, rz, r)
+C_O0_I4(rz, rz, rz, rz)
+C_O0_I4(rz, rz, r, r)
C_O1_I1(r, r)
-C_O1_I2(r, 0, rZ)
+C_O1_I2(r, 0, rz)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
-C_O1_I2(r, r, rWZ)
-C_O1_I2(r, rZ, rN)
-C_O1_I2(r, rZ, rZ)
-C_O1_I4(r, rZ, rZ, rZ, 0)
-C_O1_I4(r, rZ, rZ, rZ, rZ)
+C_O1_I2(r, r, rzW)
+C_O1_I2(r, rz, rN)
+C_O1_I2(r, rz, rz)
+C_O1_I4(r, rz, rz, rz, 0)
+C_O1_I4(r, rz, rz, rz, rz)
C_O2_I1(r, r, r)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, rZ, rZ, rN, rN)
+C_O2_I4(r, r, rz, rz, rN, rN)
diff --git a/tcg/mips/tcg-target-con-str.h b/tcg/mips/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-str.h
+++ b/tcg/mips/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('J', TCG_CT_CONST_S16)
CONST('K', TCG_CT_CONST_P2M1)
CONST('N', TCG_CT_CONST_N16)
CONST('W', TCG_CT_CONST_WSZ)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
     g_assert_not_reached();
 }

-#define TCG_CT_CONST_ZERO 0x100
-#define TCG_CT_CONST_U16  0x200    /* Unsigned 16-bit: 0 - 0xffff.  */
-#define TCG_CT_CONST_S16  0x400    /* Signed 16-bit: -32768 - 32767 */
-#define TCG_CT_CONST_P2M1 0x800    /* Power of 2 minus 1.  */
-#define TCG_CT_CONST_N16  0x1000   /* "Negatable" 16-bit: -32767 - 32767 */
-#define TCG_CT_CONST_WSZ  0x2000   /* word size */
+#define TCG_CT_CONST_U16  0x100    /* Unsigned 16-bit: 0 - 0xffff.  */
+#define TCG_CT_CONST_S16  0x200    /* Signed 16-bit: -32768 - 32767 */
+#define TCG_CT_CONST_P2M1 0x400    /* Power of 2 minus 1.  */
+#define TCG_CT_CONST_N16  0x800    /* "Negatable" 16-bit: -32767 - 32767 */
+#define TCG_CT_CONST_WSZ  0x1000   /* word size */

 #define ALL_GENERAL_REGS  0xffffffffu

@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
 {
     if (ct & TCG_CT_CONST) {
         return 1;
-    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
     } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
         return 1;
     } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a0, a1, a2;
     int c2;

-    /*
-     * Note that many operands use the constraint set "rZ".
-     * We make use of the fact that 0 is the ZERO register,
-     * and hence such cases need not check for const_args.
-     */
     a0 = args[0];
     a1 = args[1];
     a2 = args[2];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);

     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
         return C_O1_I2(r, r, rJ);
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
-        return C_O1_I2(r, rZ, rN);
+        return C_O1_I2(r, rz, rN);
     case INDEX_op_mul_i32:
     case INDEX_op_mulsh_i32:
     case INDEX_op_muluh_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_remu_i64:
     case INDEX_op_nor_i64:
     case INDEX_op_setcond_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);
     case INDEX_op_muls2_i32:
     case INDEX_op_mulu2_i32:
     case INDEX_op_muls2_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
         return C_O1_I2(r, r, ri);
     case INDEX_op_clz_i32:
     case INDEX_op_clz_i64:
-        return C_O1_I2(r, r, rWZ);
+        return C_O1_I2(r, r, rzW);

     case INDEX_op_deposit_i32:
     case INDEX_op_deposit_i64:
-        return C_O1_I2(r, 0, rZ);
+        return C_O1_I2(r, 0, rz);
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rZ);
+        return C_O0_I2(rz, rz);
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
         return (use_mips32r6_instructions
-                ? C_O1_I4(r, rZ, rZ, rZ, rZ)
-                : C_O1_I4(r, rZ, rZ, rZ, 0));
+                ? C_O1_I4(r, rz, rz, rz, rz)
+                : C_O1_I4(r, rz, rz, rz, 0));
     case INDEX_op_add2_i32:
     case INDEX_op_sub2_i32:
-        return C_O2_I4(r, r, rZ, rZ, rN, rN);
+        return C_O2_I4(r, r, rz, rz, rN, rN);
     case INDEX_op_setcond2_i32:
-        return C_O1_I4(r, rZ, rZ, rZ, rZ);
+        return C_O1_I4(r, rz, rz, rz, rz);
     case INDEX_op_brcond2_i32:
-        return C_O0_I4(rZ, rZ, rZ, rZ);
+        return C_O0_I4(rz, rz, rz, rz);

     case INDEX_op_qemu_ld_i32:
         return C_O1_I1(r, r);
     case INDEX_op_qemu_st_i32:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);
     case INDEX_op_qemu_ld_i64:
         return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
     case INDEX_op_qemu_st_i64:
-        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rZ, r) : C_O0_I3(rZ, rZ, r);
+        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rz, r) : C_O0_I3(rz, rz, r);

     default:
         return C_NotImplemented;
--
2.43.0
New patch
Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h | 10 +++++-----
 tcg/riscv/tcg-target-con-str.h |  1 -
 tcg/riscv/tcg-target.c.inc     | 28 ++++++++++++----------------
 3 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 * tcg-target-con-str.h; the constraint combination is inclusive or.
 */
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rZ)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rz)
C_O1_I1(r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
-C_O1_I2(r, rZ, rN)
-C_O1_I2(r, rZ, rZ)
+C_O1_I2(r, rz, rN)
+C_O1_I2(r, rz, rz)
C_N1_I2(r, r, rM)
C_O1_I4(r, r, rI, rM, rM)
-C_O2_I4(r, r, rZ, rZ, rM, rM)
+C_O2_I4(r, r, rz, rz, rM, rM)
C_O0_I2(v, r)
C_O1_I1(v, r)
C_O1_I1(v, v)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('K', TCG_CT_CONST_S5)
CONST('L', TCG_CT_CONST_CMP_VI)
CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
     return TCG_REG_A0 + slot;
 }

-#define TCG_CT_CONST_ZERO   0x100
-#define TCG_CT_CONST_S12    0x200
-#define TCG_CT_CONST_N12    0x400
-#define TCG_CT_CONST_M12    0x800
-#define TCG_CT_CONST_J12    0x1000
-#define TCG_CT_CONST_S5     0x2000
-#define TCG_CT_CONST_CMP_VI 0x4000
+#define TCG_CT_CONST_S12    0x100
+#define TCG_CT_CONST_N12    0x200
+#define TCG_CT_CONST_M12    0x400
+#define TCG_CT_CONST_J12    0x800
+#define TCG_CT_CONST_S5     0x1000
+#define TCG_CT_CONST_CMP_VI 0x2000

 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
     if (ct & TCG_CT_CONST) {
         return 1;
     }
-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
-    }
     if (type >= TCG_TYPE_V64) {
         /* Val is replicated by VECE; extract the highest element. */
         val >>= (-8 << vece) & 63;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);

     case INDEX_op_add_i32:
     case INDEX_op_and_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
-        return C_O1_I2(r, rZ, rN);
+        return C_O1_I2(r, rz, rN);

     case INDEX_op_mul_i32:
     case INDEX_op_mulsh_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_divu_i64:
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i64:
-        return C_O1_I2(r, rZ, rZ);
+        return C_O1_I2(r, rz, rz);

     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rZ);
+        return C_O0_I2(rz, rz);

     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
     case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, rZ, rZ, rM, rM);
+        return C_O2_I4(r, r, rz, rz, rM, rM);

     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
         return C_O1_I1(r, r);
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);

     case INDEX_op_st_vec:
         return C_O0_I2(v, r);
--
2.43.0
New patch
Replace target-specific 'Z' with generic 'z'.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target-con-set.h | 12 ++++++------
 tcg/sparc64/tcg-target-con-str.h |  1 -
 tcg/sparc64/tcg-target.c.inc     | 17 +++++++----------
 3 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 * tcg-target-con-str.h; the constraint combination is inclusive or.
 */
C_O0_I1(r)
-C_O0_I2(rZ, r)
-C_O0_I2(rZ, rJ)
+C_O0_I2(rz, r)
+C_O0_I2(rz, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
-C_O1_I2(r, rZ, rJ)
-C_O1_I4(r, rZ, rJ, rI, 0)
-C_O2_I2(r, r, rZ, rJ)
-C_O2_I4(r, r, rZ, rZ, rJ, rJ)
+C_O1_I2(r, rz, rJ)
+C_O1_I4(r, rz, rJ, rI, 0)
+C_O2_I2(r, r, rz, rJ)
+C_O2_I4(r, r, rz, rz, rJ, rJ)
diff --git a/tcg/sparc64/tcg-target-con-str.h b/tcg/sparc64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-str.h
+++ b/tcg/sparc64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('r', ALL_GENERAL_REGS)
 */
CONST('I', TCG_CT_CONST_S11)
CONST('J', TCG_CT_CONST_S13)
-CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {

 #define TCG_CT_CONST_S11  0x100
 #define TCG_CT_CONST_S13  0x200
-#define TCG_CT_CONST_ZERO 0x400

 #define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)

@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
         val = (int32_t)val;
     }

-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
-    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
+    if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
         return 1;
     } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
         return 1;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return C_O0_I2(rZ, r);
+        return C_O0_I2(rz, r);

     case INDEX_op_add_i32:
     case INDEX_op_add_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_setcond_i64:
     case INDEX_op_negsetcond_i32:
     case INDEX_op_negsetcond_i64:
-        return C_O1_I2(r, rZ, rJ);
+        return C_O1_I2(r, rz, rJ);

     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
-        return C_O0_I2(rZ, rJ);
+        return C_O0_I2(rz, rJ);
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return C_O1_I4(r, rZ, rJ, rI, 0);
+        return C_O1_I4(r, rz, rJ, rI, 0);
     case INDEX_op_add2_i32:
     case INDEX_op_add2_i64:
     case INDEX_op_sub2_i32:
     case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
+        return C_O2_I4(r, r, rz, rz, rJ, rJ);
     case INDEX_op_mulu2_i32:
     case INDEX_op_muls2_i32:
-        return C_O2_I2(r, r, rZ, rJ);
+        return C_O2_I2(r, r, rz, rJ);
     case INDEX_op_muluh_i64:
         return C_O1_I2(r, r, r);

--
2.43.0
New patch
From: Fabiano Rosas <farosas@suse.de>

When complying with the alignment requested in the ELF and unmapping
the excess reservation, an align_end that is not aligned to the guest
page causes the unmap to be rejected by the alignment check in
target_munmap, after which later brk adjustments hit an EEXIST.

Fix by aligning the start of the region to be unmapped.

Fixes: c81d1fafa6 ("linux-user: Honor elf alignment when placing images")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1913
Signed-off-by: Fabiano Rosas <farosas@suse.de>
[rth: Align load_end as well.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250213143558.10504-1-farosas@suse.de>
---
 linux-user/elfload.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, const ImageSource *src,

     if (align_size != reserve_size) {
         abi_ulong align_addr = ROUND_UP(load_addr, align);
-        abi_ulong align_end = align_addr + reserve_size;
-        abi_ulong load_end = load_addr + align_size;
+        abi_ulong align_end = TARGET_PAGE_ALIGN(align_addr + reserve_size);
+        abi_ulong load_end = TARGET_PAGE_ALIGN(load_addr + align_size);

         if (align_addr != load_addr) {
             target_munmap(load_addr, align_addr - load_addr);
--
2.43.0
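A worked example of the arithmetic, using assumed values (these numbers are
illustrative, not taken from the bug report): suppose TARGET_PAGE_SIZE is
0x1000, the image requests align = 0x10000, load_addr = 0x7f0003000 and
reserve_size = 0x24800. Then:

    align_addr = ROUND_UP(0x7f0003000, 0x10000);           /* 0x7f0010000 */
    align_end  = TARGET_PAGE_ALIGN(align_addr + 0x24800);  /* 0x7f0035000 */

Previously align_end would have been 0x7f0034800, which is not page-aligned,
so the trailing target_munmap() of the excess reservation failed its
alignment check and the stale mapping lingered.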
New patch
From: Andreas Schwab <schwab@suse.de>

SA_RESTORER and the associated sa_restorer field of struct sigaction are
an obsolete feature, not expected to be used by future architectures.
They are also absent on RISC-V, LoongArch, Hexagon and OpenRISC, but
defined due to their use of generic/signal.h. This leads to corrupted
data and out-of-bounds accesses.

Move the definition of TARGET_SA_RESTORER out of generic/signal.h into the
target_signal.h files that need it. Note that m68k has the sa_restorer
field, but does not use it and does not define SA_RESTORER.

Reported-by: Thomas Weißschuh <thomas@t-8ch.de>
Signed-off-by: Andreas Schwab <schwab@suse.de>
Reviewed-by: Thomas Weißschuh <thomas@t-8ch.de>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <mvmed060xc9.fsf@suse.de>
---
 linux-user/aarch64/target_signal.h    | 2 ++
 linux-user/arm/target_signal.h        | 2 ++
 linux-user/generic/signal.h           | 1 -
 linux-user/i386/target_signal.h       | 2 ++
 linux-user/m68k/target_signal.h       | 1 +
 linux-user/microblaze/target_signal.h | 2 ++
 linux-user/ppc/target_signal.h        | 2 ++
 linux-user/s390x/target_signal.h      | 2 ++
 linux-user/sh4/target_signal.h        | 2 ++
 linux-user/x86_64/target_signal.h     | 2 ++
 linux-user/xtensa/target_signal.h     | 2 ++
 11 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_SEGV_MTEAERR  8  /* Asynchronous ARM MTE error */
 #define TARGET_SEGV_MTESERR  9  /* Synchronous ARM MTE exception */

diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/target_signal.h
+++ b/linux-user/arm/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/generic/signal.h b/linux-user/generic/signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/generic/signal.h
+++ b/linux-user/generic/signal.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_SA_RESTART     0x10000000
 #define TARGET_SA_NODEFER     0x40000000
 #define TARGET_SA_RESETHAND   0x80000000
-#define TARGET_SA_RESTORER    0x04000000

 #define TARGET_SIGHUP          1
 #define TARGET_SIGINT          2
diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/i386/target_signal.h
+++ b/linux-user/i386/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/m68k/target_signal.h
+++ b/linux-user/m68k/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_ARCH_HAS_SA_RESTORER 1
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/microblaze/target_signal.h
+++ b/linux-user/microblaze/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

 #endif /* MICROBLAZE_TARGET_SIGNAL_H */
diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/ppc/target_signal.h
+++ b/linux-user/ppc/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #if !defined(TARGET_PPC64)
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #endif
diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/s390x/target_signal.h
+++ b/linux-user/s390x/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/sh4/target_signal.h
+++ b/linux-user/sh4/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SETUP_FRAME
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/x86_64/target_signal.h
+++ b/linux-user/x86_64/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 /* For x86_64, use of SA_RESTORER is mandatory. */
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0

diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/xtensa/target_signal.h
+++ b/linux-user/xtensa/target_signal.h
@@ -XXX,XX +XXX,XX @@

 #include "../generic/signal.h"

+#define TARGET_SA_RESTORER 0x04000000
+
 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1

 #endif
--
2.43.0
New patch
From: Mikael Szreder <git@miszr.win>

A bug was introduced in commit 0bba7572d40d which causes the fdtox
and fqtox instructions to incorrectly select the destination registers.
More information and a test program can be found in issue #2802.

Cc: qemu-stable@nongnu.org
Fixes: 0bba7572d40d ("target/sparc: Perform DFPREG/QFPREG in decodetree")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2802
Signed-off-by: Mikael Szreder <git@miszr.win>
Acked-by: Artyom Tarasenko <atar4qemu@gmail.com>
[rth: Squash patches together, since the second fixes a typo in the first.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250205090333.19626-3-git@miszr.win>
---
 target/sparc/insns.decode | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/insns.decode
+++ b/target/sparc/insns.decode
@@ -XXX,XX +XXX,XX @@ FdMULq      10 ..... 110100 ..... 0 0110 1110 ..... @q_d_d
 FNHADDs     10 ..... 110100 ..... 0 0111 0001 ..... @r_r_r
 FNHADDd     10 ..... 110100 ..... 0 0111 0010 ..... @d_d_d
 FNsMULd     10 ..... 110100 ..... 0 0111 1001 ..... @d_r_r
-FsTOx       10 ..... 110100 00000 0 1000 0001 ..... @r_r2
-FdTOx       10 ..... 110100 00000 0 1000 0010 ..... @r_d2
-FqTOx       10 ..... 110100 00000 0 1000 0011 ..... @r_q2
-FxTOs       10 ..... 110100 00000 0 1000 0100 ..... @r_r2
-FxTOd       10 ..... 110100 00000 0 1000 1000 ..... @d_r2
-FxTOq       10 ..... 110100 00000 0 1000 1100 ..... @q_r2
+FsTOx       10 ..... 110100 00000 0 1000 0001 ..... @d_r2
+FdTOx       10 ..... 110100 00000 0 1000 0010 ..... @d_d2
+FqTOx       10 ..... 110100 00000 0 1000 0011 ..... @d_q2
+FxTOs       10 ..... 110100 00000 0 1000 0100 ..... @r_d2
+FxTOd       10 ..... 110100 00000 0 1000 1000 ..... @d_d2
+FxTOq       10 ..... 110100 00000 0 1000 1100 ..... @q_d2
 FiTOs       10 ..... 110100 00000 0 1100 0100 ..... @r_r2
 FdTOs       10 ..... 110100 00000 0 1100 0110 ..... @r_d2
 FqTOs       10 ..... 110100 00000 0 1100 0111 ..... @r_q2
--
2.43.0
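For background on why the decode format matters: the source and destination
of these conversions are 64-bit values, so the operands must be decoded as
double registers. SPARC encodes a double-register number by folding bit 0
of the architectural number into bit 5 of the field, roughly as in the
sketch below (an assumed rendering for illustration, not code from this
patch):

    /* f0..f62 double-register number -> 6-bit register index. */
    static inline int dfpreg(int r)
    {
        return ((r & 1) << 5) | (r & 0x1e);
    }

Decoding an fdtox/fqtox result with a single-precision @r_* format skips
this folding, which is how two distinct registers ended up aliased.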
New patch
From: Mikael Szreder <git@miszr.win>

The gdbstub implementation for the Sparc architecture would
incorrectly calculate the floating point register offset. This
resulted in, for example, registers f32 and f34 pointing to
the same value.

The issue was caused by confusion between even register numbers
and even register indexes. For example, the register index of f32 is 64
and that of f34 is 65.

Cc: qemu-stable@nongnu.org
Fixes: 30038fd81808 ("target-sparc: Change fpr representation to doubles.")
Signed-off-by: Mikael Szreder <git@miszr.win>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250214070343.11501-1-git@miszr.win>
---
 target/sparc/gdbstub.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/gdbstub.c
+++ b/target/sparc/gdbstub.c
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
         }
     }
     if (n < 80) {
-        /* f32-f62 (double width, even numbers only) */
-        return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll);
+        /* f32-f62 (16 double width registers, even register numbers only)
+         * n == 64: f32 : env->fpr[16]
+         * n == 65: f34 : env->fpr[17]
+         * etc...
+         * n == 79: f62 : env->fpr[31]
+         */
+        return gdb_get_reg64(mem_buf, env->fpr[(n - 64) + 16].ll);
     }
     switch (n) {
     case 80:
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
         }
         return 4;
     } else if (n < 80) {
-        /* f32-f62 (double width, even numbers only) */
-        env->fpr[(n - 32) / 2].ll = tmp;
+        /* f32-f62 (16 double width registers, even register numbers only)
+         * n == 64: f32 : env->fpr[16]
+         * n == 65: f34 : env->fpr[17]
+         * etc...
+         * n == 79: f62 : env->fpr[31]
+         */
+        env->fpr[(n - 64) + 16].ll = tmp;
     } else {
         switch (n) {
         case 80:
--
2.43.0
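The fixed mapping can also be read as a tiny helper; the function below is
an illustrative restatement of the arithmetic above, not something that
exists in gdbstub.c:

    #include <assert.h>

    /* gdb register numbers 64..79 name f32..f62 (even numbers only),
     * backed by env->fpr[16]..env->fpr[31]. */
    static inline int sparc_fpr_index(int n)
    {
        assert(n >= 64 && n < 80);
        return (n - 64) + 16;   /* f32 -> fpr[16], f34 -> fpr[17], ... */
    }

The old expression (n - 32) / 2 mapped both n == 64 and n == 65 to
fpr[16], which is exactly the f32/f34 aliasing described above.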
New patch
From: Artyom Tarasenko <atar4qemu@gmail.com>

Fake access to
PCR Performance Control Register
and
PIC Performance Instrumentation Counter.

Ignore writes in privileged mode, and return 0 on reads.

This allows booting Tribblix, MilaX and v9os under Niagara target.

Signed-off-by: Artyom Tarasenko <atar4qemu@gmail.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20250209211248.50383-1-atar4qemu@gmail.com>
---
 target/sparc/translate.c  | 19 +++++++++++++++++++
 target/sparc/insns.decode |  7 ++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)

 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

+static TCGv do_rdpic(DisasContext *dc, TCGv dst)
+{
+    return tcg_constant_tl(0);
+}
+
+TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
+
+
 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
 {
     gen_helper_rdccr(dst, tcg_env);
@@ -XXX,XX +XXX,XX @@ static void do_wrfprs(DisasContext *dc, TCGv src)

 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

+static bool do_priv_nop(DisasContext *dc, bool priv)
+{
+    if (!priv) {
+        return raise_priv(dc);
+    }
+    return advance_pc(dc);
+}
+
+TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
+TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
+
 static void do_wrgsr(DisasContext *dc, TCGv src)
 {
     gen_trap_ifnofpu(dc);
diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/insns.decode
+++ b/target/sparc/insns.decode
@@ -XXX,XX +XXX,XX @@ CALL    01 i:s30
 RDTICK      10 rd:5 101000 00100 0 0000000000000
 RDPC        10 rd:5 101000 00101 0 0000000000000
 RDFPRS      10 rd:5 101000 00110 0 0000000000000
-RDASR17     10 rd:5 101000 10001 0 0000000000000
+{
+  RDASR17   10 rd:5 101000 10001 0 0000000000000
+  RDPIC     10 rd:5 101000 10001 0 0000000000000
+}
 RDGSR       10 rd:5 101000 10011 0 0000000000000
 RDSOFTINT   10 rd:5 101000 10110 0 0000000000000
 RDTICK_CMPR 10 rd:5 101000 10111 0 0000000000000
@@ -XXX,XX +XXX,XX @@ CALL    01 i:s30
 WRCCR       10 00010 110000 ..... . .............          @n_r_ri
 WRASI       10 00011 110000 ..... . .............          @n_r_ri
 WRFPRS      10 00110 110000 ..... . .............          @n_r_ri
+WRPCR       10 10000 110000 01000 0 0000000000000
+WRPIC       10 10001 110000 01000 0 0000000000000
 {
   WRGSR     10 10011 110000 ..... . .............          @n_r_ri
   WRPOWERDOWN 10 10011 110000 ..... . .............        @n_r_ri
--
2.43.0
New patch
Eliminate code repetition by using the appropriate helpers.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 65 +++++----------------------------------
 1 file changed, 8 insertions(+), 57 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
         tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
                        label_this, small);
         break;
+
     case TCG_COND_NE:
     case TCG_COND_TSTNE:
         tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
         tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
                        label_this, small);
         break;
-    case TCG_COND_LT:
-        tcg_out_brcond(s, 0, TCG_COND_LT, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_LTU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_LE:
-        tcg_out_brcond(s, 0, TCG_COND_LT, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_LEU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_GT:
-        tcg_out_brcond(s, 0, TCG_COND_GT, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_GTU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_GE:
-        tcg_out_brcond(s, 0, TCG_COND_GT, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_GEU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_LTU:
-        tcg_out_brcond(s, 0, TCG_COND_LTU, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_LTU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_LEU:
-        tcg_out_brcond(s, 0, TCG_COND_LTU, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_LEU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_GTU:
-        tcg_out_brcond(s, 0, TCG_COND_GTU, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_GTU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
-    case TCG_COND_GEU:
-        tcg_out_brcond(s, 0, TCG_COND_GTU, args[1], args[3], const_args[3],
-                       label_this, small);
-        tcg_out_jxx(s, JCC_JNE, label_next, 1);
-        tcg_out_brcond(s, 0, TCG_COND_GEU, args[0], args[2], const_args[2],
-                       label_this, small);
-        break;
+
     default:
-        g_assert_not_reached();
+        tcg_out_brcond(s, 0, tcg_high_cond(cond), args[1],
+                       args[3], const_args[3], label_this, small);
+        tcg_out_jxx(s, JCC_JNE, label_next, 1);
+        tcg_out_brcond(s, 0, tcg_unsigned_cond(cond), args[0],
+                       args[2], const_args[2], label_this, small);
+        break;
     }
     tcg_out_label(s, label_next);
 }
--
2.43.0
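The default case works because every remaining ordering condition lowers the
same way: test the high halves with the strict form of the condition
(tcg_high_cond) and branch to the target if it holds, skip ahead if the high
halves differ at all, and otherwise compare the low halves unsigned
(tcg_unsigned_cond). A self-contained illustration of that decomposition for
one condition, signed less-than, assuming nothing from the QEMU sources:

    #include <stdbool.h>
    #include <stdint.h>

    /* (ah:al) < (bh:bl) as a signed 64-bit comparison on 32-bit halves. */
    static bool lt64(int32_t ah, uint32_t al, int32_t bh, uint32_t bl)
    {
        if (ah != bh) {
            return ah < bh;   /* high halves: signed, strict */
        }
        return al < bl;       /* low halves: always unsigned */
    }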
New patch
These defines never should have been added as they were
never used. Only 32-bit hosts may have these opcodes and
they have them unconditionally.

Fixes: 6cb14e4de29 ("tcg/loongarch64: Add the tcg-target.h file")
Fixes: fb1f70f3685 ("tcg/riscv: Add the tcg-target.h file")
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-has.h | 2 --
 tcg/riscv/tcg-target-has.h       | 2 --
 2 files changed, 4 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        0
-#define TCG_TARGET_HAS_brcond2          0
-#define TCG_TARGET_HAS_setcond2         0
 #define TCG_TARGET_HAS_qemu_st8_i32     0

 /* 64-bit operations */
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_clz_i32          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctz_i32          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctpop_i32        (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_brcond2          1
-#define TCG_TARGET_HAS_setcond2         1
 #define TCG_TARGET_HAS_qemu_st8_i32     0

 #define TCG_TARGET_HAS_negsetcond_i64   1
--
2.43.0