The following changes since commit 91e92cad67caca3bc4b8e920ddb5c8ca64aac9e1:

  Merge remote-tracking branch 'remotes/cohuck-gitlab/tags/s390x-20210305' into staging (2021-03-05 19:04:47 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20210306

for you to fetch changes up to 6cc9d67c6f682cf04eea2d6e64a252b63a7eccdf:

  accel/tcg: Precompute curr_cflags into cpu->tcg_cflags (2021-03-06 11:53:57 -0800)

----------------------------------------------------------------
TCI build fix and cleanup
Streamline tb_lookup
Fixes for tcg/aarch64

----------------------------------------------------------------
Alex Bennée (4):
      accel/tcg: rename tb_lookup__cpu_state and hoist state extraction
      accel/tcg: move CF_CLUSTER calculation to curr_cflags
      accel/tcg: drop the use of CF_HASH_MASK and rename params
      include/exec: lightly re-arrange TranslationBlock

Richard Henderson (23):
      tcg/aarch64: Fix constant subtraction in tcg_out_addsub2
      tcg/aarch64: Fix I3617_CMLE0
      tcg/aarch64: Fix generation of "scalar" vector operations
      tcg/tci: Use exec/cpu_ldst.h interfaces
      tcg: Split out tcg_raise_tb_overflow
      tcg: Manage splitwx in tc_ptr_to_region_tree by hand
      tcg/tci: Merge identical cases in generation (arithmetic opcodes)
      tcg/tci: Merge identical cases in generation (exchange opcodes)
      tcg/tci: Merge identical cases in generation (deposit opcode)
      tcg/tci: Merge identical cases in generation (conditional opcodes)
      tcg/tci: Merge identical cases in generation (load/store opcodes)
      tcg/tci: Remove tci_read_r8
      tcg/tci: Remove tci_read_r8s
      tcg/tci: Remove tci_read_r16
      tcg/tci: Remove tci_read_r16s
      tcg/tci: Remove tci_read_r32
      tcg/tci: Remove tci_read_r32s
      tcg/tci: Reduce use of tci_read_r64
      tcg/tci: Merge basic arithmetic operations
      tcg/tci: Merge extension operations
      tcg/tci: Merge bswap operations
      tcg/tci: Merge mov, not and neg operations
      accel/tcg: Precompute curr_cflags into cpu->tcg_cflags

 accel/tcg/tcg-accel-ops.h       |   1 +
 include/exec/exec-all.h         |  22 +-
 include/exec/tb-lookup.h        |  26 +-
 include/hw/core/cpu.h           |   2 +
 accel/tcg/cpu-exec.c            |  34 +--
 accel/tcg/tcg-accel-ops-mttcg.c |   3 +-
 accel/tcg/tcg-accel-ops-rr.c    |   2 +-
 accel/tcg/tcg-accel-ops.c       |   8 +
 accel/tcg/tcg-runtime.c         |   6 +-
 accel/tcg/translate-all.c       |  18 +-
 linux-user/main.c               |   1 +
 linux-user/sh4/signal.c         |   8 +-
 linux-user/syscall.c            |  18 +-
 softmmu/physmem.c               |   2 +-
 tcg/tcg.c                       |  29 ++-
 tcg/tci.c                       | 526 ++++++++++++----------------------------
 tcg/aarch64/tcg-target.c.inc    | 229 ++++++++++++++---
 tcg/tci/tcg-target.c.inc        | 204 ++++++----------
 18 files changed, 529 insertions(+), 610 deletions(-)
An hppa guest executing

0x000000000000e05c:  ldil L%10000,r4
0x000000000000e060:  ldo 0(r4),r4
0x000000000000e064:  sub r3,r4,sp

produces

 ---- 000000000000e064 000000000000e068
 sub2_i32 tmp0,tmp4,r3,$0x1,$0x10000,$0x0

after folding and constant propagation. Then we hit

tcg-target.c.inc:640: tcg_out_insn_3401: Assertion `aimm <= 0xfff' failed.

because aimm is in fact -16, but unsigned.

The ((bl < 0) ^ sub) condition which negates bl is incorrect and will
always lead to this abort. If the constant is positive, sub will make
it negative; if the constant is negative, sub will keep it negative.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
     }
 }
 
-static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
-                                   TCGReg rh, TCGReg al, TCGReg ah,
-                                   tcg_target_long bl, tcg_target_long bh,
-                                   bool const_bl, bool const_bh, bool sub)
+static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
+                            TCGReg rh, TCGReg al, TCGReg ah,
+                            tcg_target_long bl, tcg_target_long bh,
+                            bool const_bl, bool const_bh, bool sub)
 {
     TCGReg orig_rl = rl;
     AArch64Insn insn;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
     }
 
     if (const_bl) {
-        insn = I3401_ADDSI;
-        if ((bl < 0) ^ sub) {
-            insn = I3401_SUBSI;
+        if (bl < 0) {
             bl = -bl;
+            insn = sub ? I3401_ADDSI : I3401_SUBSI;
+        } else {
+            insn = sub ? I3401_SUBSI : I3401_ADDSI;
         }
+
         if (unlikely(al == TCG_REG_XZR)) {
             /* ??? We want to allow al to be zero for the benefit of
                negation via subtraction. However, that leaves open the
--
2.25.1
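To make the failure mode described in the message above concrete, here is a small standalone C sketch (illustrative only, not QEMU code; the "ADDSI"/"SUBSI" strings merely stand in for the real I3401_* encodings). It replays the old and the fixed immediate selection and shows that the old ((bl < 0) ^ sub) test produces a negative immediate whenever sub is set, which is exactly what trips the aimm <= 0xfff assertion:

/* Standalone sketch of the immediate selection above; not QEMU code. */
#include <stdio.h>
#include <stdbool.h>

static void old_pick(long bl, bool sub, const char **insn, long *imm)
{
    *insn = "ADDSI";
    if ((bl < 0) ^ sub) {          /* the buggy test */
        bl = -bl;
        *insn = "SUBSI";
    }
    *imm = bl;
}

static void new_pick(long bl, bool sub, const char **insn, long *imm)
{
    if (bl < 0) {                  /* always normalize to a positive imm */
        bl = -bl;
        *insn = sub ? "ADDSI" : "SUBSI";
    } else {
        *insn = sub ? "SUBSI" : "ADDSI";
    }
    *imm = bl;
}

int main(void)
{
    long cases[] = { 1, -16 };
    for (int s = 0; s < 2; s++) {
        for (int i = 0; i < 2; i++) {
            const char *oi, *ni;
            long ov, nv;
            old_pick(cases[i], s, &oi, &ov);
            new_pick(cases[i], s, &ni, &nv);
            printf("bl=%3ld sub=%d  old: %s %3ld   new: %s %3ld\n",
                   cases[i], s, oi, ov, ni, nv);
        }
    }
    return 0;   /* old: imm goes negative whenever sub=1; new: imm >= 0 */
}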
Fix a typo in the encoding of the cmle (zero) instruction.

Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     I3617_CMEQ0     = 0x0e209800,
     I3617_CMLT0     = 0x0e20a800,
     I3617_CMGE0     = 0x2e208800,
-    I3617_CMLE0     = 0x2e20a800,
+    I3617_CMLE0     = 0x2e209800,
     I3617_NOT       = 0x2e205800,
     I3617_ABS       = 0x0e20b800,
     I3617_NEG       = 0x2e20b800,
--
2.25.1
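The constants quoted in the hunk make the typo visible by inspection: the corrected I3617_CMLE0 is the CMEQ0 base opcode with bit 29 set (the U bit of this encoding group), while the old value had instead reused CMLT0's low bits. A tiny standalone check of that relationship (plain C, not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t CMEQ0 = 0x0e209800;
    const uint32_t CMLT0 = 0x0e20a800;
    const uint32_t U_BIT = 1u << 29;

    assert((CMEQ0 | U_BIT) == 0x2e209800);  /* new, correct I3617_CMLE0 */
    assert((CMLT0 | U_BIT) == 0x2e20a800);  /* old, mistyped value */
    return 0;
}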
For some vector operations, "1D" is not a valid type, and there
are separate instructions for the 64-bit scalar operation.

Tested-by: Stefan Weil <sw@weilnetz.de>
Buglink: https://bugs.launchpad.net/qemu/+bug/1916112
Fixes: 14e4c1e2355 ("tcg/aarch64: Add vector operations")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 211 ++++++++++++++++++++++++++++++-----
 1 file changed, 181 insertions(+), 30 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
17
I3606_BIC = 0x2f001400,
18
I3606_ORR = 0x0f001400,
19
20
+ /* AdvSIMD scalar shift by immediate */
21
+ I3609_SSHR = 0x5f000400,
22
+ I3609_SSRA = 0x5f001400,
23
+ I3609_SHL = 0x5f005400,
24
+ I3609_USHR = 0x7f000400,
25
+ I3609_USRA = 0x7f001400,
26
+ I3609_SLI = 0x7f005400,
27
+
28
+ /* AdvSIMD scalar three same */
29
+ I3611_SQADD = 0x5e200c00,
30
+ I3611_SQSUB = 0x5e202c00,
31
+ I3611_CMGT = 0x5e203400,
32
+ I3611_CMGE = 0x5e203c00,
33
+ I3611_SSHL = 0x5e204400,
34
+ I3611_ADD = 0x5e208400,
35
+ I3611_CMTST = 0x5e208c00,
36
+ I3611_UQADD = 0x7e200c00,
37
+ I3611_UQSUB = 0x7e202c00,
38
+ I3611_CMHI = 0x7e203400,
39
+ I3611_CMHS = 0x7e203c00,
40
+ I3611_USHL = 0x7e204400,
41
+ I3611_SUB = 0x7e208400,
42
+ I3611_CMEQ = 0x7e208c00,
43
+
44
+ /* AdvSIMD scalar two-reg misc */
45
+ I3612_CMGT0 = 0x5e208800,
46
+ I3612_CMEQ0 = 0x5e209800,
47
+ I3612_CMLT0 = 0x5e20a800,
48
+ I3612_ABS = 0x5e20b800,
49
+ I3612_CMGE0 = 0x7e208800,
50
+ I3612_CMLE0 = 0x7e209800,
51
+ I3612_NEG = 0x7e20b800,
52
+
53
/* AdvSIMD shift by immediate */
54
I3614_SSHR = 0x0f000400,
55
I3614_SSRA = 0x0f001400,
56
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
57
| (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
58
}
59
60
+static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
61
+ TCGReg rd, TCGReg rn, unsigned immhb)
62
+{
63
+ tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
64
+}
65
+
66
+static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
67
+ unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
68
+{
69
+ tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
70
+ | (rn & 0x1f) << 5 | (rd & 0x1f));
71
+}
72
+
73
+static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
74
+ unsigned size, TCGReg rd, TCGReg rn)
75
+{
76
+ tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
77
+}
78
+
79
static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
80
TCGReg rd, TCGReg rn, unsigned immhb)
81
{
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
83
unsigned vecl, unsigned vece,
84
const TCGArg *args, const int *const_args)
85
{
86
- static const AArch64Insn cmp_insn[16] = {
87
+ static const AArch64Insn cmp_vec_insn[16] = {
88
[TCG_COND_EQ] = I3616_CMEQ,
89
[TCG_COND_GT] = I3616_CMGT,
90
[TCG_COND_GE] = I3616_CMGE,
91
[TCG_COND_GTU] = I3616_CMHI,
92
[TCG_COND_GEU] = I3616_CMHS,
93
};
94
- static const AArch64Insn cmp0_insn[16] = {
95
+ static const AArch64Insn cmp_scalar_insn[16] = {
96
+ [TCG_COND_EQ] = I3611_CMEQ,
97
+ [TCG_COND_GT] = I3611_CMGT,
98
+ [TCG_COND_GE] = I3611_CMGE,
99
+ [TCG_COND_GTU] = I3611_CMHI,
100
+ [TCG_COND_GEU] = I3611_CMHS,
101
+ };
102
+ static const AArch64Insn cmp0_vec_insn[16] = {
103
[TCG_COND_EQ] = I3617_CMEQ0,
104
[TCG_COND_GT] = I3617_CMGT0,
105
[TCG_COND_GE] = I3617_CMGE0,
106
[TCG_COND_LT] = I3617_CMLT0,
107
[TCG_COND_LE] = I3617_CMLE0,
108
};
109
+ static const AArch64Insn cmp0_scalar_insn[16] = {
110
+ [TCG_COND_EQ] = I3612_CMEQ0,
111
+ [TCG_COND_GT] = I3612_CMGT0,
112
+ [TCG_COND_GE] = I3612_CMGE0,
113
+ [TCG_COND_LT] = I3612_CMLT0,
114
+ [TCG_COND_LE] = I3612_CMLE0,
115
+ };
116
117
TCGType type = vecl + TCG_TYPE_V64;
118
unsigned is_q = vecl;
119
+ bool is_scalar = !is_q && vece == MO_64;
120
TCGArg a0, a1, a2, a3;
121
int cmode, imm8;
122
123
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
124
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
125
break;
126
case INDEX_op_add_vec:
127
- tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
128
+ if (is_scalar) {
129
+ tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
130
+ } else {
131
+ tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
132
+ }
133
break;
134
case INDEX_op_sub_vec:
135
- tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
136
+ if (is_scalar) {
137
+ tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
138
+ } else {
139
+ tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
140
+ }
141
break;
142
case INDEX_op_mul_vec:
143
tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
144
break;
145
case INDEX_op_neg_vec:
146
- tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
147
+ if (is_scalar) {
148
+ tcg_out_insn(s, 3612, NEG, vece, a0, a1);
149
+ } else {
150
+ tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
151
+ }
152
break;
153
case INDEX_op_abs_vec:
154
- tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
155
+ if (is_scalar) {
156
+ tcg_out_insn(s, 3612, ABS, vece, a0, a1);
157
+ } else {
158
+ tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
159
+ }
160
break;
161
case INDEX_op_and_vec:
162
if (const_args[2]) {
163
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
164
tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
165
break;
166
case INDEX_op_ssadd_vec:
167
- tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
168
+ if (is_scalar) {
169
+ tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
170
+ } else {
171
+ tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
172
+ }
173
break;
174
case INDEX_op_sssub_vec:
175
- tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
176
+ if (is_scalar) {
177
+ tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
178
+ } else {
179
+ tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
180
+ }
181
break;
182
case INDEX_op_usadd_vec:
183
- tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
184
+ if (is_scalar) {
185
+ tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
186
+ } else {
187
+ tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
188
+ }
189
break;
190
case INDEX_op_ussub_vec:
191
- tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
192
+ if (is_scalar) {
193
+ tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
194
+ } else {
195
+ tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
196
+ }
197
break;
198
case INDEX_op_smax_vec:
199
tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
200
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
201
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
202
break;
203
case INDEX_op_shli_vec:
204
- tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
205
+ if (is_scalar) {
206
+ tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
207
+ } else {
208
+ tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
209
+ }
210
break;
211
case INDEX_op_shri_vec:
212
- tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
213
+ if (is_scalar) {
214
+ tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
215
+ } else {
216
+ tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
217
+ }
218
break;
219
case INDEX_op_sari_vec:
220
- tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
221
+ if (is_scalar) {
222
+ tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
223
+ } else {
224
+ tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
225
+ }
226
break;
227
case INDEX_op_aa64_sli_vec:
228
- tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
229
+ if (is_scalar) {
230
+ tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
231
+ } else {
232
+ tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
233
+ }
234
break;
235
case INDEX_op_shlv_vec:
236
- tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
237
+ if (is_scalar) {
238
+ tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
239
+ } else {
240
+ tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
241
+ }
242
break;
243
case INDEX_op_aa64_sshl_vec:
244
- tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
245
+ if (is_scalar) {
246
+ tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
247
+ } else {
248
+ tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
249
+ }
250
break;
251
case INDEX_op_cmp_vec:
252
{
253
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
254
255
if (cond == TCG_COND_NE) {
256
if (const_args[2]) {
257
- tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
258
+ if (is_scalar) {
259
+ tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
260
+ } else {
261
+ tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
262
+ }
263
} else {
264
- tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
265
+ if (is_scalar) {
266
+ tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
267
+ } else {
268
+ tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
269
+ }
270
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
271
}
272
} else {
273
if (const_args[2]) {
274
- insn = cmp0_insn[cond];
275
- if (insn) {
276
- tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
277
- break;
278
+ if (is_scalar) {
279
+ insn = cmp0_scalar_insn[cond];
280
+ if (insn) {
281
+ tcg_out_insn_3612(s, insn, vece, a0, a1);
282
+ break;
283
+ }
284
+ } else {
285
+ insn = cmp0_vec_insn[cond];
286
+ if (insn) {
287
+ tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
288
+ break;
289
+ }
290
}
291
tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
292
a2 = TCG_VEC_TMP;
293
}
294
- insn = cmp_insn[cond];
295
- if (insn == 0) {
296
- TCGArg t;
297
- t = a1, a1 = a2, a2 = t;
298
- cond = tcg_swap_cond(cond);
299
- insn = cmp_insn[cond];
300
- tcg_debug_assert(insn != 0);
301
+ if (is_scalar) {
302
+ insn = cmp_scalar_insn[cond];
303
+ if (insn == 0) {
304
+ TCGArg t;
305
+ t = a1, a1 = a2, a2 = t;
306
+ cond = tcg_swap_cond(cond);
307
+ insn = cmp_scalar_insn[cond];
308
+ tcg_debug_assert(insn != 0);
309
+ }
310
+ tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
311
+ } else {
312
+ insn = cmp_vec_insn[cond];
313
+ if (insn == 0) {
314
+ TCGArg t;
315
+ t = a1, a1 = a2, a2 = t;
316
+ cond = tcg_swap_cond(cond);
317
+ insn = cmp_vec_insn[cond];
318
+ tcg_debug_assert(insn != 0);
319
+ }
320
+ tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
321
}
322
- tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
323
}
324
}
325
break;
326
--
327
2.25.1
328
329
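The rule that the patch above implements can be reduced to a single predicate: a non-q (64-bit wide) vector operation whose element size is also 64 bits has no valid "1D" arrangement, so it must be emitted with the scalar instruction forms. A minimal standalone sketch of that dispatch (illustrative only; the MO_* values are redefined locally rather than taken from QEMU headers):

#include <assert.h>
#include <stdbool.h>

enum { MO_8, MO_16, MO_32, MO_64 };

static bool use_scalar_form(bool is_q, unsigned vece)
{
    return !is_q && vece == MO_64;      /* mirrors "is_scalar" in the patch */
}

int main(void)
{
    assert(use_scalar_form(false, MO_64));   /* one D element: scalar insn  */
    assert(!use_scalar_form(true, MO_64));   /* 2D vector: normal vector op */
    assert(!use_scalar_form(false, MO_32));  /* 2S vector: normal vector op */
    return 0;
}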
Use the provided cpu_ldst.h interfaces. This fixes the build vs
the unconverted uses of g2h(), adds missed memory trace events,
and correctly recognizes when a SIGSEGV belongs to the guest via
set_helper_retaddr().

Fixes: 3e8f1628e864
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 73 +++++++++++++++++++++----------------------------------
 1 file changed, 28 insertions(+), 45 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
18
return result;
19
}
20
21
-#ifdef CONFIG_SOFTMMU
22
-# define qemu_ld_ub \
23
- helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
24
-# define qemu_ld_leuw \
25
- helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
26
-# define qemu_ld_leul \
27
- helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
28
-# define qemu_ld_leq \
29
- helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
30
-# define qemu_ld_beuw \
31
- helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
32
-# define qemu_ld_beul \
33
- helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
34
-# define qemu_ld_beq \
35
- helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
36
-# define qemu_st_b(X) \
37
- helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
38
-# define qemu_st_lew(X) \
39
- helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
40
-# define qemu_st_lel(X) \
41
- helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
42
-# define qemu_st_leq(X) \
43
- helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
44
-# define qemu_st_bew(X) \
45
- helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
46
-# define qemu_st_bel(X) \
47
- helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
48
-# define qemu_st_beq(X) \
49
- helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
50
-#else
51
-# define qemu_ld_ub ldub_p(g2h(taddr))
52
-# define qemu_ld_leuw lduw_le_p(g2h(taddr))
53
-# define qemu_ld_leul (uint32_t)ldl_le_p(g2h(taddr))
54
-# define qemu_ld_leq ldq_le_p(g2h(taddr))
55
-# define qemu_ld_beuw lduw_be_p(g2h(taddr))
56
-# define qemu_ld_beul (uint32_t)ldl_be_p(g2h(taddr))
57
-# define qemu_ld_beq ldq_be_p(g2h(taddr))
58
-# define qemu_st_b(X) stb_p(g2h(taddr), X)
59
-# define qemu_st_lew(X) stw_le_p(g2h(taddr), X)
60
-# define qemu_st_lel(X) stl_le_p(g2h(taddr), X)
61
-# define qemu_st_leq(X) stq_le_p(g2h(taddr), X)
62
-# define qemu_st_bew(X) stw_be_p(g2h(taddr), X)
63
-# define qemu_st_bel(X) stl_be_p(g2h(taddr), X)
64
-# define qemu_st_beq(X) stq_be_p(g2h(taddr), X)
65
-#endif
66
+#define qemu_ld_ub \
67
+ cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
68
+#define qemu_ld_leuw \
69
+ cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
70
+#define qemu_ld_leul \
71
+ cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
72
+#define qemu_ld_leq \
73
+ cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
74
+#define qemu_ld_beuw \
75
+ cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
76
+#define qemu_ld_beul \
77
+ cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
78
+#define qemu_ld_beq \
79
+ cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
80
+#define qemu_st_b(X) \
81
+ cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
82
+#define qemu_st_lew(X) \
83
+ cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
84
+#define qemu_st_lel(X) \
85
+ cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
86
+#define qemu_st_leq(X) \
87
+ cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
88
+#define qemu_st_bew(X) \
89
+ cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
90
+#define qemu_st_bel(X) \
91
+ cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
92
+#define qemu_st_beq(X) \
93
+ cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
94
95
#if TCG_TARGET_REG_BITS == 64
96
# define CASE_32_64(x) \
97
--
98
2.25.1
99
100
Allow other places in tcg to restart with a smaller tb.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which)
     s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
+/* Signal overflow, starting over with fewer guest insns. */
+static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
+{
+    siglongjmp(s->jmp_trans, -2);
+}
+
 #define C_PFX1(P, A) P##A
 #define C_PFX2(P, A, B) P##A##_##B
 #define C_PFX3(P, A, B, C) P##A##_##B##_##C
@@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_temp_alloc(TCGContext *s)
     int n = s->nb_temps++;
 
     if (n >= TCG_MAX_TEMPS) {
-        /* Signal overflow, starting over with fewer guest insns. */
-        siglongjmp(s->jmp_trans, -2);
+        tcg_raise_tb_overflow(s);
     }
     return memset(&s->temps[n], 0, sizeof(TCGTemp));
 }
--
2.25.1
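The helper introduced above exists so that any point in the translator can bail out and let the top level retry with a smaller instruction budget. A generic standalone sketch of that sigsetjmp/siglongjmp restart pattern (plain POSIX C, not QEMU code; the function and variable names are illustrative, and only the -2 status mirrors the patch):

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf jmp_trans;

static void raise_overflow(void)
{
    siglongjmp(jmp_trans, -2);          /* unwind back to the translator loop */
}

static int translate(int max_insns)
{
    for (int i = 0; i < max_insns; i++) {
        if (i == 40) {                  /* pretend we ran out of temps here */
            raise_overflow();
        }
    }
    return max_insns;                   /* translated everything */
}

int main(void)
{
    volatile int max_insns = 64;
    if (sigsetjmp(jmp_trans, 0) == -2) {
        max_insns /= 2;                 /* restart with fewer guest insns */
    }
    printf("translated %d insns (budget %d)\n",
           translate(max_insns), max_insns);
    return 0;
}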
1
These are easier to set and test when they have their own fields.
1
The use in tcg_tb_lookup is given a random pc that comes from the pc
2
Reduce the size of alias_index and sort_index to 4 bits, which is
2
of a signal handler. Do not assert that the pointer is already within
3
sufficient for TCG_MAX_OP_ARGS. This leaves only the bits indicating
3
the code gen buffer at all, much less the writable mirror of it.
4
constants within the ct field.
5
4
6
Move all initialization to allocation time, rather than init
5
Fixes: db0c51a3803
7
individual fields in process_op_defs.
8
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
7
---
11
include/tcg/tcg.h | 14 +++++++-------
8
tcg/tcg.c | 20 ++++++++++++++++++--
12
tcg/tcg.c | 28 ++++++++++++----------------
9
1 file changed, 18 insertions(+), 2 deletions(-)
13
2 files changed, 19 insertions(+), 23 deletions(-)
14
10
15
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/tcg/tcg.h
18
+++ b/include/tcg/tcg.h
19
@@ -XXX,XX +XXX,XX @@ int64_t tcg_cpu_exec_time(void);
20
void tcg_dump_info(void);
21
void tcg_dump_op_count(void);
22
23
-#define TCG_CT_ALIAS 0x80
24
-#define TCG_CT_IALIAS 0x40
25
-#define TCG_CT_NEWREG 0x20 /* output requires a new register */
26
-#define TCG_CT_CONST 0x02 /* any constant of register size */
27
+#define TCG_CT_CONST 1 /* any constant of register size */
28
29
typedef struct TCGArgConstraint {
30
- uint16_t ct;
31
- uint8_t alias_index;
32
- uint8_t sort_index;
33
+ unsigned ct : 16;
34
+ unsigned alias_index : 4;
35
+ unsigned sort_index : 4;
36
+ bool oalias : 1;
37
+ bool ialias : 1;
38
+ bool newreg : 1;
39
TCGRegSet regs;
40
} TCGArgConstraint;
41
42
diff --git a/tcg/tcg.c b/tcg/tcg.c
11
diff --git a/tcg/tcg.c b/tcg/tcg.c
43
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
44
--- a/tcg/tcg.c
13
--- a/tcg/tcg.c
45
+++ b/tcg/tcg.c
14
+++ b/tcg/tcg.c
46
@@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s)
15
@@ -XXX,XX +XXX,XX @@ static void tcg_region_trees_init(void)
47
total_args += n;
48
}
16
}
49
17
}
50
- args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
18
51
+ args_ct = g_new0(TCGArgConstraint, total_args);
19
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp)
52
20
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
53
for(op = 0; op < NB_OPS; op++) {
21
{
54
def = &tcg_op_defs[op];
22
- void *p = tcg_splitwx_to_rw(cp);
55
@@ -XXX,XX +XXX,XX @@ static int get_constraint_priority(const TCGOpDef *def, int k)
23
size_t region_idx;
56
const TCGArgConstraint *arg_ct = &def->args_ct[k];
24
57
int n;
25
+ /*
58
26
+ * Like tcg_splitwx_to_rw, with no assert. The pc may come from
59
- if (arg_ct->ct & TCG_CT_ALIAS) {
27
+ * a signal handler over which the caller has no control.
60
+ if (arg_ct->oalias) {
28
+ */
61
/* an alias is equivalent to a single register */
29
+ if (!in_code_gen_buffer(p)) {
62
n = 1;
30
+ p -= tcg_splitwx_diff;
31
+ if (!in_code_gen_buffer(p)) {
32
+ return NULL;
33
+ }
34
+ }
35
+
36
if (p < region.start_aligned) {
37
region_idx = 0;
63
} else {
38
} else {
64
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
39
@@ -XXX,XX +XXX,XX @@ void tcg_tb_insert(TranslationBlock *tb)
65
/* Incomplete TCGTargetOpDef entry. */
40
{
66
tcg_debug_assert(ct_str != NULL);
41
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
67
42
68
- def->args_ct[i].regs = 0;
43
+ g_assert(rt != NULL);
69
- def->args_ct[i].ct = 0;
44
qemu_mutex_lock(&rt->lock);
70
while (*ct_str != '\0') {
45
g_tree_insert(rt->tree, &tb->tc, tb);
71
switch(*ct_str) {
46
qemu_mutex_unlock(&rt->lock);
72
case '0' ... '9':
47
@@ -XXX,XX +XXX,XX @@ void tcg_tb_remove(TranslationBlock *tb)
73
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
48
{
74
tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
49
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
75
tcg_debug_assert(oarg < def->nb_oargs);
50
76
tcg_debug_assert(def->args_ct[oarg].regs != 0);
51
+ g_assert(rt != NULL);
77
- /* TCG_CT_ALIAS is for the output arguments.
52
qemu_mutex_lock(&rt->lock);
78
- The input is tagged with TCG_CT_IALIAS. */
53
g_tree_remove(rt->tree, &tb->tc);
79
def->args_ct[i] = def->args_ct[oarg];
54
qemu_mutex_unlock(&rt->lock);
80
- def->args_ct[oarg].ct |= TCG_CT_ALIAS;
55
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
81
+ /* The output sets oalias. */
56
TranslationBlock *tb;
82
+ def->args_ct[oarg].oalias = true;
57
struct tb_tc s = { .ptr = (void *)tc_ptr };
83
def->args_ct[oarg].alias_index = i;
58
84
- def->args_ct[i].ct |= TCG_CT_IALIAS;
59
+ if (rt == NULL) {
85
+ /* The input sets ialias. */
60
+ return NULL;
86
+ def->args_ct[i].ialias = true;
61
+ }
87
def->args_ct[i].alias_index = oarg;
62
+
88
}
63
qemu_mutex_lock(&rt->lock);
89
ct_str++;
64
tb = g_tree_lookup(rt->tree, &s);
90
break;
65
qemu_mutex_unlock(&rt->lock);
91
case '&':
92
- def->args_ct[i].ct |= TCG_CT_NEWREG;
93
+ def->args_ct[i].newreg = true;
94
ct_str++;
95
break;
96
case 'i':
97
@@ -XXX,XX +XXX,XX @@ static void liveness_pass_1(TCGContext *s)
98
set = *pset;
99
100
set &= ct->regs;
101
- if (ct->ct & TCG_CT_IALIAS) {
102
+ if (ct->ialias) {
103
set &= op->output_pref[ct->alias_index];
104
}
105
/* If the combination is not possible, restart. */
106
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
107
}
108
109
i_preferred_regs = o_preferred_regs = 0;
110
- if (arg_ct->ct & TCG_CT_IALIAS) {
111
+ if (arg_ct->ialias) {
112
o_preferred_regs = op->output_pref[arg_ct->alias_index];
113
if (ts->fixed_reg) {
114
/* if fixed register, we must allocate a new register
115
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
116
reg = ts->reg;
117
for (k2 = 0 ; k2 < k ; k2++) {
118
i2 = def->args_ct[nb_oargs + k2].sort_index;
119
- if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
120
- reg == new_args[i2]) {
121
+ if (def->args_ct[i2].ialias && reg == new_args[i2]) {
122
goto allocate_in_reg;
123
}
124
}
125
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
126
/* ENV should not be modified. */
127
tcg_debug_assert(!ts->fixed_reg);
128
129
- if ((arg_ct->ct & TCG_CT_ALIAS)
130
- && !const_args[arg_ct->alias_index]) {
131
+ if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
132
reg = new_args[arg_ct->alias_index];
133
- } else if (arg_ct->ct & TCG_CT_NEWREG) {
134
+ } else if (arg_ct->newreg) {
135
reg = tcg_reg_alloc(s, arg_ct->regs,
136
i_allocated_regs | o_allocated_regs,
137
op->output_pref[k], ts->indirect_base);
138
--
2.25.1

New patch
1
Use CASE_32_64 and CASE_64 to reduce ifdefs and merge
cases that are identical between 32-bit and 64-bit hosts.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org>
[PMD: Split patch as 1/5]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210218232840.1760806-2-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci/tcg-target.c.inc | 85 +++++++++++++++++-----------------------
 1 file changed, 37 insertions(+), 48 deletions(-)
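As a quick illustration (sketch, using the add case from this patch;
assumes a 64-bit host where both the _i32 and _i64 opcodes exist):

    /* CASE_32_64(add) expands to both case labels on a 64-bit host,
       and to only the _i32 label when TCG_TARGET_REG_BITS == 32. */
    CASE_32_64(add)
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        break;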
14
15
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tci/tcg-target.c.inc
18
+++ b/tcg/tci/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
20
old_code_ptr[1] = s->code_ptr - old_code_ptr;
21
}
22
23
+#if TCG_TARGET_REG_BITS == 64
24
+# define CASE_32_64(x) \
25
+ case glue(glue(INDEX_op_, x), _i64): \
26
+ case glue(glue(INDEX_op_, x), _i32):
27
+# define CASE_64(x) \
28
+ case glue(glue(INDEX_op_, x), _i64):
29
+#else
30
+# define CASE_32_64(x) \
31
+ case glue(glue(INDEX_op_, x), _i32):
32
+# define CASE_64(x)
33
+#endif
34
+
35
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
36
const int *const_args)
37
{
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
39
case INDEX_op_exit_tb:
40
tcg_out64(s, args[0]);
41
break;
42
+
43
case INDEX_op_goto_tb:
44
if (s->tb_jmp_insn_offset) {
45
/* Direct jump method. */
46
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
47
tcg_debug_assert(args[2] == (int32_t)args[2]);
48
tcg_out32(s, args[2]);
49
break;
50
- case INDEX_op_add_i32:
51
- case INDEX_op_sub_i32:
52
- case INDEX_op_mul_i32:
53
- case INDEX_op_and_i32:
54
- case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */
55
- case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */
56
- case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */
57
- case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */
58
- case INDEX_op_or_i32:
59
- case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */
60
- case INDEX_op_xor_i32:
61
- case INDEX_op_shl_i32:
62
- case INDEX_op_shr_i32:
63
- case INDEX_op_sar_i32:
64
- case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
65
- case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
66
+
67
+ CASE_32_64(add)
68
+ CASE_32_64(sub)
69
+ CASE_32_64(mul)
70
+ CASE_32_64(and)
71
+ CASE_32_64(or)
72
+ CASE_32_64(xor)
73
+ CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
74
+ CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
75
+ CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
76
+ CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
77
+ CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
78
+ CASE_32_64(shl)
79
+ CASE_32_64(shr)
80
+ CASE_32_64(sar)
81
+ CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
82
+ CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
83
+ CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */
84
+ CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
85
+ CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
86
+ CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
87
tcg_out_r(s, args[0]);
88
tcg_out_r(s, args[1]);
89
tcg_out_r(s, args[2]);
90
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
91
break;
92
93
#if TCG_TARGET_REG_BITS == 64
94
- case INDEX_op_add_i64:
95
- case INDEX_op_sub_i64:
96
- case INDEX_op_mul_i64:
97
- case INDEX_op_and_i64:
98
- case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */
99
- case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */
100
- case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */
101
- case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */
102
- case INDEX_op_or_i64:
103
- case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). */
104
- case INDEX_op_xor_i64:
105
- case INDEX_op_shl_i64:
106
- case INDEX_op_shr_i64:
107
- case INDEX_op_sar_i64:
108
- case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
109
- case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
110
- case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
111
- case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
112
- case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
113
- case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
114
- tcg_out_r(s, args[0]);
115
- tcg_out_r(s, args[1]);
116
- tcg_out_r(s, args[2]);
117
- break;
118
case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */
119
tcg_out_r(s, args[0]);
120
tcg_out_r(s, args[1]);
121
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
122
tcg_out_r(s, args[0]);
123
tcg_out_r(s, args[1]);
124
break;
125
- case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
126
- case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
127
- case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
128
- case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
129
- tcg_out_r(s, args[0]);
130
- tcg_out_r(s, args[1]);
131
- tcg_out_r(s, args[2]);
132
- break;
133
+
134
#if TCG_TARGET_REG_BITS == 32
135
case INDEX_op_add2_i32:
136
case INDEX_op_sub2_i32:
137
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
138
}
139
tcg_out_i(s, *args++);
140
break;
141
+
142
case INDEX_op_mb:
143
break;
144
+
145
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
146
case INDEX_op_mov_i64:
147
case INDEX_op_call: /* Always emitted via tcg_out_call. */
148
--
149
2.25.1
150
151
New patch
1
Use CASE_32_64 and CASE_64 to reduce ifdefs and merge
cases that are identical between 32-bit and 64-bit hosts.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org>
[PMD: Split patch as 2/5]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210218232840.1760806-3-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci/tcg-target.c.inc | 35 ++++++++++++++---------------------
 1 file changed, 14 insertions(+), 21 deletions(-)
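Sketch of how the two macros differ, taken from the cases below:

    CASE_32_64(ext8s)    /* emits ext8s_i32 and, on 64-bit hosts, ext8s_i64 */
    CASE_64(ext32s)      /* emits ext32s_i64 only; expands to nothing on
                            32-bit hosts, so no #ifdef is needed */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;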
14
15
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tci/tcg-target.c.inc
18
+++ b/tcg/tci/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
20
tcg_out8(s, args[2]); /* condition */
21
tci_out_label(s, arg_label(args[3]));
22
break;
23
- case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
24
- case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
25
- case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
26
- case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */
27
- case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */
28
- case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */
29
- case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */
30
- case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */
31
- case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */
32
- case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */
33
- case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */
34
- case INDEX_op_ext_i32_i64:
35
- case INDEX_op_extu_i32_i64:
36
#endif /* TCG_TARGET_REG_BITS == 64 */
37
- case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */
38
- case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */
39
- case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */
40
- case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */
41
- case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */
42
- case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */
43
- case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
44
- case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
45
+
46
+ CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
47
+ CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
48
+ CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */
49
+ CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */
50
+ CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */
51
+ CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */
52
+ CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */
53
+ CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */
54
+ CASE_64(ext_i32)
55
+ CASE_64(extu_i32)
56
+ CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */
57
+ CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */
58
+ CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */
59
tcg_out_r(s, args[0]);
60
tcg_out_r(s, args[1]);
61
break;
62
--
63
2.25.1
64
65
diff view generated by jsdifflib
New patch
1
Use CASE_32_64 and CASE_64 to reduce ifdefs and merge
2
cases that are identical between 32-bit and 64-bit hosts.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org>
7
[PMD: Split patch as 3/5]
8
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-Id: <20210218232840.1760806-4-f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
tcg/tci/tcg-target.c.inc | 12 ++----------
13
1 file changed, 2 insertions(+), 10 deletions(-)
14
15
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tci/tcg-target.c.inc
18
+++ b/tcg/tci/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
20
tcg_out_r(s, args[1]);
21
tcg_out_r(s, args[2]);
22
break;
23
- case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */
24
+
25
+ CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
26
tcg_out_r(s, args[0]);
27
tcg_out_r(s, args[1]);
28
tcg_out_r(s, args[2]);
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
30
break;
31
32
#if TCG_TARGET_REG_BITS == 64
33
- case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */
34
- tcg_out_r(s, args[0]);
35
- tcg_out_r(s, args[1]);
36
- tcg_out_r(s, args[2]);
37
- tcg_debug_assert(args[3] <= UINT8_MAX);
38
- tcg_out8(s, args[3]);
39
- tcg_debug_assert(args[4] <= UINT8_MAX);
40
- tcg_out8(s, args[4]);
41
- break;
42
case INDEX_op_brcond_i64:
43
tcg_out_r(s, args[0]);
44
tcg_out_r(s, args[1]);
45
--
46
2.25.1
47
48
New patch
1
Use CASE_32_64 and CASE_64 to reduce ifdefs and merge
2
cases that are identical between 32-bit and 64-bit hosts.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org>
7
[PMD: Split patch as 4/5]
8
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-Id: <20210218232840.1760806-5-f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
tcg/tci/tcg-target.c.inc | 23 ++++++-----------------
13
1 file changed, 6 insertions(+), 17 deletions(-)
14
15
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tci/tcg-target.c.inc
18
+++ b/tcg/tci/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
20
}
21
set_jmp_reset_offset(s, args[0]);
22
break;
23
+
24
case INDEX_op_br:
25
tci_out_label(s, arg_label(args[0]));
26
break;
27
- case INDEX_op_setcond_i32:
28
+
29
+ CASE_32_64(setcond)
30
tcg_out_r(s, args[0]);
31
tcg_out_r(s, args[1]);
32
tcg_out_r(s, args[2]);
33
tcg_out8(s, args[3]); /* condition */
34
break;
35
+
36
#if TCG_TARGET_REG_BITS == 32
37
case INDEX_op_setcond2_i32:
38
/* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
40
tcg_out_r(s, args[4]);
41
tcg_out8(s, args[5]); /* condition */
42
break;
43
-#elif TCG_TARGET_REG_BITS == 64
44
- case INDEX_op_setcond_i64:
45
- tcg_out_r(s, args[0]);
46
- tcg_out_r(s, args[1]);
47
- tcg_out_r(s, args[2]);
48
- tcg_out8(s, args[3]); /* condition */
49
- break;
50
#endif
51
case INDEX_op_ld8u_i32:
52
case INDEX_op_ld8s_i32:
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
54
tcg_out8(s, args[4]);
55
break;
56
57
-#if TCG_TARGET_REG_BITS == 64
58
- case INDEX_op_brcond_i64:
59
+ CASE_32_64(brcond)
60
tcg_out_r(s, args[0]);
61
tcg_out_r(s, args[1]);
62
tcg_out8(s, args[2]); /* condition */
63
tci_out_label(s, arg_label(args[3]));
64
break;
65
-#endif /* TCG_TARGET_REG_BITS == 64 */
66
67
CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
68
CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
69
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
70
tcg_out_r(s, args[3]);
71
break;
72
#endif
73
- case INDEX_op_brcond_i32:
74
- tcg_out_r(s, args[0]);
75
- tcg_out_r(s, args[1]);
76
- tcg_out8(s, args[2]); /* condition */
77
- tci_out_label(s, arg_label(args[3]));
78
- break;
79
+
80
case INDEX_op_qemu_ld_i32:
81
tcg_out_r(s, *args++);
82
tcg_out_r(s, *args++);
83
--
84
2.25.1
85
86
New patch
1
Use CASE_32_64 and CASE_64 to reduce ifdefs and merge
2
cases that are identical between 32-bit and 64-bit hosts.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Message-Id: <20210217202036.1724901-5-richard.henderson@linaro.org>
7
[PMD: Split patch as 5/5]
8
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Message-Id: <20210218232840.1760806-6-f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
tcg/tci/tcg-target.c.inc | 49 ++++++++++++----------------------------
13
1 file changed, 14 insertions(+), 35 deletions(-)
14
15
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tci/tcg-target.c.inc
18
+++ b/tcg/tci/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
20
tcg_out8(s, args[5]); /* condition */
21
break;
22
#endif
23
- case INDEX_op_ld8u_i32:
24
- case INDEX_op_ld8s_i32:
25
- case INDEX_op_ld16u_i32:
26
- case INDEX_op_ld16s_i32:
27
+
28
+ CASE_32_64(ld8u)
29
+ CASE_32_64(ld8s)
30
+ CASE_32_64(ld16u)
31
+ CASE_32_64(ld16s)
32
case INDEX_op_ld_i32:
33
- case INDEX_op_st8_i32:
34
- case INDEX_op_st16_i32:
35
+ CASE_64(ld32u)
36
+ CASE_64(ld32s)
37
+ CASE_64(ld)
38
+ CASE_32_64(st8)
39
+ CASE_32_64(st16)
40
case INDEX_op_st_i32:
41
- case INDEX_op_ld8u_i64:
42
- case INDEX_op_ld8s_i64:
43
- case INDEX_op_ld16u_i64:
44
- case INDEX_op_ld16s_i64:
45
- case INDEX_op_ld32u_i64:
46
- case INDEX_op_ld32s_i64:
47
- case INDEX_op_ld_i64:
48
- case INDEX_op_st8_i64:
49
- case INDEX_op_st16_i64:
50
- case INDEX_op_st32_i64:
51
- case INDEX_op_st_i64:
52
+ CASE_64(st32)
53
+ CASE_64(st)
54
stack_bounds_check(args[1], args[2]);
55
tcg_out_r(s, args[0]);
56
tcg_out_r(s, args[1]);
57
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
58
#endif
59
60
case INDEX_op_qemu_ld_i32:
61
- tcg_out_r(s, *args++);
62
- tcg_out_r(s, *args++);
63
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
64
- tcg_out_r(s, *args++);
65
- }
66
- tcg_out_i(s, *args++);
67
- break;
68
- case INDEX_op_qemu_ld_i64:
69
- tcg_out_r(s, *args++);
70
- if (TCG_TARGET_REG_BITS == 32) {
71
- tcg_out_r(s, *args++);
72
- }
73
- tcg_out_r(s, *args++);
74
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
75
- tcg_out_r(s, *args++);
76
- }
77
- tcg_out_i(s, *args++);
78
- break;
79
case INDEX_op_qemu_st_i32:
80
tcg_out_r(s, *args++);
81
tcg_out_r(s, *args++);
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
83
}
84
tcg_out_i(s, *args++);
85
break;
86
+
87
+ case INDEX_op_qemu_ld_i64:
88
case INDEX_op_qemu_st_i64:
89
tcg_out_r(s, *args++);
90
if (TCG_TARGET_REG_BITS == 32) {
91
--
92
2.25.1
93
94
New patch
1
Use explicit casts for ext8u opcodes, and allow truncation
2
to happen with the store for st8 opcodes.
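The shape of the change, as a sketch (the interpreter keeps full-width
registers, so the narrowing moves to the one place it matters):

    /* Before: a dedicated 8-bit read helper did the truncation. */
    t1 = tci_read_r8(regs, &tb_ptr);
    tci_write_reg(regs, t0, t1);

    /* After: plain register read, explicit cast at the use site. */
    t1 = tci_read_r(regs, &tb_ptr);
    tci_write_reg(regs, t0, (uint8_t)t1);

For st8 no cast is needed at all: the store through a uint8_t pointer
already truncates.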
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tci.c | 23 +++++------------------
8
1 file changed, 5 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/tci.c b/tcg/tci.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tci.c
13
+++ b/tcg/tci.c
14
@@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
15
}
16
#endif
17
18
-static uint8_t tci_read_reg8(const tcg_target_ulong *regs, TCGReg index)
19
-{
20
- return (uint8_t)tci_read_reg(regs, index);
21
-}
22
-
23
static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index)
24
{
25
return (uint16_t)tci_read_reg(regs, index);
26
@@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
27
return value;
28
}
29
30
-/* Read indexed register (8 bit) from bytecode. */
31
-static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
32
-{
33
- uint8_t value = tci_read_reg8(regs, **tb_ptr);
34
- *tb_ptr += 1;
35
- return value;
36
-}
37
-
38
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
39
/* Read indexed register (8 bit signed) from bytecode. */
40
static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
41
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
42
tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
43
break;
44
CASE_32_64(st8)
45
- t0 = tci_read_r8(regs, &tb_ptr);
46
+ t0 = tci_read_r(regs, &tb_ptr);
47
t1 = tci_read_r(regs, &tb_ptr);
48
t2 = tci_read_s32(&tb_ptr);
49
*(uint8_t *)(t1 + t2) = t0;
50
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
51
#if TCG_TARGET_HAS_ext8u_i32
52
case INDEX_op_ext8u_i32:
53
t0 = *tb_ptr++;
54
- t1 = tci_read_r8(regs, &tb_ptr);
55
- tci_write_reg(regs, t0, t1);
56
+ t1 = tci_read_r(regs, &tb_ptr);
57
+ tci_write_reg(regs, t0, (uint8_t)t1);
58
break;
59
#endif
60
#if TCG_TARGET_HAS_ext16u_i32
61
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
62
#if TCG_TARGET_HAS_ext8u_i64
63
case INDEX_op_ext8u_i64:
64
t0 = *tb_ptr++;
65
- t1 = tci_read_r8(regs, &tb_ptr);
66
- tci_write_reg(regs, t0, t1);
67
+ t1 = tci_read_r(regs, &tb_ptr);
68
+ tci_write_reg(regs, t0, (uint8_t)t1);
69
break;
70
#endif
71
#if TCG_TARGET_HAS_ext8s_i64
72
--
73
2.25.1
74
75
New patch
1
Use explicit casts for ext8s opcodes.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/tci.c | 25 ++++---------------------
7
1 file changed, 4 insertions(+), 21 deletions(-)
8
9
diff --git a/tcg/tci.c b/tcg/tci.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tci.c
12
+++ b/tcg/tci.c
13
@@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
14
return regs[index];
15
}
16
17
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
18
-static int8_t tci_read_reg8s(const tcg_target_ulong *regs, TCGReg index)
19
-{
20
- return (int8_t)tci_read_reg(regs, index);
21
-}
22
-#endif
23
-
24
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
25
static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index)
26
{
27
@@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
28
return value;
29
}
30
31
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
32
-/* Read indexed register (8 bit signed) from bytecode. */
33
-static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
34
-{
35
- int8_t value = tci_read_reg8s(regs, **tb_ptr);
36
- *tb_ptr += 1;
37
- return value;
38
-}
39
-#endif
40
-
41
/* Read indexed register (16 bit) from bytecode. */
42
static uint16_t tci_read_r16(const tcg_target_ulong *regs,
43
const uint8_t **tb_ptr)
44
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
45
#if TCG_TARGET_HAS_ext8s_i32
46
case INDEX_op_ext8s_i32:
47
t0 = *tb_ptr++;
48
- t1 = tci_read_r8s(regs, &tb_ptr);
49
- tci_write_reg(regs, t0, t1);
50
+ t1 = tci_read_r(regs, &tb_ptr);
51
+ tci_write_reg(regs, t0, (int8_t)t1);
52
break;
53
#endif
54
#if TCG_TARGET_HAS_ext16s_i32
55
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
56
#if TCG_TARGET_HAS_ext8s_i64
57
case INDEX_op_ext8s_i64:
58
t0 = *tb_ptr++;
59
- t1 = tci_read_r8s(regs, &tb_ptr);
60
- tci_write_reg(regs, t0, t1);
61
+ t1 = tci_read_r(regs, &tb_ptr);
62
+ tci_write_reg(regs, t0, (int8_t)t1);
63
break;
64
#endif
65
#if TCG_TARGET_HAS_ext16s_i64
66
--
67
2.25.1
68
69
New patch
1
Use explicit casts for ext16u opcodes, and allow truncation
2
to happen with the store for st16 opcodes, and with the call
3
for bswap16 opcodes.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/tci.c | 28 +++++++---------------------
9
1 file changed, 7 insertions(+), 21 deletions(-)
10
11
diff --git a/tcg/tci.c b/tcg/tci.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/tci.c
14
+++ b/tcg/tci.c
15
@@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
16
}
17
#endif
18
19
-static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index)
20
-{
21
- return (uint16_t)tci_read_reg(regs, index);
22
-}
23
-
24
static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index)
25
{
26
return (uint32_t)tci_read_reg(regs, index);
27
@@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
28
return value;
29
}
30
31
-/* Read indexed register (16 bit) from bytecode. */
32
-static uint16_t tci_read_r16(const tcg_target_ulong *regs,
33
- const uint8_t **tb_ptr)
34
-{
35
- uint16_t value = tci_read_reg16(regs, **tb_ptr);
36
- *tb_ptr += 1;
37
- return value;
38
-}
39
-
40
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
41
/* Read indexed register (16 bit signed) from bytecode. */
42
static int16_t tci_read_r16s(const tcg_target_ulong *regs,
43
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
44
*(uint8_t *)(t1 + t2) = t0;
45
break;
46
CASE_32_64(st16)
47
- t0 = tci_read_r16(regs, &tb_ptr);
48
+ t0 = tci_read_r(regs, &tb_ptr);
49
t1 = tci_read_r(regs, &tb_ptr);
50
t2 = tci_read_s32(&tb_ptr);
51
*(uint16_t *)(t1 + t2) = t0;
52
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
53
#if TCG_TARGET_HAS_ext16u_i32
54
case INDEX_op_ext16u_i32:
55
t0 = *tb_ptr++;
56
- t1 = tci_read_r16(regs, &tb_ptr);
57
- tci_write_reg(regs, t0, t1);
58
+ t1 = tci_read_r(regs, &tb_ptr);
59
+ tci_write_reg(regs, t0, (uint16_t)t1);
60
break;
61
#endif
62
#if TCG_TARGET_HAS_bswap16_i32
63
case INDEX_op_bswap16_i32:
64
t0 = *tb_ptr++;
65
- t1 = tci_read_r16(regs, &tb_ptr);
66
+ t1 = tci_read_r(regs, &tb_ptr);
67
tci_write_reg(regs, t0, bswap16(t1));
68
break;
69
#endif
70
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
71
#if TCG_TARGET_HAS_ext16u_i64
72
case INDEX_op_ext16u_i64:
73
t0 = *tb_ptr++;
74
- t1 = tci_read_r16(regs, &tb_ptr);
75
- tci_write_reg(regs, t0, t1);
76
+ t1 = tci_read_r(regs, &tb_ptr);
77
+ tci_write_reg(regs, t0, (uint16_t)t1);
78
break;
79
#endif
80
#if TCG_TARGET_HAS_ext32s_i64
81
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
82
#if TCG_TARGET_HAS_bswap16_i64
83
case INDEX_op_bswap16_i64:
84
t0 = *tb_ptr++;
85
- t1 = tci_read_r16(regs, &tb_ptr);
86
+ t1 = tci_read_r(regs, &tb_ptr);
87
tci_write_reg(regs, t0, bswap16(t1));
88
break;
89
#endif
90
--
91
2.25.1
92
93
New patch
1
Use explicit casts for ext16s opcodes.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/tci.c | 26 ++++----------------------
7
1 file changed, 4 insertions(+), 22 deletions(-)
8
9
diff --git a/tcg/tci.c b/tcg/tci.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tci.c
12
+++ b/tcg/tci.c
13
@@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
14
return regs[index];
15
}
16
17
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
18
-static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index)
19
-{
20
- return (int16_t)tci_read_reg(regs, index);
21
-}
22
-#endif
23
-
24
#if TCG_TARGET_REG_BITS == 64
25
static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
26
{
27
@@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
28
return value;
29
}
30
31
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
32
-/* Read indexed register (16 bit signed) from bytecode. */
33
-static int16_t tci_read_r16s(const tcg_target_ulong *regs,
34
- const uint8_t **tb_ptr)
35
-{
36
- int16_t value = tci_read_reg16s(regs, **tb_ptr);
37
- *tb_ptr += 1;
38
- return value;
39
-}
40
-#endif
41
-
42
/* Read indexed register (32 bit) from bytecode. */
43
static uint32_t tci_read_r32(const tcg_target_ulong *regs,
44
const uint8_t **tb_ptr)
45
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
46
#if TCG_TARGET_HAS_ext16s_i32
47
case INDEX_op_ext16s_i32:
48
t0 = *tb_ptr++;
49
- t1 = tci_read_r16s(regs, &tb_ptr);
50
- tci_write_reg(regs, t0, t1);
51
+ t1 = tci_read_r(regs, &tb_ptr);
52
+ tci_write_reg(regs, t0, (int16_t)t1);
53
break;
54
#endif
55
#if TCG_TARGET_HAS_ext8u_i32
56
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
57
#if TCG_TARGET_HAS_ext16s_i64
58
case INDEX_op_ext16s_i64:
59
t0 = *tb_ptr++;
60
- t1 = tci_read_r16s(regs, &tb_ptr);
61
- tci_write_reg(regs, t0, t1);
62
+ t1 = tci_read_r(regs, &tb_ptr);
63
+ tci_write_reg(regs, t0, (int16_t)t1);
64
break;
65
#endif
66
#if TCG_TARGET_HAS_ext16u_i64
67
--
68
2.25.1
69
70
1
With larger vector sizes, it turns out oprsz == maxsz, and we only
need to represent mismatch for oprsz <= 32.  We do, however, need
to represent larger oprsz and do so without reducing SIMD_DATA_BITS.

Reduce the size of the oprsz field and increase the maxsz field.
Steal the oprsz value of 24 to indicate equality with maxsz.

Tested-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-gvec-desc.h | 38 ++++++++++++++++++++++++-------------
 tcg/tcg-op-gvec.c           | 35 ++++++++++++++++++++++++++--------
 2 files changed, 52 insertions(+), 21 deletions(-)

Use explicit casts for ext32u opcodes, and allow truncation
to happen for other users.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 122 ++++++++++++++++++++++++------------------------
 1 file changed, 54 insertions(+), 68 deletions(-)
10
diff --git a/tcg/tci.c b/tcg/tci.c
17
diff --git a/include/tcg/tcg-gvec-desc.h b/include/tcg/tcg-gvec-desc.h
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg-gvec-desc.h
12
--- a/tcg/tci.c
20
+++ b/include/tcg/tcg-gvec-desc.h
13
+++ b/tcg/tci.c
21
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
22
#ifndef TCG_TCG_GVEC_DESC_H
15
}
23
#define TCG_TCG_GVEC_DESC_H
16
#endif
24
17
25
-/* ??? These bit widths are set for ARM SVE, maxing out at 256 byte vectors. */
18
-static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index)
26
-#define SIMD_OPRSZ_SHIFT 0
27
-#define SIMD_OPRSZ_BITS 5
28
+/*
29
+ * This configuration allows MAXSZ to represent 2048 bytes, and
30
+ * OPRSZ to match MAXSZ, or represent the smaller values 8, 16, or 32.
31
+ *
32
+ * Encode this with:
33
+ * 0, 1, 3 -> 8, 16, 32
34
+ * 2 -> maxsz
35
+ *
36
+ * This steals the input that would otherwise map to 24 to match maxsz.
37
+ */
38
+#define SIMD_MAXSZ_SHIFT 0
39
+#define SIMD_MAXSZ_BITS 8
40
41
-#define SIMD_MAXSZ_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
42
-#define SIMD_MAXSZ_BITS 5
43
+#define SIMD_OPRSZ_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
44
+#define SIMD_OPRSZ_BITS 2
45
46
-#define SIMD_DATA_SHIFT (SIMD_MAXSZ_SHIFT + SIMD_MAXSZ_BITS)
47
+#define SIMD_DATA_SHIFT (SIMD_OPRSZ_SHIFT + SIMD_OPRSZ_BITS)
48
#define SIMD_DATA_BITS (32 - SIMD_DATA_SHIFT)
49
50
/* Create a descriptor from components. */
51
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data);
52
53
-/* Extract the operation size from a descriptor. */
54
-static inline intptr_t simd_oprsz(uint32_t desc)
55
-{
19
-{
56
- return (extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS) + 1) * 8;
20
- return (uint32_t)tci_read_reg(regs, index);
57
-}
21
-}
58
-
22
-
59
/* Extract the max vector size from a descriptor. */
23
#if TCG_TARGET_REG_BITS == 64
60
static inline intptr_t simd_maxsz(uint32_t desc)
24
static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index)
61
{
25
{
62
- return (extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) + 1) * 8;
26
@@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
63
+ return extract32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS) * 8 + 8;
27
return value;
64
+}
65
+
66
+/* Extract the operation size from a descriptor. */
67
+static inline intptr_t simd_oprsz(uint32_t desc)
68
+{
69
+ uint32_t f = extract32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS);
70
+ intptr_t o = f * 8 + 8;
71
+ intptr_t m = simd_maxsz(desc);
72
+ return f == 2 ? m : o;
73
}
28
}
74
29
75
/* Extract the operation-specific data from a descriptor. */
30
-/* Read indexed register (32 bit) from bytecode. */
76
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
31
-static uint32_t tci_read_r32(const tcg_target_ulong *regs,
77
index XXXXXXX..XXXXXXX 100644
32
- const uint8_t **tb_ptr)
78
--- a/tcg/tcg-op-gvec.c
33
-{
79
+++ b/tcg/tcg-op-gvec.c
34
- uint32_t value = tci_read_reg32(regs, **tb_ptr);
80
@@ -XXX,XX +XXX,XX @@ static const TCGOpcode vecop_list_empty[1] = { 0 };
35
- *tb_ptr += 1;
81
of the operand offsets so that we can check them all at once. */
36
- return value;
82
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
37
-}
38
-
39
#if TCG_TARGET_REG_BITS == 32
40
/* Read two indexed registers (2 * 32 bit) from bytecode. */
41
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
42
const uint8_t **tb_ptr)
83
{
43
{
84
- uint32_t opr_align = oprsz >= 16 ? 15 : 7;
44
- uint32_t low = tci_read_r32(regs, tb_ptr);
85
- uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 15 : 7;
45
- return tci_uint64(tci_read_r32(regs, tb_ptr), low);
86
- tcg_debug_assert(oprsz > 0);
46
+ uint32_t low = tci_read_r(regs, tb_ptr);
87
- tcg_debug_assert(oprsz <= maxsz);
47
+ return tci_uint64(tci_read_r(regs, tb_ptr), low);
88
- tcg_debug_assert((oprsz & opr_align) == 0);
89
+ uint32_t max_align;
90
+
91
+ switch (oprsz) {
92
+ case 8:
93
+ case 16:
94
+ case 32:
95
+ tcg_debug_assert(oprsz <= maxsz);
96
+ break;
97
+ default:
98
+ tcg_debug_assert(oprsz == maxsz);
99
+ break;
100
+ }
101
+ tcg_debug_assert(maxsz <= (8 << SIMD_MAXSZ_BITS));
102
+
103
+ max_align = maxsz >= 16 ? 15 : 7;
104
tcg_debug_assert((maxsz & max_align) == 0);
105
tcg_debug_assert((ofs & max_align) == 0);
106
}
48
}
107
@@ -XXX,XX +XXX,XX @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
49
#elif TCG_TARGET_REG_BITS == 64
108
{
50
/* Read indexed register (32 bit signed) from bytecode. */
109
uint32_t desc = 0;
51
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
110
52
continue;
111
- assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS));
53
case INDEX_op_setcond_i32:
112
- assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS));
54
t0 = *tb_ptr++;
113
- assert(data == sextract32(data, 0, SIMD_DATA_BITS));
55
- t1 = tci_read_r32(regs, &tb_ptr);
114
+ check_size_align(oprsz, maxsz, 0);
56
- t2 = tci_read_r32(regs, &tb_ptr);
115
+ tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS));
57
+ t1 = tci_read_r(regs, &tb_ptr);
116
58
+ t2 = tci_read_r(regs, &tb_ptr);
117
oprsz = (oprsz / 8) - 1;
59
condition = *tb_ptr++;
118
maxsz = (maxsz / 8) - 1;
60
tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
119
+
61
break;
120
+ /*
62
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
121
+ * We have just asserted in check_size_align that either
63
#endif
122
+ * oprsz is {8,16,32} or matches maxsz. Encode the final
64
case INDEX_op_mov_i32:
123
+ * case with '2', as that would otherwise map to 24.
65
t0 = *tb_ptr++;
124
+ */
66
- t1 = tci_read_r32(regs, &tb_ptr);
125
+ if (oprsz == maxsz) {
67
+ t1 = tci_read_r(regs, &tb_ptr);
126
+ oprsz = 2;
68
tci_write_reg(regs, t0, t1);
127
+ }
69
break;
128
+
70
case INDEX_op_tci_movi_i32:
129
desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
71
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
130
desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
72
break;
131
desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);
73
case INDEX_op_st_i32:
74
CASE_64(st32)
75
- t0 = tci_read_r32(regs, &tb_ptr);
76
+ t0 = tci_read_r(regs, &tb_ptr);
77
t1 = tci_read_r(regs, &tb_ptr);
78
t2 = tci_read_s32(&tb_ptr);
79
*(uint32_t *)(t1 + t2) = t0;
80
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
81
82
case INDEX_op_add_i32:
83
t0 = *tb_ptr++;
84
- t1 = tci_read_r32(regs, &tb_ptr);
85
- t2 = tci_read_r32(regs, &tb_ptr);
86
+ t1 = tci_read_r(regs, &tb_ptr);
87
+ t2 = tci_read_r(regs, &tb_ptr);
88
tci_write_reg(regs, t0, t1 + t2);
89
break;
90
case INDEX_op_sub_i32:
91
t0 = *tb_ptr++;
92
- t1 = tci_read_r32(regs, &tb_ptr);
93
- t2 = tci_read_r32(regs, &tb_ptr);
94
+ t1 = tci_read_r(regs, &tb_ptr);
95
+ t2 = tci_read_r(regs, &tb_ptr);
96
tci_write_reg(regs, t0, t1 - t2);
97
break;
98
case INDEX_op_mul_i32:
99
t0 = *tb_ptr++;
100
- t1 = tci_read_r32(regs, &tb_ptr);
101
- t2 = tci_read_r32(regs, &tb_ptr);
102
+ t1 = tci_read_r(regs, &tb_ptr);
103
+ t2 = tci_read_r(regs, &tb_ptr);
104
tci_write_reg(regs, t0, t1 * t2);
105
break;
106
case INDEX_op_div_i32:
107
t0 = *tb_ptr++;
108
- t1 = tci_read_r32(regs, &tb_ptr);
109
- t2 = tci_read_r32(regs, &tb_ptr);
110
+ t1 = tci_read_r(regs, &tb_ptr);
111
+ t2 = tci_read_r(regs, &tb_ptr);
112
tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
113
break;
114
case INDEX_op_divu_i32:
115
t0 = *tb_ptr++;
116
- t1 = tci_read_r32(regs, &tb_ptr);
117
- t2 = tci_read_r32(regs, &tb_ptr);
118
- tci_write_reg(regs, t0, t1 / t2);
119
+ t1 = tci_read_r(regs, &tb_ptr);
120
+ t2 = tci_read_r(regs, &tb_ptr);
121
+ tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2);
122
break;
123
case INDEX_op_rem_i32:
124
t0 = *tb_ptr++;
125
- t1 = tci_read_r32(regs, &tb_ptr);
126
- t2 = tci_read_r32(regs, &tb_ptr);
127
+ t1 = tci_read_r(regs, &tb_ptr);
128
+ t2 = tci_read_r(regs, &tb_ptr);
129
tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
130
break;
131
case INDEX_op_remu_i32:
132
t0 = *tb_ptr++;
133
- t1 = tci_read_r32(regs, &tb_ptr);
134
- t2 = tci_read_r32(regs, &tb_ptr);
135
- tci_write_reg(regs, t0, t1 % t2);
136
+ t1 = tci_read_r(regs, &tb_ptr);
137
+ t2 = tci_read_r(regs, &tb_ptr);
138
+ tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
139
break;
140
case INDEX_op_and_i32:
141
t0 = *tb_ptr++;
142
- t1 = tci_read_r32(regs, &tb_ptr);
143
- t2 = tci_read_r32(regs, &tb_ptr);
144
+ t1 = tci_read_r(regs, &tb_ptr);
145
+ t2 = tci_read_r(regs, &tb_ptr);
146
tci_write_reg(regs, t0, t1 & t2);
147
break;
148
case INDEX_op_or_i32:
149
t0 = *tb_ptr++;
150
- t1 = tci_read_r32(regs, &tb_ptr);
151
- t2 = tci_read_r32(regs, &tb_ptr);
152
+ t1 = tci_read_r(regs, &tb_ptr);
153
+ t2 = tci_read_r(regs, &tb_ptr);
154
tci_write_reg(regs, t0, t1 | t2);
155
break;
156
case INDEX_op_xor_i32:
157
t0 = *tb_ptr++;
158
- t1 = tci_read_r32(regs, &tb_ptr);
159
- t2 = tci_read_r32(regs, &tb_ptr);
160
+ t1 = tci_read_r(regs, &tb_ptr);
161
+ t2 = tci_read_r(regs, &tb_ptr);
162
tci_write_reg(regs, t0, t1 ^ t2);
163
break;
164
165
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
166
167
case INDEX_op_shl_i32:
168
t0 = *tb_ptr++;
169
- t1 = tci_read_r32(regs, &tb_ptr);
170
- t2 = tci_read_r32(regs, &tb_ptr);
171
- tci_write_reg(regs, t0, t1 << (t2 & 31));
172
+ t1 = tci_read_r(regs, &tb_ptr);
173
+ t2 = tci_read_r(regs, &tb_ptr);
174
+ tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31));
175
break;
176
case INDEX_op_shr_i32:
177
t0 = *tb_ptr++;
178
- t1 = tci_read_r32(regs, &tb_ptr);
179
- t2 = tci_read_r32(regs, &tb_ptr);
180
- tci_write_reg(regs, t0, t1 >> (t2 & 31));
181
+ t1 = tci_read_r(regs, &tb_ptr);
182
+ t2 = tci_read_r(regs, &tb_ptr);
183
+ tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31));
184
break;
185
case INDEX_op_sar_i32:
186
t0 = *tb_ptr++;
187
- t1 = tci_read_r32(regs, &tb_ptr);
188
- t2 = tci_read_r32(regs, &tb_ptr);
189
- tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31)));
190
+ t1 = tci_read_r(regs, &tb_ptr);
191
+ t2 = tci_read_r(regs, &tb_ptr);
192
+ tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31));
193
break;
194
#if TCG_TARGET_HAS_rot_i32
195
case INDEX_op_rotl_i32:
196
t0 = *tb_ptr++;
197
- t1 = tci_read_r32(regs, &tb_ptr);
198
- t2 = tci_read_r32(regs, &tb_ptr);
199
+ t1 = tci_read_r(regs, &tb_ptr);
200
+ t2 = tci_read_r(regs, &tb_ptr);
201
tci_write_reg(regs, t0, rol32(t1, t2 & 31));
202
break;
203
case INDEX_op_rotr_i32:
204
t0 = *tb_ptr++;
205
- t1 = tci_read_r32(regs, &tb_ptr);
206
- t2 = tci_read_r32(regs, &tb_ptr);
207
+ t1 = tci_read_r(regs, &tb_ptr);
208
+ t2 = tci_read_r(regs, &tb_ptr);
209
tci_write_reg(regs, t0, ror32(t1, t2 & 31));
210
break;
211
#endif
212
#if TCG_TARGET_HAS_deposit_i32
213
case INDEX_op_deposit_i32:
214
t0 = *tb_ptr++;
215
- t1 = tci_read_r32(regs, &tb_ptr);
216
- t2 = tci_read_r32(regs, &tb_ptr);
217
+ t1 = tci_read_r(regs, &tb_ptr);
218
+ t2 = tci_read_r(regs, &tb_ptr);
219
tmp16 = *tb_ptr++;
220
tmp8 = *tb_ptr++;
221
tmp32 = (((1 << tmp8) - 1) << tmp16);
222
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
223
break;
224
#endif
225
case INDEX_op_brcond_i32:
226
- t0 = tci_read_r32(regs, &tb_ptr);
227
- t1 = tci_read_r32(regs, &tb_ptr);
228
+ t0 = tci_read_r(regs, &tb_ptr);
229
+ t1 = tci_read_r(regs, &tb_ptr);
230
condition = *tb_ptr++;
231
label = tci_read_label(&tb_ptr);
232
if (tci_compare32(t0, t1, condition)) {
233
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
234
case INDEX_op_mulu2_i32:
235
t0 = *tb_ptr++;
236
t1 = *tb_ptr++;
237
- t2 = tci_read_r32(regs, &tb_ptr);
238
- tmp64 = tci_read_r32(regs, &tb_ptr);
239
- tci_write_reg64(regs, t1, t0, t2 * tmp64);
240
+ t2 = tci_read_r(regs, &tb_ptr);
241
+ tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr);
242
+ tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
243
break;
244
#endif /* TCG_TARGET_REG_BITS == 32 */
245
#if TCG_TARGET_HAS_ext8s_i32
246
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
247
#if TCG_TARGET_HAS_bswap32_i32
248
case INDEX_op_bswap32_i32:
249
t0 = *tb_ptr++;
250
- t1 = tci_read_r32(regs, &tb_ptr);
251
+ t1 = tci_read_r(regs, &tb_ptr);
252
tci_write_reg(regs, t0, bswap32(t1));
253
break;
254
#endif
255
#if TCG_TARGET_HAS_not_i32
256
case INDEX_op_not_i32:
257
t0 = *tb_ptr++;
258
- t1 = tci_read_r32(regs, &tb_ptr);
259
+ t1 = tci_read_r(regs, &tb_ptr);
260
tci_write_reg(regs, t0, ~t1);
261
break;
262
#endif
263
#if TCG_TARGET_HAS_neg_i32
264
case INDEX_op_neg_i32:
265
t0 = *tb_ptr++;
266
- t1 = tci_read_r32(regs, &tb_ptr);
267
+ t1 = tci_read_r(regs, &tb_ptr);
268
tci_write_reg(regs, t0, -t1);
269
break;
270
#endif
271
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
272
#endif
273
case INDEX_op_extu_i32_i64:
274
t0 = *tb_ptr++;
275
- t1 = tci_read_r32(regs, &tb_ptr);
276
- tci_write_reg(regs, t0, t1);
277
+ t1 = tci_read_r(regs, &tb_ptr);
278
+ tci_write_reg(regs, t0, (uint32_t)t1);
279
break;
280
#if TCG_TARGET_HAS_bswap16_i64
281
case INDEX_op_bswap16_i64:
282
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
283
#if TCG_TARGET_HAS_bswap32_i64
284
case INDEX_op_bswap32_i64:
285
t0 = *tb_ptr++;
286
- t1 = tci_read_r32(regs, &tb_ptr);
287
+ t1 = tci_read_r(regs, &tb_ptr);
288
tci_write_reg(regs, t0, bswap32(t1));
289
break;
290
#endif
132
--
291
--
133
2.25.1
292
2.25.1
134
293
135
294
New patch
1
Use explicit casts for ext32s opcodes.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/tci.c | 20 ++------------------
7
1 file changed, 2 insertions(+), 18 deletions(-)
8
9
diff --git a/tcg/tci.c b/tcg/tci.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tci.c
12
+++ b/tcg/tci.c
13
@@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
14
return regs[index];
15
}
16
17
-#if TCG_TARGET_REG_BITS == 64
18
-static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
19
-{
20
- return (int32_t)tci_read_reg(regs, index);
21
-}
22
-#endif
23
-
24
#if TCG_TARGET_REG_BITS == 64
25
static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index)
26
{
27
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs,
28
return tci_uint64(tci_read_r(regs, tb_ptr), low);
29
}
30
#elif TCG_TARGET_REG_BITS == 64
31
-/* Read indexed register (32 bit signed) from bytecode. */
32
-static int32_t tci_read_r32s(const tcg_target_ulong *regs,
33
- const uint8_t **tb_ptr)
34
-{
35
- int32_t value = tci_read_reg32s(regs, **tb_ptr);
36
- *tb_ptr += 1;
37
- return value;
38
-}
39
-
40
/* Read indexed register (64 bit) from bytecode. */
41
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
42
const uint8_t **tb_ptr)
43
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
44
#endif
45
case INDEX_op_ext_i32_i64:
46
t0 = *tb_ptr++;
47
- t1 = tci_read_r32s(regs, &tb_ptr);
48
- tci_write_reg(regs, t0, t1);
49
+ t1 = tci_read_r(regs, &tb_ptr);
50
+ tci_write_reg(regs, t0, (int32_t)t1);
51
break;
52
#if TCG_TARGET_HAS_ext32u_i64
53
case INDEX_op_ext32u_i64:
54
--
55
2.25.1
56
57
New patch
1
1
In all cases restricted to 64-bit hosts, tcg_read_r is
2
identical. We retain the 64-bit symbol for the single
3
case of INDEX_op_qemu_st_i64.
4
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/tci.c | 93 +++++++++++++++++++++++++------------------------------
9
1 file changed, 42 insertions(+), 51 deletions(-)
10
11
diff --git a/tcg/tci.c b/tcg/tci.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/tci.c
14
+++ b/tcg/tci.c
15
@@ -XXX,XX +XXX,XX @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
16
return regs[index];
17
}
18
19
-#if TCG_TARGET_REG_BITS == 64
20
-static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index)
21
-{
22
- return tci_read_reg(regs, index);
23
-}
24
-#endif
25
-
26
static void
27
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
28
{
29
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs,
30
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
31
const uint8_t **tb_ptr)
32
{
33
- uint64_t value = tci_read_reg64(regs, **tb_ptr);
34
- *tb_ptr += 1;
35
- return value;
36
+ return tci_read_r(regs, tb_ptr);
37
}
38
#endif
39
40
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
41
#elif TCG_TARGET_REG_BITS == 64
42
case INDEX_op_setcond_i64:
43
t0 = *tb_ptr++;
44
- t1 = tci_read_r64(regs, &tb_ptr);
45
- t2 = tci_read_r64(regs, &tb_ptr);
46
+ t1 = tci_read_r(regs, &tb_ptr);
47
+ t2 = tci_read_r(regs, &tb_ptr);
48
condition = *tb_ptr++;
49
tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
50
break;
51
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
52
#if TCG_TARGET_REG_BITS == 64
53
case INDEX_op_mov_i64:
54
t0 = *tb_ptr++;
55
- t1 = tci_read_r64(regs, &tb_ptr);
56
+ t1 = tci_read_r(regs, &tb_ptr);
57
tci_write_reg(regs, t0, t1);
58
break;
59
case INDEX_op_tci_movi_i64:
60
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
61
tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
62
break;
63
case INDEX_op_st_i64:
64
- t0 = tci_read_r64(regs, &tb_ptr);
65
+ t0 = tci_read_r(regs, &tb_ptr);
66
t1 = tci_read_r(regs, &tb_ptr);
67
t2 = tci_read_s32(&tb_ptr);
68
*(uint64_t *)(t1 + t2) = t0;
69
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
70
71
case INDEX_op_add_i64:
72
t0 = *tb_ptr++;
73
- t1 = tci_read_r64(regs, &tb_ptr);
74
- t2 = tci_read_r64(regs, &tb_ptr);
75
+ t1 = tci_read_r(regs, &tb_ptr);
76
+ t2 = tci_read_r(regs, &tb_ptr);
77
tci_write_reg(regs, t0, t1 + t2);
78
break;
79
case INDEX_op_sub_i64:
80
t0 = *tb_ptr++;
81
- t1 = tci_read_r64(regs, &tb_ptr);
82
- t2 = tci_read_r64(regs, &tb_ptr);
83
+ t1 = tci_read_r(regs, &tb_ptr);
84
+ t2 = tci_read_r(regs, &tb_ptr);
85
tci_write_reg(regs, t0, t1 - t2);
86
break;
87
case INDEX_op_mul_i64:
88
t0 = *tb_ptr++;
89
- t1 = tci_read_r64(regs, &tb_ptr);
90
- t2 = tci_read_r64(regs, &tb_ptr);
91
+ t1 = tci_read_r(regs, &tb_ptr);
92
+ t2 = tci_read_r(regs, &tb_ptr);
93
tci_write_reg(regs, t0, t1 * t2);
94
break;
95
case INDEX_op_div_i64:
96
t0 = *tb_ptr++;
97
- t1 = tci_read_r64(regs, &tb_ptr);
98
- t2 = tci_read_r64(regs, &tb_ptr);
99
+ t1 = tci_read_r(regs, &tb_ptr);
100
+ t2 = tci_read_r(regs, &tb_ptr);
101
tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
102
break;
103
case INDEX_op_divu_i64:
104
t0 = *tb_ptr++;
105
- t1 = tci_read_r64(regs, &tb_ptr);
106
- t2 = tci_read_r64(regs, &tb_ptr);
107
+ t1 = tci_read_r(regs, &tb_ptr);
108
+ t2 = tci_read_r(regs, &tb_ptr);
109
tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
110
break;
111
case INDEX_op_rem_i64:
112
t0 = *tb_ptr++;
113
- t1 = tci_read_r64(regs, &tb_ptr);
114
- t2 = tci_read_r64(regs, &tb_ptr);
115
+ t1 = tci_read_r(regs, &tb_ptr);
116
+ t2 = tci_read_r(regs, &tb_ptr);
117
tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
118
break;
119
case INDEX_op_remu_i64:
120
t0 = *tb_ptr++;
121
- t1 = tci_read_r64(regs, &tb_ptr);
122
- t2 = tci_read_r64(regs, &tb_ptr);
123
+ t1 = tci_read_r(regs, &tb_ptr);
124
+ t2 = tci_read_r(regs, &tb_ptr);
125
tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
126
break;
127
case INDEX_op_and_i64:
128
t0 = *tb_ptr++;
129
- t1 = tci_read_r64(regs, &tb_ptr);
130
- t2 = tci_read_r64(regs, &tb_ptr);
131
+ t1 = tci_read_r(regs, &tb_ptr);
132
+ t2 = tci_read_r(regs, &tb_ptr);
133
tci_write_reg(regs, t0, t1 & t2);
134
break;
135
case INDEX_op_or_i64:
136
t0 = *tb_ptr++;
137
- t1 = tci_read_r64(regs, &tb_ptr);
138
- t2 = tci_read_r64(regs, &tb_ptr);
139
+ t1 = tci_read_r(regs, &tb_ptr);
140
+ t2 = tci_read_r(regs, &tb_ptr);
141
tci_write_reg(regs, t0, t1 | t2);
142
break;
143
case INDEX_op_xor_i64:
144
t0 = *tb_ptr++;
145
- t1 = tci_read_r64(regs, &tb_ptr);
146
- t2 = tci_read_r64(regs, &tb_ptr);
147
+ t1 = tci_read_r(regs, &tb_ptr);
148
+ t2 = tci_read_r(regs, &tb_ptr);
149
tci_write_reg(regs, t0, t1 ^ t2);
150
break;
151
152
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
153
154
case INDEX_op_shl_i64:
155
t0 = *tb_ptr++;
156
- t1 = tci_read_r64(regs, &tb_ptr);
157
- t2 = tci_read_r64(regs, &tb_ptr);
158
+ t1 = tci_read_r(regs, &tb_ptr);
159
+ t2 = tci_read_r(regs, &tb_ptr);
160
tci_write_reg(regs, t0, t1 << (t2 & 63));
161
break;
162
case INDEX_op_shr_i64:
163
t0 = *tb_ptr++;
164
- t1 = tci_read_r64(regs, &tb_ptr);
165
- t2 = tci_read_r64(regs, &tb_ptr);
166
+ t1 = tci_read_r(regs, &tb_ptr);
167
+ t2 = tci_read_r(regs, &tb_ptr);
168
tci_write_reg(regs, t0, t1 >> (t2 & 63));
169
break;
170
case INDEX_op_sar_i64:
171
t0 = *tb_ptr++;
172
- t1 = tci_read_r64(regs, &tb_ptr);
173
- t2 = tci_read_r64(regs, &tb_ptr);
174
+ t1 = tci_read_r(regs, &tb_ptr);
175
+ t2 = tci_read_r(regs, &tb_ptr);
176
tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
177
break;
178
#if TCG_TARGET_HAS_rot_i64
179
case INDEX_op_rotl_i64:
180
t0 = *tb_ptr++;
181
- t1 = tci_read_r64(regs, &tb_ptr);
182
- t2 = tci_read_r64(regs, &tb_ptr);
183
+ t1 = tci_read_r(regs, &tb_ptr);
184
+ t2 = tci_read_r(regs, &tb_ptr);
185
tci_write_reg(regs, t0, rol64(t1, t2 & 63));
186
break;
187
case INDEX_op_rotr_i64:
188
t0 = *tb_ptr++;
189
- t1 = tci_read_r64(regs, &tb_ptr);
190
- t2 = tci_read_r64(regs, &tb_ptr);
191
+ t1 = tci_read_r(regs, &tb_ptr);
192
+ t2 = tci_read_r(regs, &tb_ptr);
193
tci_write_reg(regs, t0, ror64(t1, t2 & 63));
194
break;
195
#endif
196
#if TCG_TARGET_HAS_deposit_i64
197
case INDEX_op_deposit_i64:
198
t0 = *tb_ptr++;
199
- t1 = tci_read_r64(regs, &tb_ptr);
200
- t2 = tci_read_r64(regs, &tb_ptr);
201
+ t1 = tci_read_r(regs, &tb_ptr);
202
+ t2 = tci_read_r(regs, &tb_ptr);
203
tmp16 = *tb_ptr++;
204
tmp8 = *tb_ptr++;
205
tmp64 = (((1ULL << tmp8) - 1) << tmp16);
206
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
207
break;
208
#endif
209
case INDEX_op_brcond_i64:
210
- t0 = tci_read_r64(regs, &tb_ptr);
211
- t1 = tci_read_r64(regs, &tb_ptr);
212
+ t0 = tci_read_r(regs, &tb_ptr);
213
+ t1 = tci_read_r(regs, &tb_ptr);
214
condition = *tb_ptr++;
215
label = tci_read_label(&tb_ptr);
216
if (tci_compare64(t0, t1, condition)) {
217
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
218
#if TCG_TARGET_HAS_bswap64_i64
219
case INDEX_op_bswap64_i64:
220
t0 = *tb_ptr++;
221
- t1 = tci_read_r64(regs, &tb_ptr);
222
+ t1 = tci_read_r(regs, &tb_ptr);
223
tci_write_reg(regs, t0, bswap64(t1));
224
break;
225
#endif
226
#if TCG_TARGET_HAS_not_i64
227
case INDEX_op_not_i64:
228
t0 = *tb_ptr++;
229
- t1 = tci_read_r64(regs, &tb_ptr);
230
+ t1 = tci_read_r(regs, &tb_ptr);
231
tci_write_reg(regs, t0, ~t1);
232
break;
233
#endif
234
#if TCG_TARGET_HAS_neg_i64
235
case INDEX_op_neg_i64:
236
t0 = *tb_ptr++;
237
- t1 = tci_read_r64(regs, &tb_ptr);
238
+ t1 = tci_read_r(regs, &tb_ptr);
239
tci_write_reg(regs, t0, -t1);
240
break;
241
#endif
242
--
243
2.25.1
244
245
New patch
1
This includes add, sub, mul, and, or, xor.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/tci.c | 83 +++++++++++++++++--------------------------------------
7
1 file changed, 25 insertions(+), 58 deletions(-)
8
9
diff --git a/tcg/tci.c b/tcg/tci.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tci.c
12
+++ b/tcg/tci.c
13
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
14
*(uint32_t *)(t1 + t2) = t0;
15
break;
16
17
- /* Arithmetic operations (32 bit). */
18
+ /* Arithmetic operations (mixed 32/64 bit). */
19
20
- case INDEX_op_add_i32:
21
+ CASE_32_64(add)
22
t0 = *tb_ptr++;
23
t1 = tci_read_r(regs, &tb_ptr);
24
t2 = tci_read_r(regs, &tb_ptr);
25
tci_write_reg(regs, t0, t1 + t2);
26
break;
27
- case INDEX_op_sub_i32:
28
+ CASE_32_64(sub)
29
t0 = *tb_ptr++;
30
t1 = tci_read_r(regs, &tb_ptr);
31
t2 = tci_read_r(regs, &tb_ptr);
32
tci_write_reg(regs, t0, t1 - t2);
33
break;
34
- case INDEX_op_mul_i32:
35
+ CASE_32_64(mul)
36
t0 = *tb_ptr++;
37
t1 = tci_read_r(regs, &tb_ptr);
38
t2 = tci_read_r(regs, &tb_ptr);
39
tci_write_reg(regs, t0, t1 * t2);
40
break;
41
+ CASE_32_64(and)
42
+ t0 = *tb_ptr++;
43
+ t1 = tci_read_r(regs, &tb_ptr);
44
+ t2 = tci_read_r(regs, &tb_ptr);
45
+ tci_write_reg(regs, t0, t1 & t2);
46
+ break;
47
+ CASE_32_64(or)
48
+ t0 = *tb_ptr++;
49
+ t1 = tci_read_r(regs, &tb_ptr);
50
+ t2 = tci_read_r(regs, &tb_ptr);
51
+ tci_write_reg(regs, t0, t1 | t2);
52
+ break;
53
+ CASE_32_64(xor)
54
+ t0 = *tb_ptr++;
55
+ t1 = tci_read_r(regs, &tb_ptr);
56
+ t2 = tci_read_r(regs, &tb_ptr);
57
+ tci_write_reg(regs, t0, t1 ^ t2);
58
+ break;
59
+
60
+ /* Arithmetic operations (32 bit). */
61
+
62
case INDEX_op_div_i32:
63
t0 = *tb_ptr++;
64
t1 = tci_read_r(regs, &tb_ptr);
65
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
66
t2 = tci_read_r(regs, &tb_ptr);
67
tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
68
break;
69
- case INDEX_op_and_i32:
70
- t0 = *tb_ptr++;
71
- t1 = tci_read_r(regs, &tb_ptr);
72
- t2 = tci_read_r(regs, &tb_ptr);
73
- tci_write_reg(regs, t0, t1 & t2);
74
- break;
75
- case INDEX_op_or_i32:
76
- t0 = *tb_ptr++;
77
- t1 = tci_read_r(regs, &tb_ptr);
78
- t2 = tci_read_r(regs, &tb_ptr);
79
- tci_write_reg(regs, t0, t1 | t2);
80
- break;
81
- case INDEX_op_xor_i32:
82
- t0 = *tb_ptr++;
83
- t1 = tci_read_r(regs, &tb_ptr);
84
- t2 = tci_read_r(regs, &tb_ptr);
85
- tci_write_reg(regs, t0, t1 ^ t2);
86
- break;
87
88
/* Shift/rotate operations (32 bit). */
89
90
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
91
92
/* Arithmetic operations (64 bit). */
93
94
- case INDEX_op_add_i64:
95
- t0 = *tb_ptr++;
96
- t1 = tci_read_r(regs, &tb_ptr);
97
- t2 = tci_read_r(regs, &tb_ptr);
98
- tci_write_reg(regs, t0, t1 + t2);
99
- break;
100
- case INDEX_op_sub_i64:
101
- t0 = *tb_ptr++;
102
- t1 = tci_read_r(regs, &tb_ptr);
103
- t2 = tci_read_r(regs, &tb_ptr);
104
- tci_write_reg(regs, t0, t1 - t2);
105
- break;
106
- case INDEX_op_mul_i64:
107
- t0 = *tb_ptr++;
108
- t1 = tci_read_r(regs, &tb_ptr);
109
- t2 = tci_read_r(regs, &tb_ptr);
110
- tci_write_reg(regs, t0, t1 * t2);
111
- break;
112
case INDEX_op_div_i64:
113
t0 = *tb_ptr++;
114
t1 = tci_read_r(regs, &tb_ptr);
115
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
116
t2 = tci_read_r(regs, &tb_ptr);
117
tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
118
break;
119
- case INDEX_op_and_i64:
120
- t0 = *tb_ptr++;
121
- t1 = tci_read_r(regs, &tb_ptr);
122
- t2 = tci_read_r(regs, &tb_ptr);
123
- tci_write_reg(regs, t0, t1 & t2);
124
- break;
125
- case INDEX_op_or_i64:
126
- t0 = *tb_ptr++;
127
- t1 = tci_read_r(regs, &tb_ptr);
128
- t2 = tci_read_r(regs, &tb_ptr);
129
- tci_write_reg(regs, t0, t1 | t2);
130
- break;
131
- case INDEX_op_xor_i64:
132
- t0 = *tb_ptr++;
133
- t1 = tci_read_r(regs, &tb_ptr);
134
- t2 = tci_read_r(regs, &tb_ptr);
135
- tci_write_reg(regs, t0, t1 ^ t2);
136
- break;
137
138
/* Shift/rotate operations (64 bit). */
139
140
--
141
2.25.1
142
143
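The merge above leans on a CASE_32_64() macro that expands to both the _i32 and _i64 case labels in front of one shared body (the real definition lives in tcg/tci.c and is conditional on TCG_TARGET_REG_BITS). A compilable toy version of the pattern, with local xglue/glue helpers standing in for QEMU's:

#include <stdint.h>
#include <stdio.h>

enum {
    INDEX_op_add_i32, INDEX_op_add_i64,
    INDEX_op_sub_i32, INDEX_op_sub_i64,
};

/* Two-level paste so the arguments are macro-expanded before ## is applied. */
#define xglue(a, b) a##b
#define glue(a, b)  xglue(a, b)

/* Emit both case labels in front of a single shared body. */
#define CASE_32_64(x) \
    case glue(glue(INDEX_op_, x), _i32): \
    case glue(glue(INDEX_op_, x), _i64):

static uint64_t interp(int op, uint64_t t1, uint64_t t2)
{
    switch (op) {
    CASE_32_64(add)
        return t1 + t2;
    CASE_32_64(sub)
        return t1 - t2;
    default:
        return 0;
    }
}

int main(void)
{
    printf("%llu\n", (unsigned long long)interp(INDEX_op_add_i64, 40, 2));  /* 42 */
    printf("%llu\n", (unsigned long long)interp(INDEX_op_sub_i32, 40, 2));  /* 38 */
    return 0;
}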
The cmp_vec opcode is mandatory; this symbol is unused.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.h | 1 -
 tcg/i386/tcg-target.h | 1 -
 tcg/ppc/tcg-target.h | 1 -
 3 files changed, 3 deletions(-)

This includes ext8s, ext8u, ext16s, ext16u.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 44 ++++++++------------------------------------
 1 file changed, 8 insertions(+), 36 deletions(-)
10
8
11
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
9
diff --git a/tcg/tci.c b/tcg/tci.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/aarch64/tcg-target.h
11
--- a/tcg/tci.c
14
+++ b/tcg/aarch64/tcg-target.h
12
+++ b/tcg/tci.c
15
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
16
#define TCG_TARGET_HAS_shi_vec 1
14
tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
17
#define TCG_TARGET_HAS_shs_vec 0
15
break;
18
#define TCG_TARGET_HAS_shv_vec 1
16
#endif /* TCG_TARGET_REG_BITS == 32 */
19
-#define TCG_TARGET_HAS_cmp_vec 1
17
-#if TCG_TARGET_HAS_ext8s_i32
20
#define TCG_TARGET_HAS_mul_vec 1
18
- case INDEX_op_ext8s_i32:
21
#define TCG_TARGET_HAS_sat_vec 1
19
+#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
22
#define TCG_TARGET_HAS_minmax_vec 1
20
+ CASE_32_64(ext8s)
23
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
21
t0 = *tb_ptr++;
24
index XXXXXXX..XXXXXXX 100644
22
t1 = tci_read_r(regs, &tb_ptr);
25
--- a/tcg/i386/tcg-target.h
23
tci_write_reg(regs, t0, (int8_t)t1);
26
+++ b/tcg/i386/tcg-target.h
24
break;
27
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
25
#endif
28
#define TCG_TARGET_HAS_shi_vec 1
26
-#if TCG_TARGET_HAS_ext16s_i32
29
#define TCG_TARGET_HAS_shs_vec 1
27
- case INDEX_op_ext16s_i32:
30
#define TCG_TARGET_HAS_shv_vec have_avx2
28
+#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
31
-#define TCG_TARGET_HAS_cmp_vec 1
29
+ CASE_32_64(ext16s)
32
#define TCG_TARGET_HAS_mul_vec 1
30
t0 = *tb_ptr++;
33
#define TCG_TARGET_HAS_sat_vec 1
31
t1 = tci_read_r(regs, &tb_ptr);
34
#define TCG_TARGET_HAS_minmax_vec 1
32
tci_write_reg(regs, t0, (int16_t)t1);
35
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
33
break;
36
index XXXXXXX..XXXXXXX 100644
34
#endif
37
--- a/tcg/ppc/tcg-target.h
35
-#if TCG_TARGET_HAS_ext8u_i32
38
+++ b/tcg/ppc/tcg-target.h
36
- case INDEX_op_ext8u_i32:
39
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
37
+#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
40
#define TCG_TARGET_HAS_shi_vec 0
38
+ CASE_32_64(ext8u)
41
#define TCG_TARGET_HAS_shs_vec 0
39
t0 = *tb_ptr++;
42
#define TCG_TARGET_HAS_shv_vec 1
40
t1 = tci_read_r(regs, &tb_ptr);
43
-#define TCG_TARGET_HAS_cmp_vec 1
41
tci_write_reg(regs, t0, (uint8_t)t1);
44
#define TCG_TARGET_HAS_mul_vec 1
42
break;
45
#define TCG_TARGET_HAS_sat_vec 1
43
#endif
46
#define TCG_TARGET_HAS_minmax_vec 1
44
-#if TCG_TARGET_HAS_ext16u_i32
45
- case INDEX_op_ext16u_i32:
46
+#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
47
+ CASE_32_64(ext16u)
48
t0 = *tb_ptr++;
49
t1 = tci_read_r(regs, &tb_ptr);
50
tci_write_reg(regs, t0, (uint16_t)t1);
51
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
52
continue;
53
}
54
break;
55
-#if TCG_TARGET_HAS_ext8u_i64
56
- case INDEX_op_ext8u_i64:
57
- t0 = *tb_ptr++;
58
- t1 = tci_read_r(regs, &tb_ptr);
59
- tci_write_reg(regs, t0, (uint8_t)t1);
60
- break;
61
-#endif
62
-#if TCG_TARGET_HAS_ext8s_i64
63
- case INDEX_op_ext8s_i64:
64
- t0 = *tb_ptr++;
65
- t1 = tci_read_r(regs, &tb_ptr);
66
- tci_write_reg(regs, t0, (int8_t)t1);
67
- break;
68
-#endif
69
-#if TCG_TARGET_HAS_ext16s_i64
70
- case INDEX_op_ext16s_i64:
71
- t0 = *tb_ptr++;
72
- t1 = tci_read_r(regs, &tb_ptr);
73
- tci_write_reg(regs, t0, (int16_t)t1);
74
- break;
75
-#endif
76
-#if TCG_TARGET_HAS_ext16u_i64
77
- case INDEX_op_ext16u_i64:
78
- t0 = *tb_ptr++;
79
- t1 = tci_read_r(regs, &tb_ptr);
80
- tci_write_reg(regs, t0, (uint16_t)t1);
81
- break;
82
-#endif
83
#if TCG_TARGET_HAS_ext32s_i64
84
case INDEX_op_ext32s_i64:
85
#endif
47
--
86
--
48
2.25.1
87
2.25.1
49
88
50
89
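The merged extension cases all reduce to plain C casts on the register value, which is why one body per operation is enough once the 32-bit and 64-bit guards are OR-ed together. A throwaway demonstration of what those casts do (not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t t1 = 0x0000000000008ff0ull;

    uint64_t ext8s  = (uint64_t)(int8_t)t1;   /* sign-extend from bit 7  -> fffffffffffffff0 */
    uint64_t ext8u  = (uint8_t)t1;            /* zero-extend from bit 7  -> 00000000000000f0 */
    uint64_t ext16s = (uint64_t)(int16_t)t1;  /* sign-extend from bit 15 -> ffffffffffff8ff0 */
    uint64_t ext16u = (uint16_t)t1;           /* zero-extend from bit 15 -> 0000000000008ff0 */

    printf("ext8s  %016llx\n", (unsigned long long)ext8s);
    printf("ext8u  %016llx\n", (unsigned long long)ext8u);
    printf("ext16s %016llx\n", (unsigned long long)ext16s);
    printf("ext16u %016llx\n", (unsigned long long)ext16u);
    return 0;
}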
The last user of this field disappeared in f69d277ece4.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 3 ---
 1 file changed, 3 deletions(-)

This includes bswap16 and bswap32.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)
8
8
9
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
9
diff --git a/tcg/tci.c b/tcg/tci.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/tcg/tcg.h
11
--- a/tcg/tci.c
12
+++ b/include/tcg/tcg.h
12
+++ b/tcg/tci.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef {
13
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
14
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
14
tci_write_reg(regs, t0, (uint16_t)t1);
15
uint8_t flags;
15
break;
16
TCGArgConstraint *args_ct;
16
#endif
17
-#if defined(CONFIG_DEBUG_TCG)
17
-#if TCG_TARGET_HAS_bswap16_i32
18
- int used;
18
- case INDEX_op_bswap16_i32:
19
+#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
20
+ CASE_32_64(bswap16)
21
t0 = *tb_ptr++;
22
t1 = tci_read_r(regs, &tb_ptr);
23
tci_write_reg(regs, t0, bswap16(t1));
24
break;
25
#endif
26
-#if TCG_TARGET_HAS_bswap32_i32
27
- case INDEX_op_bswap32_i32:
28
+#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
29
+ CASE_32_64(bswap32)
30
t0 = *tb_ptr++;
31
t1 = tci_read_r(regs, &tb_ptr);
32
tci_write_reg(regs, t0, bswap32(t1));
33
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
34
t1 = tci_read_r(regs, &tb_ptr);
35
tci_write_reg(regs, t0, (uint32_t)t1);
36
break;
37
-#if TCG_TARGET_HAS_bswap16_i64
38
- case INDEX_op_bswap16_i64:
39
- t0 = *tb_ptr++;
40
- t1 = tci_read_r(regs, &tb_ptr);
41
- tci_write_reg(regs, t0, bswap16(t1));
42
- break;
19
-#endif
43
-#endif
20
} TCGOpDef;
44
-#if TCG_TARGET_HAS_bswap32_i64
21
45
- case INDEX_op_bswap32_i64:
22
extern TCGOpDef tcg_op_defs[];
46
- t0 = *tb_ptr++;
47
- t1 = tci_read_r(regs, &tb_ptr);
48
- tci_write_reg(regs, t0, bswap32(t1));
49
- break;
50
-#endif
51
#if TCG_TARGET_HAS_bswap64_i64
52
case INDEX_op_bswap64_i64:
53
t0 = *tb_ptr++;
23
--
54
--
24
2.25.1
55
2.25.1
25
56
26
57
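bswap16() and bswap32() come from QEMU's include/qemu/bswap.h; nothing in the merged cases depends on operand width beyond which helper is called. For reference, portable stand-alone equivalents (simplified, not QEMU's implementations, which prefer compiler builtins):

#include <stdint.h>
#include <stdio.h>

static inline uint16_t my_bswap16(uint16_t x)
{
    return (uint16_t)((x >> 8) | (x << 8));
}

static inline uint32_t my_bswap32(uint32_t x)
{
    return ((x >> 24) & 0x000000ffu) |
           ((x >>  8) & 0x0000ff00u) |
           ((x <<  8) & 0x00ff0000u) |
           ((x << 24) & 0xff000000u);
}

int main(void)
{
    printf("%04x\n", my_bswap16(0x1234));        /* 3412 */
    printf("%08x\n", my_bswap32(0x12345678u));   /* 78563412 */
    return 0;
}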
New patch

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 29 +++++------------------------
 1 file changed, 5 insertions(+), 24 deletions(-)
diff --git a/tcg/tci.c b/tcg/tci.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/tci.c
10
+++ b/tcg/tci.c
11
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
12
tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
13
break;
14
#endif
15
- case INDEX_op_mov_i32:
16
+ CASE_32_64(mov)
17
t0 = *tb_ptr++;
18
t1 = tci_read_r(regs, &tb_ptr);
19
tci_write_reg(regs, t0, t1);
20
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
21
tci_write_reg(regs, t0, bswap32(t1));
22
break;
23
#endif
24
-#if TCG_TARGET_HAS_not_i32
25
- case INDEX_op_not_i32:
26
+#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
27
+ CASE_32_64(not)
28
t0 = *tb_ptr++;
29
t1 = tci_read_r(regs, &tb_ptr);
30
tci_write_reg(regs, t0, ~t1);
31
break;
32
#endif
33
-#if TCG_TARGET_HAS_neg_i32
34
- case INDEX_op_neg_i32:
35
+#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
36
+ CASE_32_64(neg)
37
t0 = *tb_ptr++;
38
t1 = tci_read_r(regs, &tb_ptr);
39
tci_write_reg(regs, t0, -t1);
40
break;
41
#endif
42
#if TCG_TARGET_REG_BITS == 64
43
- case INDEX_op_mov_i64:
44
- t0 = *tb_ptr++;
45
- t1 = tci_read_r(regs, &tb_ptr);
46
- tci_write_reg(regs, t0, t1);
47
- break;
48
case INDEX_op_tci_movi_i64:
49
t0 = *tb_ptr++;
50
t1 = tci_read_i64(&tb_ptr);
51
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
52
tci_write_reg(regs, t0, bswap64(t1));
53
break;
54
#endif
55
-#if TCG_TARGET_HAS_not_i64
56
- case INDEX_op_not_i64:
57
- t0 = *tb_ptr++;
58
- t1 = tci_read_r(regs, &tb_ptr);
59
- tci_write_reg(regs, t0, ~t1);
60
- break;
61
-#endif
62
-#if TCG_TARGET_HAS_neg_i64
63
- case INDEX_op_neg_i64:
64
- t0 = *tb_ptr++;
65
- t1 = tci_read_r(regs, &tb_ptr);
66
- tci_write_reg(regs, t0, -t1);
67
- break;
68
-#endif
69
#endif /* TCG_TARGET_REG_BITS == 64 */
70
71
/* QEMU specific operations. */
72
--
73
2.25.1
74
75
New patch

From: Alex Bennée <alex.bennee@linaro.org>

Having a function return both a valid TB and some system state
seems excessive. It will make the subsequent re-factoring easier if we
look up the current state where we are.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-2-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/tb-lookup.h | 18 ++++++++----------
 accel/tcg/cpu-exec.c | 10 ++++++++--
 accel/tcg/tcg-runtime.c | 4 +++-
 3 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/tb-lookup.h
19
+++ b/include/exec/tb-lookup.h
20
@@ -XXX,XX +XXX,XX @@
21
#include "exec/tb-hash.h"
22
23
/* Might cause an exception, so have a longjmp destination ready */
24
-static inline TranslationBlock *
25
-tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
26
- uint32_t *flags, uint32_t cf_mask)
27
+static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
28
+ target_ulong cs_base,
29
+ uint32_t flags, uint32_t cf_mask)
30
{
31
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
32
TranslationBlock *tb;
33
uint32_t hash;
34
35
- cpu_get_tb_cpu_state(env, pc, cs_base, flags);
36
- hash = tb_jmp_cache_hash_func(*pc);
37
+ hash = tb_jmp_cache_hash_func(pc);
38
tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
39
40
cf_mask &= ~CF_CLUSTER_MASK;
41
cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
42
43
if (likely(tb &&
44
- tb->pc == *pc &&
45
- tb->cs_base == *cs_base &&
46
- tb->flags == *flags &&
47
+ tb->pc == pc &&
48
+ tb->cs_base == cs_base &&
49
+ tb->flags == flags &&
50
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
51
(tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
52
return tb;
53
}
54
- tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
55
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
56
if (tb == NULL) {
57
return NULL;
58
}
59
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/accel/tcg/cpu-exec.c
62
+++ b/accel/tcg/cpu-exec.c
63
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_exit(CPUState *cpu)
64
65
void cpu_exec_step_atomic(CPUState *cpu)
66
{
67
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
68
TranslationBlock *tb;
69
target_ulong cs_base, pc;
70
uint32_t flags;
71
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
72
g_assert(!cpu->running);
73
cpu->running = true;
74
75
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
76
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
77
+ tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
78
+
79
if (tb == NULL) {
80
mmap_lock();
81
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
82
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu,
83
TranslationBlock *last_tb,
84
int tb_exit, uint32_t cf_mask)
85
{
86
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
87
TranslationBlock *tb;
88
target_ulong cs_base, pc;
89
uint32_t flags;
90
91
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
92
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
93
+
94
+ tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
95
if (tb == NULL) {
96
mmap_lock();
97
tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
98
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/accel/tcg/tcg-runtime.c
101
+++ b/accel/tcg/tcg-runtime.c
102
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
103
target_ulong cs_base, pc;
104
uint32_t flags;
105
106
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
107
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
108
+
109
+ tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags());
110
if (tb == NULL) {
111
return tcg_code_gen_epilogue;
112
}
113
--
114
2.25.1
115
116
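The shape of the refactor -- the caller extracts pc/cs_base/flags once and the lookup takes them by value instead of writing through out-parameters -- is easy to see in miniature. Everything below is an invented toy (ToyCPU, get_cpu_state, lookup); only the calling convention mirrors the patch:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pc, cs_base; uint32_t flags; } ToyCPU;

/* State extraction stays in one place... */
static void get_cpu_state(const ToyCPU *cpu, uint64_t *pc,
                          uint64_t *cs_base, uint32_t *flags)
{
    *pc = cpu->pc;
    *cs_base = cpu->cs_base;
    *flags = cpu->flags;
}

/* ...while the lookup itself only sees plain values. */
static int lookup(uint64_t pc, uint64_t cs_base, uint32_t flags)
{
    /* stand-in for the jump-cache / hash-table probe */
    return (int)((pc ^ cs_base ^ flags) & 0xff);
}

int main(void)
{
    ToyCPU cpu = { 0x1000, 0x0, 0x3 };
    uint64_t pc, cs_base;
    uint32_t flags;

    get_cpu_state(&cpu, &pc, &cs_base, &flags);  /* hoisted to the caller */
    printf("slot %d\n", lookup(pc, cs_base, flags));
    return 0;
}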
From: Kele Huang <kele.hwang@gmail.com>

Detect all MIPS store instructions in cpu_signal_handler for all available
MIPS versions, and set is_write if encountering such store instructions.

This fixes the error seen when dealing with self-modifying code on MIPS.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Kele Huang <kele.hwang@gmail.com>
Signed-off-by: Xu Zou <iwatchnima@gmail.com>
Message-Id: <20201002081420.10814-1-kele.hwang@gmail.com>
[rth: Use uintptr_t for pc to fix n32 build error.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/user-exec.c | 43 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 39 insertions(+), 4 deletions(-)

From: Alex Bennée <alex.bennee@linaro.org>

There is nothing special about this compile flag: we can just compute
it with curr_cflags(), which we should be using when building a new set.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-3-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 8 +++++---
 include/exec/tb-lookup.h | 3 ---
 accel/tcg/cpu-exec.c | 9 ++++-----
 accel/tcg/tcg-runtime.c | 2 +-
 accel/tcg/translate-all.c | 6 +++---
 softmmu/physmem.c | 2 +-
 6 files changed, 14 insertions(+), 16 deletions(-)
17
18
18
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
19
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
19
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
20
--- a/accel/tcg/user-exec.c
21
--- a/include/exec/exec-all.h
21
+++ b/accel/tcg/user-exec.c
22
+++ b/include/exec/exec-all.h
22
@@ -XXX,XX +XXX,XX @@ int cpu_signal_handler(int host_signum, void *pinfo,
23
@@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
23
24
}
24
#elif defined(__mips__)
25
25
26
/* current cflags for hashing/comparison */
26
+#if defined(__misp16) || defined(__mips_micromips)
27
-static inline uint32_t curr_cflags(void)
27
+#error "Unsupported encoding"
28
+static inline uint32_t curr_cflags(CPUState *cpu)
28
+#endif
29
+
30
int cpu_signal_handler(int host_signum, void *pinfo,
31
void *puc)
32
{
29
{
33
siginfo_t *info = pinfo;
30
- return (parallel_cpus ? CF_PARALLEL : 0)
34
ucontext_t *uc = puc;
31
- | (icount_enabled() ? CF_USE_ICOUNT : 0);
35
- greg_t pc = uc->uc_mcontext.pc;
32
+ uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index);
36
- int is_write;
33
+ cflags |= parallel_cpus ? CF_PARALLEL : 0;
37
+ uintptr_t pc = uc->uc_mcontext.pc;
34
+ cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
38
+ uint32_t insn = *(uint32_t *)pc;
35
+ return cflags;
39
+ int is_write = 0;
40
+
41
+ /* Detect all store instructions at program counter. */
42
+ switch((insn >> 26) & 077) {
43
+ case 050: /* SB */
44
+ case 051: /* SH */
45
+ case 052: /* SWL */
46
+ case 053: /* SW */
47
+ case 054: /* SDL */
48
+ case 055: /* SDR */
49
+ case 056: /* SWR */
50
+ case 070: /* SC */
51
+ case 071: /* SWC1 */
52
+ case 074: /* SCD */
53
+ case 075: /* SDC1 */
54
+ case 077: /* SD */
55
+#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
56
+ case 072: /* SWC2 */
57
+ case 076: /* SDC2 */
58
+#endif
59
+ is_write = 1;
60
+ break;
61
+ case 023: /* COP1X */
62
+ /* Required in all versions of MIPS64 since
63
+ MIPS64r1 and subsequent versions of MIPS32r2. */
64
+ switch (insn & 077) {
65
+ case 010: /* SWXC1 */
66
+ case 011: /* SDXC1 */
67
+ case 015: /* SUXC1 */
68
+ is_write = 1;
69
+ }
70
+ break;
71
+ }
72
73
- /* XXX: compute is_write */
74
- is_write = 0;
75
return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
76
}
36
}
77
37
38
/* TranslationBlock invalidate API */
39
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/include/exec/tb-lookup.h
42
+++ b/include/exec/tb-lookup.h
43
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
44
hash = tb_jmp_cache_hash_func(pc);
45
tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
46
47
- cf_mask &= ~CF_CLUSTER_MASK;
48
- cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
49
-
50
if (likely(tb &&
51
tb->pc == pc &&
52
tb->cs_base == cs_base &&
53
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/accel/tcg/cpu-exec.c
56
+++ b/accel/tcg/cpu-exec.c
57
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
58
TranslationBlock *tb;
59
target_ulong cs_base, pc;
60
uint32_t flags;
61
- uint32_t cflags = 1;
62
- uint32_t cf_mask = cflags & CF_HASH_MASK;
63
+ uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
64
int tb_exit;
65
66
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
67
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
68
cpu->running = true;
69
70
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
71
- tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
72
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
73
74
if (tb == NULL) {
75
mmap_lock();
76
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
77
if (replay_has_exception()
78
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
79
/* Execute just one insn to trigger exception pending in the log */
80
- cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
81
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
82
}
83
#endif
84
return false;
85
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
86
have CF_INVALID set, -1 is a convenient invalid value that
87
does not require tcg headers for cpu_common_reset. */
88
if (cflags == -1) {
89
- cflags = curr_cflags();
90
+ cflags = curr_cflags(cpu);
91
} else {
92
cpu->cflags_next_tb = -1;
93
}
94
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/accel/tcg/tcg-runtime.c
97
+++ b/accel/tcg/tcg-runtime.c
98
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
99
100
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
101
102
- tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags());
103
+ tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
104
if (tb == NULL) {
105
return tcg_code_gen_epilogue;
106
}
107
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/accel/tcg/translate-all.c
110
+++ b/accel/tcg/translate-all.c
111
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
112
if (current_tb_modified) {
113
page_collection_unlock(pages);
114
/* Force execution of one insn next time. */
115
- cpu->cflags_next_tb = 1 | curr_cflags();
116
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
117
mmap_unlock();
118
cpu_loop_exit_noexc(cpu);
119
}
120
@@ -XXX,XX +XXX,XX @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
121
#ifdef TARGET_HAS_PRECISE_SMC
122
if (current_tb_modified) {
123
/* Force execution of one insn next time. */
124
- cpu->cflags_next_tb = 1 | curr_cflags();
125
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
126
return true;
127
}
128
#endif
129
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
130
* operations only (which execute after completion) so we don't
131
* double instrument the instruction.
132
*/
133
- cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n;
134
+ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
135
136
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
137
"cpu_io_recompile: rewound execution of TB to "
138
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/softmmu/physmem.c
141
+++ b/softmmu/physmem.c
142
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
143
cpu_loop_exit_restore(cpu, ra);
144
} else {
145
/* Force execution of one insn next time. */
146
- cpu->cflags_next_tb = 1 | curr_cflags();
147
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
148
mmap_unlock();
149
if (ra) {
150
cpu_restore_state(cpu, ra, true);
78
--
151
--
79
2.25.1
152
2.25.1
80
153
81
154
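For the MIPS fix, the classification is a straight decode of the faulting instruction: the major opcode sits in bits 31:26, so "(insn >> 26) & 077" yields the octal opcode numbers the patch lists (050 is SB, 053 is SW, and so on). A reduced stand-alone sketch covering a few of the store opcodes; the two test encodings are ordinary sw/lw instructions per the MIPS manuals and are only examples:

#include <stdint.h>
#include <stdio.h>

/* Return 1 if the major opcode is one of a few store instructions.
 * Deliberately incomplete: the real patch also handles SWL/SDL/SC/SWC1/
 * COP1X and friends. */
static int insn_is_store(uint32_t insn)
{
    switch ((insn >> 26) & 077) {
    case 050: /* SB */
    case 051: /* SH */
    case 053: /* SW */
    case 077: /* SD */
        return 1;
    default:
        return 0;
    }
}

int main(void)
{
    uint32_t sw = 0xafc20000u;  /* sw $v0, 0($s8) -- major opcode 053 */
    uint32_t lw = 0x8fc20000u;  /* lw $v0, 0($s8) -- major opcode 043 */

    printf("sw -> %d, lw -> %d\n", insn_is_store(sw), insn_is_store(lw));
    return 0;
}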
The definition of INDEX_op_dupi_vec is that it operates on
units of tcg_target_ulong -- in this case 32 bits. It does
not work to use this for a uint64_t value that happens to be
small enough to fit in tcg_target_ulong.

Fixes: d2fd745fe8b
Fixes: db432672dc5
Cc: qemu-stable@nongnu.org
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op-vec.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

From: Alex Bennée <alex.bennee@linaro.org>

We don't really deal in cf_mask most of the time. The one time it's
relevant is when we want to remove an invalidated TB from the QHT
lookup. Everywhere else we should be looking up things without
CF_INVALID set.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-4-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 4 +---
 include/exec/tb-lookup.h | 9 ++++++---
 accel/tcg/cpu-exec.c | 16 ++++++++--------
 accel/tcg/tcg-runtime.c | 2 +-
 accel/tcg/translate-all.c | 8 +++++---
 5 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
18
18
@@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
19
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
19
20
index XXXXXXX..XXXXXXX 100644
20
void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a)
21
--- a/include/exec/exec-all.h
22
+++ b/include/exec/exec-all.h
23
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
24
#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
25
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
26
#define CF_CLUSTER_SHIFT 24
27
-/* cflags' mask for hashing/comparison, basically ignore CF_INVALID */
28
-#define CF_HASH_MASK (~CF_INVALID)
29
30
/* Per-vCPU dynamic tracing state used to generate this TB */
31
uint32_t trace_vcpu_dstate;
32
@@ -XXX,XX +XXX,XX @@ void tb_flush(CPUState *cpu);
33
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
34
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
35
target_ulong cs_base, uint32_t flags,
36
- uint32_t cf_mask);
37
+ uint32_t cflags);
38
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
39
40
/* GETPC is the true target of the return instruction that we'll execute. */
41
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/include/exec/tb-lookup.h
44
+++ b/include/exec/tb-lookup.h
45
@@ -XXX,XX +XXX,XX @@
46
/* Might cause an exception, so have a longjmp destination ready */
47
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
48
target_ulong cs_base,
49
- uint32_t flags, uint32_t cf_mask)
50
+ uint32_t flags, uint32_t cflags)
21
{
51
{
22
- if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) {
52
TranslationBlock *tb;
23
- do_dupi_vec(r, MO_32, a);
53
uint32_t hash;
24
- } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) {
54
25
+ if (TCG_TARGET_REG_BITS == 64) {
55
+ /* we should never be trying to look up an INVALID tb */
26
do_dupi_vec(r, MO_64, a);
56
+ tcg_debug_assert(!(cflags & CF_INVALID));
27
+ } else if (a == dup_const(MO_32, a)) {
57
+
28
+ do_dupi_vec(r, MO_32, a);
58
hash = tb_jmp_cache_hash_func(pc);
29
} else {
59
tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
30
TCGv_i64 c = tcg_const_i64(a);
60
31
tcg_gen_dup_i64_vec(MO_64, r, c);
61
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
32
@@ -XXX,XX +XXX,XX @@ void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a)
62
tb->cs_base == cs_base &&
33
63
tb->flags == flags &&
34
void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
64
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
65
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
66
+ tb_cflags(tb) == cflags)) {
67
return tb;
68
}
69
- tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
70
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
71
if (tb == NULL) {
72
return NULL;
73
}
74
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/accel/tcg/cpu-exec.c
77
+++ b/accel/tcg/cpu-exec.c
78
@@ -XXX,XX +XXX,XX @@ struct tb_desc {
79
CPUArchState *env;
80
tb_page_addr_t phys_page1;
81
uint32_t flags;
82
- uint32_t cf_mask;
83
+ uint32_t cflags;
84
uint32_t trace_vcpu_dstate;
85
};
86
87
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
88
tb->cs_base == desc->cs_base &&
89
tb->flags == desc->flags &&
90
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
91
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
92
+ tb_cflags(tb) == desc->cflags) {
93
/* check next page if needed */
94
if (tb->page_addr[1] == -1) {
95
return true;
96
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
97
98
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
99
target_ulong cs_base, uint32_t flags,
100
- uint32_t cf_mask)
101
+ uint32_t cflags)
35
{
102
{
36
- do_dupi_vec(r, MO_REG, dup_const(vece, a));
103
tb_page_addr_t phys_pc;
37
+ if (vece == MO_64) {
104
struct tb_desc desc;
38
+ tcg_gen_dup64i_vec(r, a);
105
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
39
+ } else {
106
desc.env = (CPUArchState *)cpu->env_ptr;
40
+ do_dupi_vec(r, MO_REG, dup_const(vece, a));
107
desc.cs_base = cs_base;
41
+ }
108
desc.flags = flags;
109
- desc.cf_mask = cf_mask;
110
+ desc.cflags = cflags;
111
desc.trace_vcpu_dstate = *cpu->trace_dstate;
112
desc.pc = pc;
113
phys_pc = get_page_addr_code(desc.env, pc);
114
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
115
return NULL;
116
}
117
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
118
- h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
119
+ h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
120
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
42
}
121
}
43
122
44
void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)
123
@@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
124
125
static inline TranslationBlock *tb_find(CPUState *cpu,
126
TranslationBlock *last_tb,
127
- int tb_exit, uint32_t cf_mask)
128
+ int tb_exit, uint32_t cflags)
129
{
130
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
131
TranslationBlock *tb;
132
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu,
133
134
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
135
136
- tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
137
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
138
if (tb == NULL) {
139
mmap_lock();
140
- tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
141
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
142
mmap_unlock();
143
/* We add the TB in the virtual pc hash table for the fast lookup */
144
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
145
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/accel/tcg/tcg-runtime.c
148
+++ b/accel/tcg/tcg-runtime.c
149
@@ -XXX,XX +XXX,XX @@
150
#include "exec/helper-proto.h"
151
#include "exec/cpu_ldst.h"
152
#include "exec/exec-all.h"
153
-#include "exec/tb-lookup.h"
154
#include "disas/disas.h"
155
#include "exec/log.h"
156
#include "tcg/tcg.h"
157
+#include "exec/tb-lookup.h"
158
159
/* 32-bit helpers */
160
161
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
162
index XXXXXXX..XXXXXXX 100644
163
--- a/accel/tcg/translate-all.c
164
+++ b/accel/tcg/translate-all.c
165
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
166
return a->pc == b->pc &&
167
a->cs_base == b->cs_base &&
168
a->flags == b->flags &&
169
- (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
170
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
171
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
172
a->page_addr[0] == b->page_addr[0] &&
173
a->page_addr[1] == b->page_addr[1];
174
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
175
PageDesc *p;
176
uint32_t h;
177
tb_page_addr_t phys_pc;
178
+ uint32_t orig_cflags = tb_cflags(tb);
179
180
assert_memory_lock();
181
182
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
183
184
/* remove the TB from the hash list */
185
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
186
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
187
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
188
tb->trace_vcpu_dstate);
189
if (!qht_remove(&tb_ctx.htable, tb, h)) {
190
return;
191
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
192
uint32_t h;
193
194
assert_memory_lock();
195
+ tcg_debug_assert(!(tb->cflags & CF_INVALID));
196
197
/*
198
* Add the TB to the page list, acquiring first the pages's locks.
199
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
200
}
201
202
/* add in the hash table */
203
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
204
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
205
tb->trace_vcpu_dstate);
206
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
207
45
--
208
--
46
2.25.1
209
2.25.1
47
210
48
211
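The 32-bit-host fix hinges on one test: a 64-bit immediate may only be emitted as a MO_32 dupi when it is a 32-bit value replicated into both halves, which is what the a == dup_const(MO_32, a) check establishes. A stand-alone sketch of just that predicate (QEMU's dup_const() also handles MO_8 and MO_16; dup_const32 here is an invented name):

#include <stdint.h>
#include <stdio.h>

/* Replicate a 32-bit value into both halves of a 64-bit word. */
static uint64_t dup_const32(uint32_t x)
{
    return ((uint64_t)x << 32) | x;
}

int main(void)
{
    uint64_t a = 0xdeadbeefdeadbeefull;   /* replicated: can use the MO_32 path */
    uint64_t b = 0x00000001deadbeefull;   /* not replicated: needs a full 64-bit dup */

    printf("a: %s\n", a == dup_const32((uint32_t)a) ? "MO_32 dupi" : "MO_64 path");
    printf("b: %s\n", b == dup_const32((uint32_t)b) ? "MO_32 dupi" : "MO_64 path");
    return 0;
}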
When the two arguments are identical, this can be reduced to
dup_vec or to mov_vec from a tcg_constant_vec.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

From: Alex Bennée <alex.bennee@linaro.org>

Let's make sure all the flags we compare when looking up blocks are
together in the same place.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210224165811.11567-5-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
8
12
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
10
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
15
--- a/include/exec/exec-all.h
12
+++ b/tcg/optimize.c
16
+++ b/include/exec/exec-all.h
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
17
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
14
}
18
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
15
goto do_default;
19
target_ulong cs_base; /* CS base for this block */
16
20
uint32_t flags; /* flags defining in which context the code was generated */
17
+ case INDEX_op_dup2_vec:
21
- uint16_t size; /* size of target code for this block (1 <=
18
+ assert(TCG_TARGET_REG_BITS == 32);
22
- size <= TARGET_PAGE_SIZE) */
19
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
23
- uint16_t icount;
20
+ tmp = arg_info(op->args[1])->val;
24
uint32_t cflags; /* compile flags */
21
+ if (tmp == arg_info(op->args[2])->val) {
25
#define CF_COUNT_MASK 0x00007fff
22
+ tcg_opt_gen_movi(s, op, op->args[0], tmp);
26
#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
23
+ break;
27
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
24
+ }
28
/* Per-vCPU dynamic tracing state used to generate this TB */
25
+ } else if (args_are_copies(op->args[1], op->args[2])) {
29
uint32_t trace_vcpu_dstate;
26
+ op->opc = INDEX_op_dup_vec;
30
27
+ TCGOP_VECE(op) = MO_32;
31
+ /*
28
+ nb_iargs = 1;
32
+ * Above fields used for comparing
29
+ }
33
+ */
30
+ goto do_default;
31
+
34
+
32
CASE_OP_32_64(not):
35
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
33
CASE_OP_32_64(neg):
36
+ uint16_t size;
34
CASE_OP_32_64(ext8s):
37
+ uint16_t icount;
38
+
39
struct tb_tc tc;
40
41
/* first and second physical page containing code. The lower bit
35
--
42
--
36
2.25.1
43
2.25.1
37
44
38
45
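The optimizer change can be read as a two-way decision on the dup2_vec inputs: if both 32-bit parts are the same known constant the op collapses to a movi of the replicated value, and if they are merely copies of one another it collapses to a 32-bit dup. A toy model of that decision; the names and the enum are invented, only the branch structure follows the patch:

#include <stdint.h>
#include <stdio.h>

enum fold { FOLD_TO_DUPI32, FOLD_TO_DUP32, NO_FOLD };

/* Decide how (if at all) dup2_vec(lo, hi) can be simplified. */
static enum fold fold_dup2(int lo_is_const, int hi_is_const,
                           uint32_t lo, uint32_t hi, int same_temp)
{
    if (lo_is_const && hi_is_const && lo == hi) {
        return FOLD_TO_DUPI32;      /* immediate dup of one 32-bit constant */
    }
    if (same_temp) {
        return FOLD_TO_DUP32;       /* dup_vec of the shared 32-bit temp */
    }
    return NO_FOLD;                 /* leave the op alone */
}

int main(void)
{
    printf("%d\n", fold_dup2(1, 1, 0xdeadbeefu, 0xdeadbeefu, 0)); /* 0: dupi */
    printf("%d\n", fold_dup2(0, 0, 0, 0, 1));                     /* 1: dup  */
    printf("%d\n", fold_dup2(1, 1, 1, 2, 0));                     /* 2: none */
    return 0;
}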
This uses an existing hole in the TCGArgConstraint structure
and will be convenient for keeping the data in one place.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 2 +-
 tcg/tcg.c | 35 +++++++++++++++++------------------
 2 files changed, 18 insertions(+), 19 deletions(-)

The primary motivation is to remove a dozen insns along
the fast-path in tb_lookup. As a byproduct, this allows
us to completely remove parallel_cpus.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-accel-ops.h | 1 +
 include/exec/exec-all.h | 7 +------
 include/hw/core/cpu.h | 2 ++
 accel/tcg/cpu-exec.c | 3 ---
 accel/tcg/tcg-accel-ops-mttcg.c | 3 +--
 accel/tcg/tcg-accel-ops-rr.c | 2 +-
 accel/tcg/tcg-accel-ops.c | 8 ++++++++
 accel/tcg/translate-all.c | 4 ----
 linux-user/main.c | 1 +
 linux-user/sh4/signal.c | 8 +++++---
 linux-user/syscall.c | 18 ++++++++++--------
 11 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void);
 typedef struct TCGArgConstraint {
     uint16_t ct;
     uint8_t alias_index;
18
+ uint8_t sort_index;
20
19
TCGRegSet regs;
21
diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h
20
} TCGArgConstraint;
22
index XXXXXXX..XXXXXXX 100644
21
23
--- a/accel/tcg/tcg-accel-ops.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef {
24
+++ b/accel/tcg/tcg-accel-ops.h
23
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
25
@@ -XXX,XX +XXX,XX @@
24
uint8_t flags;
26
void tcg_cpus_destroy(CPUState *cpu);
25
TCGArgConstraint *args_ct;
27
int tcg_cpus_exec(CPUState *cpu);
26
- int *sorted_args;
28
void tcg_handle_interrupt(CPUState *cpu, int mask);
27
#if defined(CONFIG_DEBUG_TCG)
29
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);
28
int used;
30
29
#endif
31
#endif /* TCG_CPUS_H */
30
diff --git a/tcg/tcg.c b/tcg/tcg.c
32
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
31
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/tcg.c
34
--- a/include/exec/exec-all.h
33
+++ b/tcg/tcg.c
35
+++ b/include/exec/exec-all.h
34
@@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s)
36
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
35
int op, total_args, n, i;
37
uintptr_t jmp_dest[2];
36
TCGOpDef *def;
38
};
37
TCGArgConstraint *args_ct;
39
38
- int *sorted_args;
40
-extern bool parallel_cpus;
39
TCGTemp *ts;
41
-
40
42
/* Hide the qatomic_read to make code a little easier on the eyes */
41
memset(s, 0, sizeof(*s));
43
static inline uint32_t tb_cflags(const TranslationBlock *tb)
42
@@ -XXX,XX +XXX,XX @@ void tcg_context_init(TCGContext *s)
44
{
45
@@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
46
/* current cflags for hashing/comparison */
47
static inline uint32_t curr_cflags(CPUState *cpu)
48
{
49
- uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index);
50
- cflags |= parallel_cpus ? CF_PARALLEL : 0;
51
- cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
52
- return cflags;
53
+ return cpu->tcg_cflags;
54
}
55
56
/* TranslationBlock invalidate API */
57
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/include/hw/core/cpu.h
60
+++ b/include/hw/core/cpu.h
61
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
62
* to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
63
* be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
64
* QOM parent.
65
+ * @tcg_cflags: Pre-computed cflags for this cpu.
66
* @nr_cores: Number of cores within this CPU package.
67
* @nr_threads: Number of threads within this CPU.
68
* @running: #true if CPU is currently running (lockless).
69
@@ -XXX,XX +XXX,XX @@ struct CPUState {
70
/* TODO Move common fields from CPUArchState here. */
71
int cpu_index;
72
int cluster_index;
73
+ uint32_t tcg_cflags;
74
uint32_t halted;
75
uint32_t can_do_io;
76
int32_t exception_index;
77
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/accel/tcg/cpu-exec.c
80
+++ b/accel/tcg/cpu-exec.c
81
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
82
mmap_unlock();
83
}
84
85
- /* Since we got here, we know that parallel_cpus must be true. */
86
- parallel_cpus = false;
87
cpu_exec_enter(cpu);
88
/* execute the generated code */
89
trace_exec_tb(tb, pc);
90
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
91
* the execution.
92
*/
93
g_assert(cpu_in_exclusive_context(cpu));
94
- parallel_cpus = true;
95
cpu->running = false;
96
end_exclusive();
97
}
98
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/accel/tcg/tcg-accel-ops-mttcg.c
101
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
102
@@ -XXX,XX +XXX,XX @@ void mttcg_start_vcpu_thread(CPUState *cpu)
103
char thread_name[VCPU_THREAD_NAME_SIZE];
104
105
g_assert(tcg_enabled());
106
-
107
- parallel_cpus = (current_machine->smp.max_cpus > 1);
108
+ tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
109
110
cpu->thread = g_malloc0(sizeof(QemuThread));
111
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
112
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/accel/tcg/tcg-accel-ops-rr.c
115
+++ b/accel/tcg/tcg-accel-ops-rr.c
116
@@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu)
117
static QemuThread *single_tcg_cpu_thread;
118
119
g_assert(tcg_enabled());
120
- parallel_cpus = false;
121
+ tcg_cpu_init_cflags(cpu, false);
122
123
if (!single_tcg_cpu_thread) {
124
cpu->thread = g_malloc0(sizeof(QemuThread));
125
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/accel/tcg/tcg-accel-ops.c
128
+++ b/accel/tcg/tcg-accel-ops.c
129
@@ -XXX,XX +XXX,XX @@
130
131
/* common functionality among all TCG variants */
132
133
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
134
+{
135
+ uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
136
+ cflags |= parallel ? CF_PARALLEL : 0;
137
+ cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
138
+ cpu->tcg_cflags = cflags;
139
+}
140
+
141
void tcg_cpus_destroy(CPUState *cpu)
142
{
143
cpu_thread_signal_destroyed(cpu);
144
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/accel/tcg/translate-all.c
147
+++ b/accel/tcg/translate-all.c
148
@@ -XXX,XX +XXX,XX @@ static void *l1_map[V_L1_MAX_SIZE];
149
TCGContext tcg_init_ctx;
150
__thread TCGContext *tcg_ctx;
151
TBContext tb_ctx;
152
-bool parallel_cpus;
153
154
static void page_table_config_init(void)
155
{
156
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
157
cflags = (cflags & ~CF_COUNT_MASK) | 1;
43
}
158
}
44
159
45
args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
160
- cflags &= ~CF_CLUSTER_MASK;
46
- sorted_args = g_malloc(sizeof(int) * total_args);
161
- cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
47
162
-
48
for(op = 0; op < NB_OPS; op++) {
163
max_insns = cflags & CF_COUNT_MASK;
49
def = &tcg_op_defs[op];
164
if (max_insns == 0) {
50
def->args_ct = args_ct;
165
max_insns = CF_COUNT_MASK;
51
- def->sorted_args = sorted_args;
166
diff --git a/linux-user/main.c b/linux-user/main.c
52
n = def->nb_iargs + def->nb_oargs;
167
index XXXXXXX..XXXXXXX 100644
53
- sorted_args += n;
168
--- a/linux-user/main.c
54
args_ct += n;
169
+++ b/linux-user/main.c
55
}
170
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env)
56
171
/* Reset non arch specific state */
57
@@ -XXX,XX +XXX,XX @@ static int get_constraint_priority(const TCGOpDef *def, int k)
172
cpu_reset(new_cpu);
58
/* sort from highest priority to lowest */
173
59
static void sort_constraints(TCGOpDef *def, int start, int n)
174
+ new_cpu->tcg_cflags = cpu->tcg_cflags;
60
{
175
memcpy(new_env, env, sizeof(CPUArchState));
61
- int i, j, p1, p2, tmp;
176
62
+ int i, j;
177
/* Clone all break/watchpoints.
63
+ TCGArgConstraint *a = def->args_ct;
178
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
64
179
index XXXXXXX..XXXXXXX 100644
65
- for(i = 0; i < n; i++)
180
--- a/linux-user/sh4/signal.c
66
- def->sorted_args[start + i] = start + i;
181
+++ b/linux-user/sh4/signal.c
67
- if (n <= 1)
182
@@ -XXX,XX +XXX,XX @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
68
+ for (i = 0; i < n; i++) {
183
return (sp - frame_size) & -8ul;
69
+ a[start + i].sort_index = start + i;
184
}
70
+ }
185
71
+ if (n <= 1) {
186
-/* Notice when we're in the middle of a gUSA region and reset.
72
return;
187
- Note that this will only occur for !parallel_cpus, as we will
73
- for(i = 0; i < n - 1; i++) {
188
- translate such sequences differently in a parallel context. */
74
- for(j = i + 1; j < n; j++) {
189
+/*
75
- p1 = get_constraint_priority(def, def->sorted_args[start + i]);
190
+ * Notice when we're in the middle of a gUSA region and reset.
76
- p2 = get_constraint_priority(def, def->sorted_args[start + j]);
191
+ * Note that this will only occur when #CF_PARALLEL is unset, as we
77
+ }
192
+ * will translate such sequences differently in a parallel context.
78
+ for (i = 0; i < n - 1; i++) {
193
+ */
79
+ for (j = i + 1; j < n; j++) {
194
static void unwind_gusa(CPUSH4State *regs)
80
+ int p1 = get_constraint_priority(def, a[start + i].sort_index);
195
{
81
+ int p2 = get_constraint_priority(def, a[start + j].sort_index);
196
/* If the stack pointer is sufficiently negative, and we haven't
82
if (p1 < p2) {
197
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
83
- tmp = def->sorted_args[start + i];
198
index XXXXXXX..XXXXXXX 100644
84
- def->sorted_args[start + i] = def->sorted_args[start + j];
199
--- a/linux-user/syscall.c
85
- def->sorted_args[start + j] = tmp;
200
+++ b/linux-user/syscall.c
86
+ int tmp = a[start + i].sort_index;
201
@@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
87
+ a[start + i].sort_index = a[start + j].sort_index;
202
/* Grab a mutex so that thread setup appears atomic. */
88
+ a[start + j].sort_index = tmp;
203
pthread_mutex_lock(&clone_lock);
89
}
204
90
}
205
+ /*
91
}
206
+ * If this is our first additional thread, we need to ensure we
92
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
207
+ * generate code for parallel execution and flush old translations.
93
for (k = 0; k < nb_iargs; k++) {
208
+ * Do this now so that the copy gets CF_PARALLEL too.
94
TCGRegSet i_preferred_regs, o_preferred_regs;
209
+ */
95
210
+ if (!(cpu->tcg_cflags & CF_PARALLEL)) {
96
- i = def->sorted_args[nb_oargs + k];
211
+ cpu->tcg_cflags |= CF_PARALLEL;
97
+ i = def->args_ct[nb_oargs + k].sort_index;
212
+ tb_flush(cpu);
98
arg = op->args[i];
213
+ }
99
arg_ct = &def->args_ct[i];
214
+
100
ts = arg_temp(arg);
215
/* we create a new CPU instance. */
101
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
216
new_env = cpu_copy(env);
102
int k2, i2;
217
/* Init regs that differ from the parent. */
103
reg = ts->reg;
218
@@ -XXX,XX +XXX,XX @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
104
for (k2 = 0 ; k2 < k ; k2++) {
219
sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
105
- i2 = def->sorted_args[nb_oargs + k2];
220
cpu->random_seed = qemu_guest_random_seed_thread_part1();
106
+ i2 = def->args_ct[nb_oargs + k2].sort_index;
221
107
if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
222
- /* If this is our first additional thread, we need to ensure we
108
reg == new_args[i2]) {
223
- * generate code for parallel execution and flush old translations.
109
goto allocate_in_reg;
224
- */
110
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
225
- if (!parallel_cpus) {
111
226
- parallel_cpus = true;
112
/* satisfy the output constraints */
227
- tb_flush(cpu);
113
for(k = 0; k < nb_oargs; k++) {
228
- }
114
- i = def->sorted_args[k];
229
-
115
+ i = def->args_ct[k].sort_index;
230
ret = pthread_create(&info.thread, &attr, clone_func, &info);
116
arg = op->args[i];
231
/* TODO: Free new CPU state if thread creation failed. */
117
arg_ct = &def->args_ct[i];
232
118
ts = arg_temp(arg);
119
--
233
--
120
2.25.1
234
2.25.1
121
235
122
236
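The final patch is a precompute-once pattern: everything in cflags that is fixed for the lifetime of a vCPU (cluster index, parallel mode, icount) is folded into cpu->tcg_cflags when the vCPU thread starts, so curr_cflags() on the hot lookup path becomes a single field load. A sketch of the idea with simplified stand-in constants and structs; the CF_* values match those quoted in the diffs above, but ToyCPU and init_cflags() are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define CF_PARALLEL      0x00080000u
#define CF_USE_ICOUNT    0x00020000u
#define CF_CLUSTER_SHIFT 24

typedef struct {
    int cluster_index;
    uint32_t tcg_cflags;
} ToyCPU;

/* Done once, when the vCPU thread is started. */
static void init_cflags(ToyCPU *cpu, bool parallel, bool icount)
{
    uint32_t cflags = (uint32_t)cpu->cluster_index << CF_CLUSTER_SHIFT;
    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount ? CF_USE_ICOUNT : 0;
    cpu->tcg_cflags = cflags;
}

/* The hot path no longer rebuilds the mask. */
static uint32_t curr_cflags(const ToyCPU *cpu)
{
    return cpu->tcg_cflags;
}

int main(void)
{
    ToyCPU cpu = { .cluster_index = 1 };

    init_cflags(&cpu, true, false);
    printf("cflags = %08x\n", curr_cflags(&cpu));   /* 01080000 */
    return 0;
}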