The following changes since commit 0a301624c2f4ced3331ffd5bce85b4274fe132af:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20220208' into staging (2022-02-08 11:40:08 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220211

for you to fetch changes up to 5c1a101ef6b85537a4ade93c39ea81cadd5c246e:

  tests/tcg/multiarch: Add sigbus.c (2022-02-09 09:00:01 +1100)

----------------------------------------------------------------
Fix safe_syscall_base for sparc64.
Fix host signal handling for sparc64-linux.
Speedups for jump cache and work list probing.
Fix for exception replays.
Raise guest SIGBUS for user-only misaligned accesses.

----------------------------------------------------------------
Idan Horowitz (2):
      accel/tcg: Optimize jump cache flush during tlb range flush
      softmmu/cpus: Check if the cpu work list is empty atomically

Pavel Dovgalyuk (1):
      replay: use CF_NOIRQ for special exception-replaying TB

Richard Henderson (29):
      common-user/host/sparc64: Fix safe_syscall_base
      linux-user: Introduce host_signal_mask
      linux-user: Introduce host_sigcontext
      linux-user: Move sparc/host-signal.h to sparc64/host-signal.h
      linux-user/include/host/sparc64: Fix host_sigcontext
      tcg/i386: Support raising sigbus for user-only
      tcg/aarch64: Support raising sigbus for user-only
      tcg/ppc: Support raising sigbus for user-only
      tcg/riscv: Support raising sigbus for user-only
      tcg/s390x: Support raising sigbus for user-only
      tcg/tci: Support raising sigbus for user-only
      tcg/arm: Drop support for armv4 and armv5 hosts
      tcg/arm: Remove use_armv5t_instructions
      tcg/arm: Remove use_armv6_instructions
      tcg/arm: Check alignment for ldrd and strd
      tcg/arm: Support unaligned access for softmmu
      tcg/arm: Reserve a register for guest_base
      tcg/arm: Support raising sigbus for user-only
      tcg/mips: Support unaligned access for user-only
      tcg/mips: Support unaligned access for softmmu
      tcg/sparc: Use tcg_out_movi_imm13 in tcg_out_addsub2_i64
      tcg/sparc: Split out tcg_out_movi_imm32
      tcg/sparc: Add scratch argument to tcg_out_movi_int
      tcg/sparc: Improve code gen for shifted 32-bit constants
      tcg/sparc: Convert patch_reloc to return bool
      tcg/sparc: Use the constant pool for 64-bit constants
      tcg/sparc: Add tcg_out_jmpl_const for better tail calls
      tcg/sparc: Support unaligned access for user-only
      tests/tcg/multiarch: Add sigbus.c

WANG Xuerui (2):
      tcg/loongarch64: Fix fallout from recent MO_Q renaming
      tcg/loongarch64: Support raising sigbus for user-only

 linux-user/include/host/aarch64/host-signal.h | 16 +-
 linux-user/include/host/alpha/host-signal.h | 14 +-
 linux-user/include/host/arm/host-signal.h | 14 +-
 linux-user/include/host/i386/host-signal.h | 14 +-
 linux-user/include/host/loongarch64/host-signal.h | 14 +-
 linux-user/include/host/mips/host-signal.h | 14 +-
 linux-user/include/host/ppc/host-signal.h | 14 +-
 linux-user/include/host/riscv/host-signal.h | 14 +-
 linux-user/include/host/s390/host-signal.h | 14 +-
 linux-user/include/host/sparc/host-signal.h | 63 ----
 linux-user/include/host/sparc64/host-signal.h | 65 +++-
 linux-user/include/host/x86_64/host-signal.h | 14 +-
 tcg/aarch64/tcg-target.h | 2 -
 tcg/arm/tcg-target.h | 6 +-
 tcg/i386/tcg-target.h | 2 -
 tcg/loongarch64/tcg-target.h | 2 -
 tcg/mips/tcg-target.h | 2 -
 tcg/ppc/tcg-target.h | 2 -
 tcg/riscv/tcg-target.h | 2 -
 tcg/s390x/tcg-target.h | 2 -
 accel/tcg/cpu-exec.c | 3 +-
 accel/tcg/cputlb.c | 9 +
 linux-user/signal.c | 22 +-
 softmmu/cpus.c | 7 +-
 tcg/tci.c | 20 +-
 tests/tcg/multiarch/sigbus.c | 68 ++++
 tcg/aarch64/tcg-target.c.inc | 91 ++++-
 tcg/arm/tcg-target.c.inc | 410 +++++++++-------------
 tcg/i386/tcg-target.c.inc | 103 +++++-
 tcg/loongarch64/tcg-target.c.inc | 73 +++-
 tcg/mips/tcg-target.c.inc | 387 ++++++++++++++++++--
 tcg/ppc/tcg-target.c.inc | 98 +++++-
 tcg/riscv/tcg-target.c.inc | 63 +++-
 tcg/s390x/tcg-target.c.inc | 59 +++-
 tcg/sparc/tcg-target.c.inc | 348 +++++++++++++++---
 common-user/host/sparc64/safe-syscall.inc.S | 5 +-
 36 files changed, 1561 insertions(+), 495 deletions(-)
 delete mode 100644 linux-user/include/host/sparc/host-signal.h
 create mode 100644 tests/tcg/multiarch/sigbus.c
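The last item above is exercised by the new tests/tcg/multiarch/sigbus.c. As a rough illustration only (this is not the test from the series; all names below are invented for the example), a user-only misaligned-access check has this shape:

    #include <signal.h>
    #include <stdlib.h>

    static void sigbus_handler(int sig)
    {
        exit(EXIT_SUCCESS);              /* misaligned access raised SIGBUS */
    }

    int main(void)
    {
        static char buf[16] __attribute__((aligned(8)));
        volatile long long *p = (volatile long long *)(buf + 1);

        signal(SIGBUS, sigbus_handler);
        *p = 42;                         /* deliberately misaligned store */
        return EXIT_FAILURE;             /* target tolerated the unaligned access */
    }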
Use the "retl" instead of "ret" instruction alias, since we
do not allocate a register window in this function.

Fix the offset to the first stacked parameter, which lies
beyond the register window save area.

Fixes: 95c021dac835 ("linux-user/host/sparc64: Add safe-syscall.inc.S")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 common-user/host/sparc64/safe-syscall.inc.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/common-user/host/sparc64/safe-syscall.inc.S b/common-user/host/sparc64/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/sparc64/safe-syscall.inc.S
+++ b/common-user/host/sparc64/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@
         .type   safe_syscall_end, @function

 #define STACK_BIAS  2047
-#define PARAM(N)    STACK_BIAS + N*8
+#define WINDOW_SIZE 16 * 8
+#define PARAM(N)    STACK_BIAS + WINDOW_SIZE + N * 8

         /*
          * This is the entry point for making a system call. The calling
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
         /* code path for having successfully executed the syscall */
         bcs,pn  %xcc, 1f
          nop
-        ret
+        retl
          nop

         /* code path when we didn't execute the syscall */
-- 
2.25.1
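A note on the two changes above, with an illustrative C rendering of the offset arithmetic. On SPARC, "retl" returns through %o7 and is correct for a leaf routine that never executed "save", whereas "ret" returns through %i7 and assumes a register window this function does not allocate. The stacked parameters likewise sit beyond the 16-register window save area, which is what the added WINDOW_SIZE term accounts for. The sketch below is only an illustration of that layout, assuming the SPARC V9 ABI; the helper and enum names are made up here and do not exist in the patch:

    /* Byte offset of stacked argument slot N, relative to the raw %sp. */
    enum {
        SPARC64_STACK_BIAS  = 2047,      /* %sp is biased on sparc64 */
        SPARC64_WINDOW_SIZE = 16 * 8,    /* %l0-%l7/%i0-%i7 save area */
    };

    static inline long sparc64_param_offset(int n)
    {
        return SPARC64_STACK_BIAS + SPARC64_WINDOW_SIZE + n * 8;
    }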
Do not directly access the uc_sigmask member.
This is preparation for a sparc64 fix.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/include/host/aarch64/host-signal.h | 5 +++++
 linux-user/include/host/alpha/host-signal.h | 5 +++++
 linux-user/include/host/arm/host-signal.h | 5 +++++
 linux-user/include/host/i386/host-signal.h | 5 +++++
 .../include/host/loongarch64/host-signal.h | 5 +++++
 linux-user/include/host/mips/host-signal.h | 5 +++++
 linux-user/include/host/ppc/host-signal.h | 5 +++++
 linux-user/include/host/riscv/host-signal.h | 5 +++++
 linux-user/include/host/s390/host-signal.h | 5 +++++
 linux-user/include/host/sparc/host-signal.h | 5 +++++
 linux-user/include/host/x86_64/host-signal.h | 5 +++++
 linux-user/signal.c | 18 ++++++++----------
 12 files changed, 63 insertions(+), 10 deletions(-)

diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/aarch64/host-signal.h
+++ b/linux-user/include/host/aarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.pc = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     struct _aarch64_ctx *hdr;
diff --git a/linux-user/include/host/alpha/host-signal.h b/linux-user/include/host/alpha/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/alpha/host-signal.h
+++ b/linux-user/include/host/alpha/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.sc_pc = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     uint32_t *pc = (uint32_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/arm/host-signal.h
+++ b/linux-user/include/host/arm/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.arm_pc = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     /*
diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/i386/host-signal.h
+++ b/linux-user/include/host/i386/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.gregs[REG_EIP] = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/loongarch64/host-signal.h
+++ b/linux-user/include/host/loongarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.__pc = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/mips/host-signal.h
+++ b/linux-user/include/host/mips/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.pc = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 #if defined(__misp16) || defined(__mips_micromips)
 #error "Unsupported encoding"
 #endif
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/ppc/host-signal.h
+++ b/linux-user/include/host/ppc/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.regs->nip = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     return uc->uc_mcontext.regs->trap != 0x400
diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/riscv/host-signal.h
+++ b/linux-user/include/host/riscv/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.__gregs[REG_PC] = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     /*
diff --git a/linux-user/include/host/s390/host-signal.h b/linux-user/include/host/s390/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/s390/host-signal.h
+++ b/linux-user/include/host/s390/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.psw.addr = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     uint16_t *pinsn = (uint16_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/sparc/host-signal.h
+++ b/linux-user/include/host/sparc/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
 #endif
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     uint32_t insn = *(uint32_t *)host_signal_pc(uc);
diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/x86_64/host-signal.h
+++ b/linux-user/include/host/x86_64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
     uc->uc_mcontext.gregs[REG_RIP] = pc;
 }

+static inline void *host_signal_mask(ucontext_t *uc)
+{
+    return &uc->uc_sigmask;
+}
+
 static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
 {
     return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
diff --git a/linux-user/signal.c b/linux-user/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
     int guest_sig;
     uintptr_t pc = 0;
     bool sync_sig = false;
+    void *sigmask = host_signal_mask(uc);

     /*
      * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
             if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                 /* If this was a write to a TB protected page, restart. */
                 if (is_write &&
-                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
-                                                pc, guest_addr)) {
+                    handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) {
                     return;
                 }

@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
             }
         }

-        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+        sigprocmask(SIG_SETMASK, sigmask, NULL);
         cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
     } else {
-        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+        sigprocmask(SIG_SETMASK, sigmask, NULL);
         if (info->si_code == BUS_ADRALN) {
             cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
         }
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
      * now and it getting out to the main loop. Signals will be
      * unblocked again in process_pending_signals().
      *
-     * WARNING: we cannot use sigfillset() here because the uc_sigmask
+     * WARNING: we cannot use sigfillset() here because the sigmask
      * field is a kernel sigset_t, which is much smaller than the
      * libc sigset_t which sigfillset() operates on. Using sigfillset()
      * would write 0xff bytes off the end of the structure and trash
      * data on the struct.
-     * We can't use sizeof(uc->uc_sigmask) either, because the libc
-     * headers define the struct field with the wrong (too large) type.
      */
-    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
-    sigdelset(&uc->uc_sigmask, SIGSEGV);
-    sigdelset(&uc->uc_sigmask, SIGBUS);
+    memset(sigmask, 0xff, SIGSET_T_SIZE);
+    sigdelset(sigmask, SIGSEGV);
+    sigdelset(sigmask, SIGBUS);

     /* interrupt the virtual CPU as soon as possible */
     cpu_exit(thread_cpu);
-- 
2.25.1
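Condensed from the linux-user/signal.c hunks above, the handler's use of the new accessor looks roughly like this (a fragment only, not a complete function, and reordered here for illustration):

    void *sigmask = host_signal_mask(uc);

    /* Paths that leave via cpu_loop_exit_* set the host mask explicitly,
     * since the handler will not return through the kernel: */
    sigprocmask(SIG_SETMASK, sigmask, NULL);

    /* The normal return path instead edits the mask that the kernel will
     * restore when the handler returns, blocking everything except
     * SIGSEGV/SIGBUS until process_pending_signals() runs.  memset is
     * used because the field is a kernel sigset_t, smaller than the
     * libc sigset_t that sigfillset() would write. */
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);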
Do not directly access ucontext_t as the third signal parameter.
This is preparation for a sparc64 fix.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/include/host/aarch64/host-signal.h | 13 ++++++++-----
 linux-user/include/host/alpha/host-signal.h | 11 +++++++----
 linux-user/include/host/arm/host-signal.h | 11 +++++++----
 linux-user/include/host/i386/host-signal.h | 11 +++++++----
 linux-user/include/host/loongarch64/host-signal.h | 11 +++++++----
 linux-user/include/host/mips/host-signal.h | 11 +++++++----
 linux-user/include/host/ppc/host-signal.h | 11 +++++++----
 linux-user/include/host/riscv/host-signal.h | 11 +++++++----
 linux-user/include/host/s390/host-signal.h | 11 +++++++----
 linux-user/include/host/sparc/host-signal.h | 11 +++++++----
 linux-user/include/host/x86_64/host-signal.h | 11 +++++++----
 linux-user/signal.c | 4 ++--
 12 files changed, 80 insertions(+), 47 deletions(-)

diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/include/host/aarch64/host-signal.h
+++ b/linux-user/include/host/aarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@
 #ifndef AARCH64_HOST_SIGNAL_H
 #define AARCH64_HOST_SIGNAL_H

+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
+typedef ucontext_t host_sigcontext;
+
 /* Pre-3.16 kernel headers don't have these, so provide fallback definitions */
 #ifndef ESR_MAGIC
 #define ESR_MAGIC 0x45535201
@@ -XXX,XX +XXX,XX @@ struct esr_context {
 };
 #endif

-static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc)
+static inline struct _aarch64_ctx *first_ctx(host_sigcontext *uc)
 {
     return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
 }
@@ -XXX,XX +XXX,XX @@ static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr)
     return (struct _aarch64_ctx *)((char *)hdr + hdr->size);
 }

-static inline uintptr_t host_signal_pc(ucontext_t *uc)
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
 {
     return uc->uc_mcontext.pc;
 }

-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
 {
     uc->uc_mcontext.pc = pc;
 }

-static inline void *host_signal_mask(ucontext_t *uc)
+static inline void *host_signal_mask(host_sigcontext *uc)
 {
     return &uc->uc_sigmask;
 }

-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
 {
     struct _aarch64_ctx *hdr;
     uint32_t insn;
+++ b/linux-user/include/host/alpha/host-signal.h
76
@@ -XXX,XX +XXX,XX @@
77
#ifndef ALPHA_HOST_SIGNAL_H
78
#define ALPHA_HOST_SIGNAL_H
79
80
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
81
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
82
+typedef ucontext_t host_sigcontext;
83
+
84
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
85
{
86
return uc->uc_mcontext.sc_pc;
87
}
88
89
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
90
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
91
{
92
uc->uc_mcontext.sc_pc = pc;
93
}
94
95
-static inline void *host_signal_mask(ucontext_t *uc)
96
+static inline void *host_signal_mask(host_sigcontext *uc)
97
{
98
return &uc->uc_sigmask;
99
}
100
101
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
102
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
103
{
104
uint32_t *pc = (uint32_t *)host_signal_pc(uc);
105
uint32_t insn = *pc;
106
diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h
107
index XXXXXXX..XXXXXXX 100644
108
--- a/linux-user/include/host/arm/host-signal.h
109
+++ b/linux-user/include/host/arm/host-signal.h
110
@@ -XXX,XX +XXX,XX @@
111
#ifndef ARM_HOST_SIGNAL_H
112
#define ARM_HOST_SIGNAL_H
113
114
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
115
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
116
+typedef ucontext_t host_sigcontext;
117
+
118
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
119
{
120
return uc->uc_mcontext.arm_pc;
121
}
122
123
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
124
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
125
{
126
uc->uc_mcontext.arm_pc = pc;
127
}
128
129
-static inline void *host_signal_mask(ucontext_t *uc)
130
+static inline void *host_signal_mask(host_sigcontext *uc)
131
{
132
return &uc->uc_sigmask;
133
}
134
135
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
136
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
137
{
138
/*
139
* In the FSR, bit 11 is WnR, assuming a v6 or
140
diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h
141
index XXXXXXX..XXXXXXX 100644
142
--- a/linux-user/include/host/i386/host-signal.h
143
+++ b/linux-user/include/host/i386/host-signal.h
144
@@ -XXX,XX +XXX,XX @@
145
#ifndef I386_HOST_SIGNAL_H
146
#define I386_HOST_SIGNAL_H
147
148
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
149
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
150
+typedef ucontext_t host_sigcontext;
151
+
152
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
153
{
154
return uc->uc_mcontext.gregs[REG_EIP];
155
}
156
157
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
158
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
159
{
160
uc->uc_mcontext.gregs[REG_EIP] = pc;
161
}
162
163
-static inline void *host_signal_mask(ucontext_t *uc)
164
+static inline void *host_signal_mask(host_sigcontext *uc)
165
{
166
return &uc->uc_sigmask;
167
}
168
169
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
170
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
171
{
172
return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
173
&& (uc->uc_mcontext.gregs[REG_ERR] & 0x2);
174
diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h
175
index XXXXXXX..XXXXXXX 100644
176
--- a/linux-user/include/host/loongarch64/host-signal.h
177
+++ b/linux-user/include/host/loongarch64/host-signal.h
178
@@ -XXX,XX +XXX,XX @@
179
#ifndef LOONGARCH64_HOST_SIGNAL_H
180
#define LOONGARCH64_HOST_SIGNAL_H
181
182
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
183
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
184
+typedef ucontext_t host_sigcontext;
185
+
186
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
187
{
188
return uc->uc_mcontext.__pc;
189
}
190
191
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
192
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
193
{
194
uc->uc_mcontext.__pc = pc;
195
}
196
197
-static inline void *host_signal_mask(ucontext_t *uc)
198
+static inline void *host_signal_mask(host_sigcontext *uc)
199
{
200
return &uc->uc_sigmask;
201
}
202
203
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
204
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
205
{
206
const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc);
207
uint32_t insn = pinsn[0];
208
diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h
209
index XXXXXXX..XXXXXXX 100644
210
--- a/linux-user/include/host/mips/host-signal.h
211
+++ b/linux-user/include/host/mips/host-signal.h
212
@@ -XXX,XX +XXX,XX @@
213
#ifndef MIPS_HOST_SIGNAL_H
214
#define MIPS_HOST_SIGNAL_H
215
216
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
217
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
218
+typedef ucontext_t host_sigcontext;
219
+
220
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
221
{
222
return uc->uc_mcontext.pc;
223
}
224
225
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
226
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
227
{
228
uc->uc_mcontext.pc = pc;
229
}
230
231
-static inline void *host_signal_mask(ucontext_t *uc)
232
+static inline void *host_signal_mask(host_sigcontext *uc)
233
{
234
return &uc->uc_sigmask;
235
}
236
@@ -XXX,XX +XXX,XX @@ static inline void *host_signal_mask(ucontext_t *uc)
237
#error "Unsupported encoding"
238
#endif
239
240
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
241
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
242
{
243
uint32_t insn = *(uint32_t *)host_signal_pc(uc);
244
245
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
246
index XXXXXXX..XXXXXXX 100644
247
--- a/linux-user/include/host/ppc/host-signal.h
248
+++ b/linux-user/include/host/ppc/host-signal.h
249
@@ -XXX,XX +XXX,XX @@
250
#ifndef PPC_HOST_SIGNAL_H
251
#define PPC_HOST_SIGNAL_H
252
253
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
254
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
255
+typedef ucontext_t host_sigcontext;
256
+
257
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
258
{
259
return uc->uc_mcontext.regs->nip;
260
}
261
262
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
263
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
264
{
265
uc->uc_mcontext.regs->nip = pc;
266
}
267
268
-static inline void *host_signal_mask(ucontext_t *uc)
269
+static inline void *host_signal_mask(host_sigcontext *uc)
270
{
271
return &uc->uc_sigmask;
272
}
273
274
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
275
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
276
{
277
return uc->uc_mcontext.regs->trap != 0x400
278
&& (uc->uc_mcontext.regs->dsisr & 0x02000000);
279
diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h
280
index XXXXXXX..XXXXXXX 100644
281
--- a/linux-user/include/host/riscv/host-signal.h
282
+++ b/linux-user/include/host/riscv/host-signal.h
283
@@ -XXX,XX +XXX,XX @@
284
#ifndef RISCV_HOST_SIGNAL_H
285
#define RISCV_HOST_SIGNAL_H
286
287
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
288
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
289
+typedef ucontext_t host_sigcontext;
290
+
291
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
292
{
293
return uc->uc_mcontext.__gregs[REG_PC];
294
}
295
296
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
297
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
298
{
299
uc->uc_mcontext.__gregs[REG_PC] = pc;
300
}
301
302
-static inline void *host_signal_mask(ucontext_t *uc)
303
+static inline void *host_signal_mask(host_sigcontext *uc)
304
{
305
return &uc->uc_sigmask;
306
}
307
308
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
309
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
310
{
311
/*
312
* Detect store by reading the instruction at the program counter.
313
diff --git a/linux-user/include/host/s390/host-signal.h b/linux-user/include/host/s390/host-signal.h
314
index XXXXXXX..XXXXXXX 100644
315
--- a/linux-user/include/host/s390/host-signal.h
316
+++ b/linux-user/include/host/s390/host-signal.h
317
@@ -XXX,XX +XXX,XX @@
318
#ifndef S390_HOST_SIGNAL_H
319
#define S390_HOST_SIGNAL_H
320
321
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
322
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
323
+typedef ucontext_t host_sigcontext;
324
+
325
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
326
{
327
return uc->uc_mcontext.psw.addr;
328
}
329
330
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
331
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
332
{
333
uc->uc_mcontext.psw.addr = pc;
334
}
335
336
-static inline void *host_signal_mask(ucontext_t *uc)
337
+static inline void *host_signal_mask(host_sigcontext *uc)
338
{
339
return &uc->uc_sigmask;
340
}
341
342
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
343
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
344
{
345
uint16_t *pinsn = (uint16_t *)host_signal_pc(uc);
346
347
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
348
index XXXXXXX..XXXXXXX 100644
349
--- a/linux-user/include/host/sparc/host-signal.h
350
+++ b/linux-user/include/host/sparc/host-signal.h
351
@@ -XXX,XX +XXX,XX @@
352
#ifndef SPARC_HOST_SIGNAL_H
353
#define SPARC_HOST_SIGNAL_H
354
355
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
356
+/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
357
+typedef ucontext_t host_sigcontext;
358
+
359
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
360
{
361
#ifdef __arch64__
362
return uc->uc_mcontext.mc_gregs[MC_PC];
363
@@ -XXX,XX +XXX,XX @@ static inline uintptr_t host_signal_pc(ucontext_t *uc)
364
#endif
365
}
366
367
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
368
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
369
{
370
#ifdef __arch64__
371
uc->uc_mcontext.mc_gregs[MC_PC] = pc;
372
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
373
#endif
374
}
375
376
-static inline void *host_signal_mask(ucontext_t *uc)
377
+static inline void *host_signal_mask(host_sigcontext *uc)
378
{
379
return &uc->uc_sigmask;
380
}
381
382
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
383
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
384
{
385
uint32_t insn = *(uint32_t *)host_signal_pc(uc);
386
387
diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h
388
index XXXXXXX..XXXXXXX 100644
389
--- a/linux-user/include/host/x86_64/host-signal.h
390
+++ b/linux-user/include/host/x86_64/host-signal.h
391
@@ -XXX,XX +XXX,XX @@
392
#ifndef X86_64_HOST_SIGNAL_H
393
#define X86_64_HOST_SIGNAL_H
394
395
-static inline uintptr_t host_signal_pc(ucontext_t *uc)
396
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
397
+typedef ucontext_t host_sigcontext;
398
+
399
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
400
{
401
return uc->uc_mcontext.gregs[REG_RIP];
402
}
403
404
-static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
405
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
406
{
407
uc->uc_mcontext.gregs[REG_RIP] = pc;
408
}
409
410
-static inline void *host_signal_mask(ucontext_t *uc)
411
+static inline void *host_signal_mask(host_sigcontext *uc)
412
{
413
return &uc->uc_sigmask;
414
}
415
416
-static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
417
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
418
{
419
return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
420
&& (uc->uc_mcontext.gregs[REG_ERR] & 0x2);
421
diff --git a/linux-user/signal.c b/linux-user/signal.c
422
index XXXXXXX..XXXXXXX 100644
423
--- a/linux-user/signal.c
424
+++ b/linux-user/signal.c
425
@@ -XXX,XX +XXX,XX @@ void queue_signal(CPUArchState *env, int sig, int si_type,
426
/* Adjust the signal context to rewind out of safe-syscall if we're in it */
427
static inline void rewind_if_in_safe_syscall(void *puc)
428
{
429
- ucontext_t *uc = (ucontext_t *)puc;
430
+ host_sigcontext *uc = (host_sigcontext *)puc;
431
uintptr_t pcreg = host_signal_pc(uc);
432
433
if (pcreg > (uintptr_t)safe_syscall_start
434
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
435
CPUState *cpu = env_cpu(env);
436
TaskState *ts = cpu->opaque;
437
target_siginfo_t tinfo;
438
- ucontext_t *uc = puc;
439
+ host_sigcontext *uc = puc;
440
struct emulated_sigtable *k;
441
int guest_sig;
442
uintptr_t pc = 0;
25
--
443
--
26
2.25.1
444
2.25.1
27
445
28
446
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
We do not support sparc32 as a host, so there's no point in
2
sparc64 redirecting to sparc.
3
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
6
---
5
tcg/optimize.c | 56 ++++++++++++++++++++++++++++----------------------
7
linux-user/include/host/sparc/host-signal.h | 71 -------------------
6
1 file changed, 31 insertions(+), 25 deletions(-)
8
linux-user/include/host/sparc64/host-signal.h | 64 ++++++++++++++++-
9
2 files changed, 63 insertions(+), 72 deletions(-)
10
delete mode 100644 linux-user/include/host/sparc/host-signal.h
7
11
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
13
deleted file mode 100644
14
index XXXXXXX..XXXXXXX
15
--- a/linux-user/include/host/sparc/host-signal.h
16
+++ /dev/null
17
@@ -XXX,XX +XXX,XX @@
18
-/*
19
- * host-signal.h: signal info dependent on the host architecture
20
- *
21
- * Copyright (c) 2003-2005 Fabrice Bellard
22
- * Copyright (c) 2021 Linaro Limited
23
- *
24
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
25
- * See the COPYING file in the top-level directory.
26
- */
27
-
28
-#ifndef SPARC_HOST_SIGNAL_H
29
-#define SPARC_HOST_SIGNAL_H
30
-
31
-/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
32
-typedef ucontext_t host_sigcontext;
33
-
34
-static inline uintptr_t host_signal_pc(host_sigcontext *uc)
35
-{
36
-#ifdef __arch64__
37
- return uc->uc_mcontext.mc_gregs[MC_PC];
38
-#else
39
- return uc->uc_mcontext.gregs[REG_PC];
40
-#endif
41
-}
42
-
43
-static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
44
-{
45
-#ifdef __arch64__
46
- uc->uc_mcontext.mc_gregs[MC_PC] = pc;
47
-#else
48
- uc->uc_mcontext.gregs[REG_PC] = pc;
49
-#endif
50
-}
51
-
52
-static inline void *host_signal_mask(host_sigcontext *uc)
53
-{
54
- return &uc->uc_sigmask;
55
-}
56
-
57
-static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
58
-{
59
- uint32_t insn = *(uint32_t *)host_signal_pc(uc);
60
-
61
- if ((insn >> 30) == 3) {
62
- switch ((insn >> 19) & 0x3f) {
63
- case 0x05: /* stb */
64
- case 0x15: /* stba */
65
- case 0x06: /* sth */
66
- case 0x16: /* stha */
67
- case 0x04: /* st */
68
- case 0x14: /* sta */
69
- case 0x07: /* std */
70
- case 0x17: /* stda */
71
- case 0x0e: /* stx */
72
- case 0x1e: /* stxa */
73
- case 0x24: /* stf */
74
- case 0x34: /* stfa */
75
- case 0x27: /* stdf */
76
- case 0x37: /* stdfa */
77
- case 0x26: /* stqf */
78
- case 0x36: /* stqfa */
79
- case 0x25: /* stfsr */
80
- case 0x3c: /* casa */
81
- case 0x3e: /* casxa */
82
- return true;
83
- }
84
- }
85
- return false;
86
-}
87
-
88
-#endif
89
diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h
9
index XXXXXXX..XXXXXXX 100644
90
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
91
--- a/linux-user/include/host/sparc64/host-signal.h
11
+++ b/tcg/optimize.c
92
+++ b/linux-user/include/host/sparc64/host-signal.h
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
93
@@ -1 +1,63 @@
13
return true;
94
-#include "../sparc/host-signal.h"
14
}
95
+/*
15
96
+ * host-signal.h: signal info dependent on the host architecture
16
+static bool fold_movcond(OptContext *ctx, TCGOp *op)
97
+ *
98
+ * Copyright (c) 2003-2005 Fabrice Bellard
99
+ * Copyright (c) 2021 Linaro Limited
100
+ *
101
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
102
+ * See the COPYING file in the top-level directory.
103
+ */
104
+
105
+#ifndef SPARC64_HOST_SIGNAL_H
106
+#define SPARC64_HOST_SIGNAL_H
107
+
108
+/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
109
+typedef ucontext_t host_sigcontext;
110
+
111
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
17
+{
112
+{
18
+ TCGOpcode opc = op->opc;
113
+ return uc->uc_mcontext.mc_gregs[MC_PC];
19
+ TCGCond cond = op->args[5];
114
+}
20
+ int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
21
+
115
+
22
+ if (i >= 0) {
116
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
117
+{
24
+ }
118
+ uc->uc_mcontext.mc_gregs[MC_PC] = pc;
119
+}
25
+
120
+
26
+ if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
121
+static inline void *host_signal_mask(host_sigcontext *uc)
27
+ uint64_t tv = arg_info(op->args[3])->val;
122
+{
28
+ uint64_t fv = arg_info(op->args[4])->val;
123
+ return &uc->uc_sigmask;
124
+}
29
+
125
+
30
+ opc = (opc == INDEX_op_movcond_i32
126
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
31
+ ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
127
+{
128
+ uint32_t insn = *(uint32_t *)host_signal_pc(uc);
32
+
129
+
33
+ if (tv == 1 && fv == 0) {
130
+ if ((insn >> 30) == 3) {
34
+ op->opc = opc;
131
+ switch ((insn >> 19) & 0x3f) {
35
+ op->args[3] = cond;
132
+ case 0x05: /* stb */
36
+ } else if (fv == 1 && tv == 0) {
133
+ case 0x15: /* stba */
37
+ op->opc = opc;
134
+ case 0x06: /* sth */
38
+ op->args[3] = tcg_invert_cond(cond);
135
+ case 0x16: /* stha */
136
+ case 0x04: /* st */
137
+ case 0x14: /* sta */
138
+ case 0x07: /* std */
139
+ case 0x17: /* stda */
140
+ case 0x0e: /* stx */
141
+ case 0x1e: /* stxa */
142
+ case 0x24: /* stf */
143
+ case 0x34: /* stfa */
144
+ case 0x27: /* stdf */
145
+ case 0x37: /* stdfa */
146
+ case 0x26: /* stqf */
147
+ case 0x36: /* stqfa */
148
+ case 0x25: /* stfsr */
149
+ case 0x3c: /* casa */
150
+ case 0x3e: /* casxa */
151
+ return true;
39
+ }
152
+ }
40
+ }
153
+ }
41
+ return false;
154
+ return false;
42
+}
155
+}
43
+
156
+
44
static bool fold_mul(OptContext *ctx, TCGOp *op)
157
+#endif
45
{
46
return fold_const2(ctx, op);
47
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
48
}
49
break;
50
51
- CASE_OP_32_64(movcond):
52
- i = do_constant_folding_cond(opc, op->args[1],
53
- op->args[2], op->args[5]);
54
- if (i >= 0) {
55
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
56
- continue;
57
- }
58
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
59
- uint64_t tv = arg_info(op->args[3])->val;
60
- uint64_t fv = arg_info(op->args[4])->val;
61
- TCGCond cond = op->args[5];
62
-
63
- if (fv == 1 && tv == 0) {
64
- cond = tcg_invert_cond(cond);
65
- } else if (!(tv == 1 && fv == 0)) {
66
- break;
67
- }
68
- op->args[3] = cond;
69
- op->opc = opc = (opc == INDEX_op_movcond_i32
70
- ? INDEX_op_setcond_i32
71
- : INDEX_op_setcond_i64);
72
- }
73
- break;
74
-
75
-
76
default:
77
break;
78
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
80
case INDEX_op_mb:
81
done = fold_mb(&ctx, op);
82
break;
83
+ CASE_OP_32_64(movcond):
84
+ done = fold_movcond(&ctx, op);
85
+ break;
86
CASE_OP_32_64(mul):
87
done = fold_mul(&ctx, op);
88
break;
89
--
158
--
90
2.25.1
159
2.25.1
91
160
92
161
1
Sign repetitions are perforce all identical, whether they are 1 or 0.
1
Sparc64 is unique on linux in *not* passing ucontext_t as
2
Bitwise operations preserve the relative quantity of the repetitions.
2
the third argument to a SA_SIGINFO handler. It passes the
3
old struct sigcontext instead.
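To make the sign-repetition remark above concrete, here is a minimal stand-alone sketch (illustration only, not code from the patch; the function name is invented). s_mask is taken to have 1s in the high bits known to be copies of the sign bit, and for any two-operand bitwise operation a bit is still a guaranteed sign copy only if it was one in both inputs, so the result mask is the intersection:

    #include <stdint.h>

    /* Both masks are left-aligned runs of 1s, so ANDing them keeps the
     * shorter run: the repetitions common to both operands. */
    static uint64_t s_mask_for_bitwise_op(uint64_t s_mask_a, uint64_t s_mask_b)
    {
        return s_mask_a & s_mask_b;
    }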
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Set both pc and npc in host_signal_set_pc.
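The shape of that fix, restated as a sketch (sparc64-linux specific: it assumes the kernel's struct sigcontext layout with sigc_regs.tpc/tnpc and will not build elsewhere): because SPARC tracks the next PC separately for delayed branches, rewinding the PC must move both values.

    #include <signal.h>
    #include <stdint.h>

    /* The third handler argument on sparc64-linux is a struct sigcontext. */
    typedef struct sigcontext host_sigcontext;

    static inline void example_set_pc(host_sigcontext *sc, uintptr_t pc)
    {
        sc->sigc_regs.tpc  = pc;        /* instruction to execute next   */
        sc->sigc_regs.tnpc = pc + 4;    /* and its sequential successor  */
    }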
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Fixes: 8b5bd461935b ("linux-user/host/sparc: Populate host_signal.h")
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
10
---
9
tcg/optimize.c | 29 +++++++++++++++++++++++++++++
11
linux-user/include/host/sparc64/host-signal.h | 17 +++++++++--------
10
1 file changed, 29 insertions(+)
12
1 file changed, 9 insertions(+), 8 deletions(-)
11
13
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h
13
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
16
--- a/linux-user/include/host/sparc64/host-signal.h
15
+++ b/tcg/optimize.c
17
+++ b/linux-user/include/host/sparc64/host-signal.h
16
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
18
@@ -XXX,XX +XXX,XX @@
17
z2 = arg_info(op->args[2])->z_mask;
19
#ifndef SPARC64_HOST_SIGNAL_H
18
ctx->z_mask = z1 & z2;
20
#define SPARC64_HOST_SIGNAL_H
19
21
20
+ /*
22
-/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
21
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
23
-typedef ucontext_t host_sigcontext;
22
+ * Bitwise operations preserve the relative quantity of the repetitions.
24
+/* The third argument to a SA_SIGINFO handler is struct sigcontext. */
23
+ */
25
+typedef struct sigcontext host_sigcontext;
24
+ ctx->s_mask = arg_info(op->args[1])->s_mask
26
25
+ & arg_info(op->args[2])->s_mask;
27
-static inline uintptr_t host_signal_pc(host_sigcontext *uc)
26
+
28
+static inline uintptr_t host_signal_pc(host_sigcontext *sc)
27
/*
29
{
28
* Known-zeros does not imply known-ones. Therefore unless
30
- return uc->uc_mcontext.mc_gregs[MC_PC];
29
* arg2 is constant, we can't infer affected bits from it.
31
+ return sc->sigc_regs.tpc;
30
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
31
}
32
ctx->z_mask = z1;
33
34
+ ctx->s_mask = arg_info(op->args[1])->s_mask
35
+ & arg_info(op->args[2])->s_mask;
36
return fold_masks(ctx, op);
37
}
32
}
38
33
39
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
34
-static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
40
fold_xi_to_not(ctx, op, 0)) {
35
+static inline void host_signal_set_pc(host_sigcontext *sc, uintptr_t pc)
41
return true;
36
{
42
}
37
- uc->uc_mcontext.mc_gregs[MC_PC] = pc;
43
+
38
+ sc->sigc_regs.tpc = pc;
44
+ ctx->s_mask = arg_info(op->args[1])->s_mask
39
+ sc->sigc_regs.tnpc = pc + 4;
45
+ & arg_info(op->args[2])->s_mask;
46
return false;
47
}
40
}
48
41
49
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
42
-static inline void *host_signal_mask(host_sigcontext *uc)
50
43
+static inline void *host_signal_mask(host_sigcontext *sc)
51
ctx->z_mask = arg_info(op->args[3])->z_mask
44
{
52
| arg_info(op->args[4])->z_mask;
45
- return &uc->uc_sigmask;
53
+ ctx->s_mask = arg_info(op->args[3])->s_mask
46
+ return &sc->sigc_mask;
54
+ & arg_info(op->args[4])->s_mask;
55
56
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
57
uint64_t tv = arg_info(op->args[3])->val;
58
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
59
fold_xi_to_not(ctx, op, -1)) {
60
return true;
61
}
62
+
63
+ ctx->s_mask = arg_info(op->args[1])->s_mask
64
+ & arg_info(op->args[2])->s_mask;
65
return false;
66
}
47
}
67
48
68
@@ -XXX,XX +XXX,XX @@ static bool fold_nor(OptContext *ctx, TCGOp *op)
49
static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
69
fold_xi_to_not(ctx, op, 0)) {
70
return true;
71
}
72
+
73
+ ctx->s_mask = arg_info(op->args[1])->s_mask
74
+ & arg_info(op->args[2])->s_mask;
75
return false;
76
}
77
78
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
79
return true;
80
}
81
82
+ ctx->s_mask = arg_info(op->args[1])->s_mask;
83
+
84
/* Because of fold_to_not, we want to always return true, via finish. */
85
finish_folding(ctx, op);
86
return true;
87
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
88
89
ctx->z_mask = arg_info(op->args[1])->z_mask
90
| arg_info(op->args[2])->z_mask;
91
+ ctx->s_mask = arg_info(op->args[1])->s_mask
92
+ & arg_info(op->args[2])->s_mask;
93
return fold_masks(ctx, op);
94
}
95
96
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
97
fold_ix_to_not(ctx, op, 0)) {
98
return true;
99
}
100
+
101
+ ctx->s_mask = arg_info(op->args[1])->s_mask
102
+ & arg_info(op->args[2])->s_mask;
103
return false;
104
}
105
106
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
107
108
ctx->z_mask = arg_info(op->args[1])->z_mask
109
| arg_info(op->args[2])->z_mask;
110
+ ctx->s_mask = arg_info(op->args[1])->s_mask
111
+ & arg_info(op->args[2])->s_mask;
112
return fold_masks(ctx, op);
113
}
114
115
--
50
--
116
2.25.1
51
2.25.1
117
52
118
53
1
For constant shifts, we can simply shift the s_mask.
1
From: Idan Horowitz <idan.horowitz@gmail.com>
2
2
3
For variable shifts, we know that sar does not reduce
3
When the length of the range is large enough, clearing the whole cache is
4
the s_mask, which helps for sequences like
4
faster than iterating over the (possibly extremely large) set of pages
5
contained in the range.
5
6
6
ext32s_i64 t, in
7
This mimics the pre-existing similar optimization done on the flush of the
7
sar_i64 t, t, v
8
tlb itself.
8
ext32s_i64 out, t
9
9
10
allowing the final extend to be eliminated.
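A small worked example of that reasoning (illustration only; the masks and variable names are chosen for the sketch, and s_mask is read as "high bits known to equal the sign bit"):

    #include <stdint.h>

    int main(void)
    {
        /* After ext32s, bits 63..31 are all copies of bit 31, so the sign
         * repetition mask covers the top 33 bits. */
        uint64_t s_mask = 0xffffffff80000000ull;

        /* Constant shift: the repetitions move right by the same amount, so
         * the mask itself can simply be shifted (arithmetically, to stay
         * left-aligned). */
        uint64_t s_mask_after_shr4 = (uint64_t)((int64_t)s_mask >> 4);

        /* Variable sar: the count is unknown, but an arithmetic right shift
         * can only add sign copies, never remove them, so keeping the input
         * mask is a valid conservative answer -- which is what lets the final
         * ext32s in the sequence above be dropped. */
        uint64_t s_mask_after_var_sar = s_mask;

        (void)s_mask_after_shr4;
        (void)s_mask_after_var_sar;
        return 0;
    }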
10
Signed-off-by: Idan Horowitz <idan.horowitz@gmail.com>
11
11
Message-Id: <20220110164754.1066025-1-idan.horowitz@gmail.com>
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
14
---
16
tcg/optimize.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++---
15
accel/tcg/cputlb.c | 9 +++++++++
17
1 file changed, 47 insertions(+), 3 deletions(-)
16
1 file changed, 9 insertions(+)
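To make the cputlb break-even test described above concrete, a stand-alone sketch follows (the page size, cache size and every name here are invented for the example, not the QEMU definitions): once the flushed range spans at least as many pages as the cache has entries, one bulk clear is cheaper than per-page work.

    #include <stdint.h>
    #include <string.h>

    #define EX_PAGE_SIZE      4096u     /* assumed target page size  */
    #define EX_JMP_CACHE_SIZE 4096u     /* assumed number of entries */

    static void *ex_jmp_cache[EX_JMP_CACHE_SIZE];

    static void ex_flush_jmp_cache_page(uint64_t addr)
    {
        (void)addr;                     /* per-page invalidation would go here */
    }

    static void ex_flush_jmp_cache_range(uint64_t addr, uint64_t len)
    {
        if (len >= (uint64_t)EX_PAGE_SIZE * EX_JMP_CACHE_SIZE) {
            /* Clearing everything touches each entry exactly once. */
            memset(ex_jmp_cache, 0, sizeof(ex_jmp_cache));
            return;
        }
        for (uint64_t off = 0; off < len; off += EX_PAGE_SIZE) {
            ex_flush_jmp_cache_page(addr + off);
        }
    }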
18
17
19
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
20
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/optimize.c
20
--- a/accel/tcg/cputlb.c
22
+++ b/tcg/optimize.c
21
+++ b/accel/tcg/cputlb.c
23
@@ -XXX,XX +XXX,XX @@ static uint64_t smask_from_zmask(uint64_t zmask)
22
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
24
return ~(~0ull >> rep);
25
}
26
27
+/*
28
+ * Recreate a properly left-aligned smask after manipulation.
29
+ * Some bit-shuffling, particularly shifts and rotates, may
30
+ * retain sign bits on the left, but may scatter disconnected
31
+ * sign bits on the right. Retain only what remains to the left.
32
+ */
33
+static uint64_t smask_from_smask(int64_t smask)
34
+{
35
+ /* Only the 1 bits are significant for smask */
36
+ return smask_from_zmask(~smask);
37
+}
38
+
39
static inline TempOptInfo *ts_info(TCGTemp *ts)
40
{
41
return ts->state_ptr;
42
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
43
44
static bool fold_shift(OptContext *ctx, TCGOp *op)
45
{
46
+ uint64_t s_mask, z_mask, sign;
47
+
48
if (fold_const2(ctx, op) ||
49
fold_ix_to_i(ctx, op, 0) ||
50
fold_xi_to_x(ctx, op, 0)) {
51
return true;
52
}
23
}
53
24
qemu_spin_unlock(&env_tlb(env)->c.lock);
54
+ s_mask = arg_info(op->args[1])->s_mask;
25
55
+ z_mask = arg_info(op->args[1])->z_mask;
26
+ /*
56
+
27
+ * If the length is larger than the jump cache size, then it will take
57
if (arg_is_const(op->args[2])) {
28
+ * longer to clear each entry individually than it will to clear it all.
58
- ctx->z_mask = do_constant_folding(op->opc, ctx->type,
29
+ */
59
- arg_info(op->args[1])->z_mask,
30
+ if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
60
- arg_info(op->args[2])->val);
31
+ cpu_tb_jmp_cache_clear(cpu);
61
+ int sh = arg_info(op->args[2])->val;
32
+ return;
62
+
63
+ ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
64
+
65
+ s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
66
+ ctx->s_mask = smask_from_smask(s_mask);
67
+
68
return fold_masks(ctx, op);
69
}
70
+
71
+ switch (op->opc) {
72
+ CASE_OP_32_64(sar):
73
+ /*
74
+ * Arithmetic right shift will not reduce the number of
75
+ * input sign repetitions.
76
+ */
77
+ ctx->s_mask = s_mask;
78
+ break;
79
+ CASE_OP_32_64(shr):
80
+ /*
81
+ * If the sign bit is known zero, then logical right shift
82
+ * will not reduced the number of input sign repetitions.
83
+ */
84
+ sign = (s_mask & -s_mask) >> 1;
85
+ if (!(z_mask & sign)) {
86
+ ctx->s_mask = s_mask;
87
+ }
88
+ break;
89
+ default:
90
+ break;
91
+ }
33
+ }
92
+
34
+
93
return false;
35
for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
94
}
36
tb_flush_jmp_cache(cpu, d.addr + i);
95
37
}
96
--
38
--
97
2.25.1
39
2.25.1
98
40
99
41
1
From: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
1
From: Idan Horowitz <idan.horowitz@gmail.com>
2
2
3
Addition of not and xor on 128-bit integers.
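For readers without a native 128-bit type in mind, a minimal sketch of what the non-__int128 fallbacks amount to (toy type and function names, mirroring the lo/hi pair used in the patch below): both operations act independently on the two halves.

    #include <stdint.h>

    typedef struct { uint64_t lo; int64_t hi; } ToyInt128;

    static inline ToyInt128 toy_int128_not(ToyInt128 a)
    {
        return (ToyInt128){ ~a.lo, ~a.hi };
    }

    static inline ToyInt128 toy_int128_xor(ToyInt128 a, ToyInt128 b)
    {
        return (ToyInt128){ a.lo ^ b.lo, a.hi ^ b.hi };
    }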
3
Instead of taking the lock of the cpu work list in order to check if it's
4
empty, we can just read the head pointer atomically. This decreases
5
cpu_work_list_empty's share from 5% to 1.3% in a profile of icount-enabled
6
aarch64-softmmu.
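A self-contained sketch of the idea (C11 atomics; the queue type and function names are illustrative, not the QEMU API): the emptiness probe tolerates a momentarily stale answer, so it only needs an atomic read of the list head rather than the mutex.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct work_item { struct work_item *next; };

    struct work_queue {
        _Atomic(struct work_item *) head;   /* producers still update under a lock */
    };

    static bool work_queue_empty(struct work_queue *q)
    {
        /* Racy by design: a stale result is acceptable for an
         * "is there anything to do?" probe, so no lock is taken here. */
        return atomic_load_explicit(&q->head, memory_order_acquire) == NULL;
    }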
4
7
5
Signed-off-by: Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
8
Signed-off-by: Idan Horowitz <idan.horowitz@gmail.com>
6
Co-authored-by: Fabien Portas <fabien.portas@grenoble-inp.org>
9
Message-Id: <20220114004358.299534-1-idan.horowitz@gmail.com>
7
Message-Id: <20211025122818.168890-3-frederic.petrot@univ-grenoble-alpes.fr>
8
[rth: Split out logical operations.]
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
---
12
include/qemu/int128.h | 20 ++++++++++++++++++++
13
softmmu/cpus.c | 7 +------
13
1 file changed, 20 insertions(+)
14
1 file changed, 1 insertion(+), 6 deletions(-)
14
15
15
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
16
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/include/qemu/int128.h
18
--- a/softmmu/cpus.c
18
+++ b/include/qemu/int128.h
19
+++ b/softmmu/cpus.c
19
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
20
@@ -XXX,XX +XXX,XX @@ bool cpu_is_stopped(CPUState *cpu)
20
return a;
21
22
bool cpu_work_list_empty(CPUState *cpu)
23
{
24
- bool ret;
25
-
26
- qemu_mutex_lock(&cpu->work_mutex);
27
- ret = QSIMPLEQ_EMPTY(&cpu->work_list);
28
- qemu_mutex_unlock(&cpu->work_mutex);
29
- return ret;
30
+ return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
21
}
31
}
22
32
23
+static inline Int128 int128_not(Int128 a)
33
bool cpu_thread_is_idle(CPUState *cpu)
24
+{
25
+ return ~a;
26
+}
27
+
28
static inline Int128 int128_and(Int128 a, Int128 b)
29
{
30
return a & b;
31
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
32
return a | b;
33
}
34
35
+static inline Int128 int128_xor(Int128 a, Int128 b)
36
+{
37
+ return a ^ b;
38
+}
39
+
40
static inline Int128 int128_rshift(Int128 a, int n)
41
{
42
return a >> n;
43
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_exts64(int64_t a)
44
return int128_make128(a, (a < 0) ? -1 : 0);
45
}
46
47
+static inline Int128 int128_not(Int128 a)
48
+{
49
+ return int128_make128(~a.lo, ~a.hi);
50
+}
51
+
52
static inline Int128 int128_and(Int128 a, Int128 b)
53
{
54
return int128_make128(a.lo & b.lo, a.hi & b.hi);
55
@@ -XXX,XX +XXX,XX @@ static inline Int128 int128_or(Int128 a, Int128 b)
56
return int128_make128(a.lo | b.lo, a.hi | b.hi);
57
}
58
59
+static inline Int128 int128_xor(Int128 a, Int128 b)
60
+{
61
+ return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi);
62
+}
63
+
64
static inline Int128 int128_rshift(Int128 a, int n)
65
{
66
int64_t h;
67
--
34
--
68
2.25.1
35
2.25.1
69
36
70
37
1
The results are generally 6 bit unsigned values, though
1
From: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru>
2
the count leading and trailing bits may produce any value
3
for a zero input.
4
2
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Commit aff0e204cb1f1c036a496c94c15f5dfafcd9b4b4 introduced CF_NOIRQ usage,
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
but one case was forgotten. Record/replay uses one special TB which is not
5
really executed, but used to cause a correct exception in replay mode.
6
This patch adds the CF_NOIRQ flag for such a block.
7
8
Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <164362834054.1754532.7678416881159817273.stgit@pasha-ThinkPad-X280>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
12
---
9
tcg/optimize.c | 3 ++-
13
accel/tcg/cpu-exec.c | 3 ++-
10
1 file changed, 2 insertions(+), 1 deletion(-)
14
1 file changed, 2 insertions(+), 1 deletion(-)
11
15
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
13
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
18
--- a/accel/tcg/cpu-exec.c
15
+++ b/tcg/optimize.c
19
+++ b/accel/tcg/cpu-exec.c
16
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
17
g_assert_not_reached();
21
if (replay_has_exception()
18
}
22
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
19
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
23
/* Execute just one insn to trigger exception pending in the log */
20
-
24
- cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
21
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
25
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
22
return false;
26
+ | CF_NOIRQ | 1;
23
}
27
}
24
28
#endif
25
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
return false;
26
default:
27
g_assert_not_reached();
28
}
29
+ ctx->s_mask = smask_from_zmask(ctx->z_mask);
30
return false;
31
}
32
33
--
30
--
34
2.25.1
31
2.25.1
35
32
36
33
1
Recognize the constant function for remainder.
1
From: WANG Xuerui <git@xen0n.name>
2
2
3
Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
3
Apparently we were left behind; just renaming MO_Q to MO_UQ is enough.
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
5
Fixes: fc313c64345453c7 ("exec/memop: Adding signedness to quad definitions")
6
Signed-off-by: WANG Xuerui <git@xen0n.name>
7
Message-Id: <20220206162106.1092364-1-i.qemu@xen0n.name>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
9
---
7
tcg/optimize.c | 6 +++++-
10
tcg/loongarch64/tcg-target.c.inc | 2 +-
8
1 file changed, 5 insertions(+), 1 deletion(-)
11
1 file changed, 1 insertion(+), 1 deletion(-)
9
12
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
15
--- a/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
16
+++ b/tcg/loongarch64/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
17
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
15
18
case MO_SL:
16
static bool fold_remainder(OptContext *ctx, TCGOp *op)
19
tcg_out_opc_ldx_w(s, rd, rj, rk);
17
{
20
break;
18
- return fold_const2(ctx, op);
21
- case MO_Q:
19
+ if (fold_const2(ctx, op) ||
22
+ case MO_UQ:
20
+ fold_xx_to_i(ctx, op, 0)) {
23
tcg_out_opc_ldx_d(s, rd, rj, rk);
21
+ return true;
24
break;
22
+ }
25
default:
23
+ return false;
24
}
25
26
static bool fold_setcond(OptContext *ctx, TCGOp *op)
27
--
26
--
28
2.25.1
27
2.25.1
29
28
30
29
1
From: Luis Pires <luis.pires@eldorado.org.br>
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
3
Move udiv_qrnnd() from include/fpu/softfloat-macros.h to host-utils,
4
so it can be reused by divu128().
5
6
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-Id: <20211025191154.350831-3-luis.pires@eldorado.org.br>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
3
---
11
include/fpu/softfloat-macros.h | 82 ----------------------------------
4
tcg/i386/tcg-target.h | 2 -
12
include/qemu/host-utils.h | 81 +++++++++++++++++++++++++++++++++
5
tcg/i386/tcg-target.c.inc | 103 ++++++++++++++++++++++++++++++++++++--
13
2 files changed, 81 insertions(+), 82 deletions(-)
6
2 files changed, 98 insertions(+), 7 deletions(-)
14
7
15
diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h
8
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
16
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
17
--- a/include/fpu/softfloat-macros.h
10
--- a/tcg/i386/tcg-target.h
18
+++ b/include/fpu/softfloat-macros.h
11
+++ b/tcg/i386/tcg-target.h
19
@@ -XXX,XX +XXX,XX @@
12
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
20
* so some portions are provided under:
13
21
* the SoftFloat-2a license
14
#define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe
22
* the BSD license
15
23
- * GPL-v2-or-later
16
-#ifdef CONFIG_SOFTMMU
24
*
17
#define TCG_TARGET_NEED_LDST_LABELS
25
* Any future contributions to this file after December 1st 2014 will be
26
* taken to be licensed under the Softfloat-2a license unless specifically
27
@@ -XXX,XX +XXX,XX @@ this code that are retained.
28
* THE POSSIBILITY OF SUCH DAMAGE.
29
*/
30
31
-/* Portions of this work are licensed under the terms of the GNU GPL,
32
- * version 2 or later. See the COPYING file in the top-level directory.
33
- */
34
-
35
#ifndef FPU_SOFTFLOAT_MACROS_H
36
#define FPU_SOFTFLOAT_MACROS_H
37
38
@@ -XXX,XX +XXX,XX @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b)
39
40
}
41
42
-/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
43
- * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
44
- *
45
- * Licensed under the GPLv2/LGPLv3
46
- */
47
-static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
48
- uint64_t n0, uint64_t d)
49
-{
50
-#if defined(__x86_64__)
51
- uint64_t q;
52
- asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
53
- return q;
54
-#elif defined(__s390x__) && !defined(__clang__)
55
- /* Need to use a TImode type to get an even register pair for DLGR. */
56
- unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
57
- asm("dlgr %0, %1" : "+r"(n) : "r"(d));
58
- *r = n >> 64;
59
- return n;
60
-#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
61
- /* From Power ISA 2.06, programming note for divdeu. */
62
- uint64_t q1, q2, Q, r1, r2, R;
63
- asm("divdeu %0,%2,%4; divdu %1,%3,%4"
64
- : "=&r"(q1), "=r"(q2)
65
- : "r"(n1), "r"(n0), "r"(d));
66
- r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
67
- r2 = n0 - (q2 * d);
68
- Q = q1 + q2;
69
- R = r1 + r2;
70
- if (R >= d || R < r2) { /* overflow implies R > d */
71
- Q += 1;
72
- R -= d;
73
- }
74
- *r = R;
75
- return Q;
76
-#else
77
- uint64_t d0, d1, q0, q1, r1, r0, m;
78
-
79
- d0 = (uint32_t)d;
80
- d1 = d >> 32;
81
-
82
- r1 = n1 % d1;
83
- q1 = n1 / d1;
84
- m = q1 * d0;
85
- r1 = (r1 << 32) | (n0 >> 32);
86
- if (r1 < m) {
87
- q1 -= 1;
88
- r1 += d;
89
- if (r1 >= d) {
90
- if (r1 < m) {
91
- q1 -= 1;
92
- r1 += d;
93
- }
94
- }
95
- }
96
- r1 -= m;
97
-
98
- r0 = r1 % d1;
99
- q0 = r1 / d1;
100
- m = q0 * d0;
101
- r0 = (r0 << 32) | (uint32_t)n0;
102
- if (r0 < m) {
103
- q0 -= 1;
104
- r0 += d;
105
- if (r0 >= d) {
106
- if (r0 < m) {
107
- q0 -= 1;
108
- r0 += d;
109
- }
110
- }
111
- }
112
- r0 -= m;
113
-
114
- *r = r0;
115
- return (q1 << 32) | q0;
116
-#endif
18
-#endif
117
-}
19
#define TCG_TARGET_NEED_POOL_LABELS
118
-
20
119
/*----------------------------------------------------------------------------
21
#endif
120
| Returns an approximation to the square root of the 32-bit significand given
22
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
121
| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
122
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
123
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
124
--- a/include/qemu/host-utils.h
24
--- a/tcg/i386/tcg-target.c.inc
125
+++ b/include/qemu/host-utils.h
25
+++ b/tcg/i386/tcg-target.c.inc
126
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@
127
* THE SOFTWARE.
27
* THE SOFTWARE.
128
*/
28
*/
129
29
130
+/* Portions of this work are licensed under the terms of the GNU GPL,
30
+#include "../tcg-ldst.c.inc"
131
+ * version 2 or later. See the COPYING file in the top-level directory.
31
#include "../tcg-pool.c.inc"
132
+ */
32
133
+
33
#ifdef CONFIG_DEBUG_TCG
134
#ifndef HOST_UTILS_H
34
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
135
#define HOST_UTILS_H
35
#define OPC_VZEROUPPER (0x77 | P_EXT)
136
36
#define OPC_XCHG_ax_r32    (0x90)
137
@@ -XXX,XX +XXX,XX @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);
37
38
-#define OPC_GRP3_Ev    (0xf7)
39
-#define OPC_GRP5    (0xff)
40
+#define OPC_GRP3_Eb (0xf6)
41
+#define OPC_GRP3_Ev (0xf7)
42
+#define OPC_GRP5 (0xff)
43
#define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
44
45
/* Group 1 opcode extensions for 0x80-0x83.
46
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
47
#define SHIFT_SAR 7
48
49
/* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
50
+#define EXT3_TESTi 0
51
#define EXT3_NOT 2
52
#define EXT3_NEG 3
53
#define EXT3_MUL 4
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
55
}
56
57
#if defined(CONFIG_SOFTMMU)
58
-#include "../tcg-ldst.c.inc"
59
-
60
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
61
* int mmu_idx, uintptr_t ra)
138
*/
62
*/
139
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
63
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
140
64
tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
141
+/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
65
return true;
142
+ * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
66
}
143
+ *
67
-#elif TCG_TARGET_REG_BITS == 32
144
+ * Licensed under the GPLv2/LGPLv3
145
+ */
146
+static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
147
+ uint64_t n0, uint64_t d)
148
+{
149
+#if defined(__x86_64__)
150
+ uint64_t q;
151
+ asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
152
+ return q;
153
+#elif defined(__s390x__) && !defined(__clang__)
154
+ /* Need to use a TImode type to get an even register pair for DLGR. */
155
+ unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
156
+ asm("dlgr %0, %1" : "+r"(n) : "r"(d));
157
+ *r = n >> 64;
158
+ return n;
159
+#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
160
+ /* From Power ISA 2.06, programming note for divdeu. */
161
+ uint64_t q1, q2, Q, r1, r2, R;
162
+ asm("divdeu %0,%2,%4; divdu %1,%3,%4"
163
+ : "=&r"(q1), "=r"(q2)
164
+ : "r"(n1), "r"(n0), "r"(d));
165
+ r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */
166
+ r2 = n0 - (q2 * d);
167
+ Q = q1 + q2;
168
+ R = r1 + r2;
169
+ if (R >= d || R < r2) { /* overflow implies R > d */
170
+ Q += 1;
171
+ R -= d;
172
+ }
173
+ *r = R;
174
+ return Q;
175
+#else
68
+#else
176
+ uint64_t d0, d1, q0, q1, r1, r0, m;
69
+
177
+
70
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
178
+ d0 = (uint32_t)d;
71
+ TCGReg addrhi, unsigned a_bits)
179
+ d1 = d >> 32;
72
+{
180
+
73
+ unsigned a_mask = (1 << a_bits) - 1;
181
+ r1 = n1 % d1;
74
+ TCGLabelQemuLdst *label;
182
+ q1 = n1 / d1;
75
+
183
+ m = q1 * d0;
76
+ /*
184
+ r1 = (r1 << 32) | (n0 >> 32);
77
+ * We are expecting a_bits to max out at 7, so we can usually use testb.
185
+ if (r1 < m) {
78
+ * For i686, we have to use testl for %esi/%edi.
186
+ q1 -= 1;
79
+ */
187
+ r1 += d;
80
+ if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
188
+ if (r1 >= d) {
81
+ tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
189
+ if (r1 < m) {
82
+ tcg_out8(s, a_mask);
190
+ q1 -= 1;
83
+ } else {
191
+ r1 += d;
84
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
192
+ }
85
+ tcg_out32(s, a_mask);
86
+ }
87
+
88
+ /* jne slow_path */
89
+ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
90
+
91
+ label = new_ldst_label(s);
92
+ label->is_ld = is_ld;
93
+ label->addrlo_reg = addrlo;
94
+ label->addrhi_reg = addrhi;
95
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4);
96
+ label->label_ptr[0] = s->code_ptr;
97
+
98
+ s->code_ptr += 4;
99
+}
100
+
101
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
102
+{
103
+ /* resolve label address */
104
+ tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
105
+
106
+ if (TCG_TARGET_REG_BITS == 32) {
107
+ int ofs = 0;
108
+
109
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
110
+ ofs += 4;
111
+
112
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
113
+ ofs += 4;
114
+ if (TARGET_LONG_BITS == 64) {
115
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
116
+ ofs += 4;
193
+ }
117
+ }
194
+ }
118
+
195
+ r1 -= m;
119
+ tcg_out_pushi(s, (uintptr_t)l->raddr);
196
+
120
+ } else {
197
+ r0 = r1 % d1;
121
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
198
+ q0 = r1 / d1;
122
+ l->addrlo_reg);
199
+ m = q0 * d0;
123
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
200
+ r0 = (r0 << 32) | (uint32_t)n0;
124
+
201
+ if (r0 < m) {
125
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
202
+ q0 -= 1;
126
+ tcg_out_push(s, TCG_REG_RAX);
203
+ r0 += d;
127
+ }
204
+ if (r0 >= d) {
128
+
205
+ if (r0 < m) {
129
+ /* "Tail call" to the helper, with the return address back inline. */
206
+ q0 -= 1;
130
+ tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
207
+ r0 += d;
131
+ : helper_unaligned_st));
208
+ }
132
+ return true;
209
+ }
133
+}
210
+ }
134
+
211
+ r0 -= m;
135
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
212
+
136
+{
213
+ *r = r0;
137
+ return tcg_out_fail_alignment(s, l);
214
+ return (q1 << 32) | q0;
138
+}
139
+
140
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
141
+{
142
+ return tcg_out_fail_alignment(s, l);
143
+}
144
+
145
+#if TCG_TARGET_REG_BITS == 32
146
# define x86_guest_base_seg 0
147
# define x86_guest_base_index -1
148
# define x86_guest_base_offset guest_base
149
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
150
return 0;
151
}
152
# endif
215
+#endif
153
+#endif
216
+}
154
#endif /* SOFTMMU */
217
+
155
156
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
158
#if defined(CONFIG_SOFTMMU)
159
int mem_index;
160
tcg_insn_unit *label_ptr[2];
161
+#else
162
+ unsigned a_bits;
163
#endif
164
165
datalo = *args++;
166
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
167
add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
168
s->code_ptr, label_ptr);
169
#else
170
+ a_bits = get_alignment_bits(opc);
171
+ if (a_bits) {
172
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
173
+ }
174
+
175
tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
176
x86_guest_base_offset, x86_guest_base_seg,
177
is64, opc);
178
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
179
#if defined(CONFIG_SOFTMMU)
180
int mem_index;
181
tcg_insn_unit *label_ptr[2];
182
+#else
183
+ unsigned a_bits;
184
#endif
185
186
datalo = *args++;
187
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
188
add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
189
s->code_ptr, label_ptr);
190
#else
191
+ a_bits = get_alignment_bits(opc);
192
+ if (a_bits) {
193
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
194
+ }
195
+
196
tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
197
x86_guest_base_offset, x86_guest_base_seg, opc);
218
#endif
198
#endif
219
--
199
--
220
2.25.1
200
2.25.1
221
201
222
202
1
Provide what will become a larger context for splitting
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
the very large tcg_optimize function.
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
3
---
9
tcg/optimize.c | 77 ++++++++++++++++++++++++++------------------------
4
tcg/aarch64/tcg-target.h | 2 -
10
1 file changed, 40 insertions(+), 37 deletions(-)
5
tcg/aarch64/tcg-target.c.inc | 91 +++++++++++++++++++++++++++++-------
6
2 files changed, 74 insertions(+), 19 deletions(-)
11
7
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
13
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
10
--- a/tcg/aarch64/tcg-target.h
15
+++ b/tcg/optimize.c
11
+++ b/tcg/aarch64/tcg-target.h
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
12
@@ -XXX,XX +XXX,XX @@ typedef enum {
17
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
13
18
} TempOptInfo;
14
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
19
15
20
+typedef struct OptContext {
16
-#ifdef CONFIG_SOFTMMU
21
+ TCGTempSet temps_used;
17
#define TCG_TARGET_NEED_LDST_LABELS
22
+} OptContext;
18
-#endif
23
+
19
#define TCG_TARGET_NEED_POOL_LABELS
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
20
25
{
21
#endif /* AARCH64_TCG_TARGET_H */
26
return ts->state_ptr;
22
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
27
@@ -XXX,XX +XXX,XX @@ static void reset_temp(TCGArg arg)
23
index XXXXXXX..XXXXXXX 100644
28
}
24
--- a/tcg/aarch64/tcg-target.c.inc
29
25
+++ b/tcg/aarch64/tcg-target.c.inc
30
/* Initialize and activate a temporary. */
26
@@ -XXX,XX +XXX,XX @@
31
-static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
27
* See the COPYING file in the top-level directory for details.
32
+static void init_ts_info(OptContext *ctx, TCGTemp *ts)
28
*/
33
{
29
34
size_t idx = temp_idx(ts);
30
+#include "../tcg-ldst.c.inc"
35
TempOptInfo *ti;
31
#include "../tcg-pool.c.inc"
36
32
#include "qemu/bitops.h"
37
- if (test_bit(idx, temps_used->l)) {
33
38
+ if (test_bit(idx, ctx->temps_used.l)) {
34
@@ -XXX,XX +XXX,XX @@ typedef enum {
39
return;
35
I3404_ANDI = 0x12000000,
40
}
36
I3404_ORRI = 0x32000000,
41
- set_bit(idx, temps_used->l);
37
I3404_EORI = 0x52000000,
42
+ set_bit(idx, ctx->temps_used.l);
38
+ I3404_ANDSI = 0x72000000,
43
39
44
ti = ts->state_ptr;
40
/* Move wide immediate instructions. */
45
if (ti == NULL) {
41
I3405_MOVN = 0x12800000,
46
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
43
if (offset == sextract64(offset, 0, 26)) {
44
tcg_out_insn(s, 3206, B, offset);
45
} else {
46
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
47
- tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
48
+ /* Choose X9 as a call-clobbered non-LR temporary. */
49
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
50
+ tcg_out_insn(s, 3207, BR, TCG_REG_X9);
47
}
51
}
48
}
52
}
49
53
50
-static void init_arg_info(TCGTempSet *temps_used, TCGArg arg)
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
51
+static void init_arg_info(OptContext *ctx, TCGArg arg)
52
{
53
- init_ts_info(temps_used, arg_temp(arg));
54
+ init_ts_info(ctx, arg_temp(arg));
55
}
56
57
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
58
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
59
}
55
}
60
}
56
}
61
57
62
-static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
58
-#ifdef CONFIG_SOFTMMU
63
+static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
59
-#include "../tcg-ldst.c.inc"
64
TCGOp *op, TCGArg dst, uint64_t val)
60
+static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
65
{
61
+{
66
const TCGOpDef *def = &tcg_op_defs[op->opc];
62
+ ptrdiff_t offset = tcg_pcrel_diff(s, target);
67
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used,
63
+ tcg_debug_assert(offset == sextract64(offset, 0, 21));
68
64
+ tcg_out_insn(s, 3406, ADR, rd, offset);
69
/* Convert movi to mov with constant temp. */
65
+}
70
tv = tcg_constant_internal(type, val);
66
71
- init_ts_info(temps_used, tv);
67
+#ifdef CONFIG_SOFTMMU
72
+ init_ts_info(ctx, tv);
68
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
73
tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
69
* MemOpIdx oi, uintptr_t ra)
70
*/
71
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
72
#endif
73
};
74
75
-static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
76
-{
77
- ptrdiff_t offset = tcg_pcrel_diff(s, target);
78
- tcg_debug_assert(offset == sextract64(offset, 0, 21));
79
- tcg_out_insn(s, 3406, ADR, rd, offset);
80
-}
81
-
82
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
83
{
84
MemOpIdx oi = lb->oi;
85
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
86
tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
74
}
87
}
75
88
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
89
+#else
77
{
90
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
78
int nb_temps, nb_globals, i;
91
+ unsigned a_bits)
79
TCGOp *op, *op_next, *prev_mb = NULL;
92
+{
80
- TCGTempSet temps_used;
93
+ unsigned a_mask = (1 << a_bits) - 1;
81
+ OptContext ctx = {};
94
+ TCGLabelQemuLdst *label = new_ldst_label(s);
82
95
+
83
/* Array VALS has an element for each temp.
96
+ label->is_ld = is_ld;
84
If this temp holds a constant then its value is kept in VALS' element.
97
+ label->addrlo_reg = addr_reg;
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
98
+
86
nb_temps = s->nb_temps;
99
+ /* tst addr, #mask */
87
nb_globals = s->nb_globals;
100
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
88
101
+
89
- memset(&temps_used, 0, sizeof(temps_used));
102
+ label->label_ptr[0] = s->code_ptr;
90
for (i = 0; i < nb_temps; ++i) {
103
+
91
s->temps[i].state_ptr = NULL;
104
+ /* b.ne slow_path */
92
}
105
+ tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
93
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
106
+
94
for (i = 0; i < nb_oargs + nb_iargs; i++) {
107
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr);
95
TCGTemp *ts = arg_temp(op->args[i]);
108
+}
96
if (ts) {
109
+
97
- init_ts_info(&temps_used, ts);
110
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
98
+ init_ts_info(&ctx, ts);
111
+{
99
}
112
+ if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
100
}
113
+ return false;
101
} else {
114
+ }
102
nb_oargs = def->nb_oargs;
115
+
103
nb_iargs = def->nb_iargs;
116
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg);
104
for (i = 0; i < nb_oargs + nb_iargs; i++) {
117
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
105
- init_arg_info(&temps_used, op->args[i]);
118
+
106
+ init_arg_info(&ctx, op->args[i]);
119
+ /* "Tail call" to the helper, with the return address back inline. */
107
}
120
+ tcg_out_adr(s, TCG_REG_LR, l->raddr);
108
}
121
+ tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld
109
122
+ : helper_unaligned_st));
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
123
+ return true;
111
CASE_OP_32_64(rotr):
124
+}
112
if (arg_is_const(op->args[1])
125
+
113
&& arg_info(op->args[1])->val == 0) {
126
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
114
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
127
+{
115
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
128
+ return tcg_out_fail_alignment(s, l);
116
continue;
129
+}
117
}
130
+
118
break;
131
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
119
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
132
+{
120
133
+ return tcg_out_fail_alignment(s, l);
121
if (partmask == 0) {
134
+}
122
tcg_debug_assert(nb_oargs == 1);
135
#endif /* CONFIG_SOFTMMU */
123
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
136
124
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
137
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
125
continue;
138
TCGReg data_r, TCGReg addr_r,
126
}
139
TCGType otype, TCGReg off_r)
127
if (affected == 0) {
140
{
128
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
141
- /* Byte swapping is left to middle-end expansion. */
129
CASE_OP_32_64(mulsh):
142
- tcg_debug_assert((memop & MO_BSWAP) == 0);
130
if (arg_is_const(op->args[2])
143
-
131
&& arg_info(op->args[2])->val == 0) {
144
switch (memop & MO_SSIZE) {
132
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
145
case MO_UB:
133
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
146
tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
134
continue;
147
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
135
}
148
TCGReg data_r, TCGReg addr_r,
136
break;
149
TCGType otype, TCGReg off_r)
137
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
150
{
138
CASE_OP_32_64_VEC(sub):
151
- /* Byte swapping is left to middle-end expansion. */
139
CASE_OP_32_64_VEC(xor):
152
- tcg_debug_assert((memop & MO_BSWAP) == 0);
140
if (args_are_copies(op->args[1], op->args[2])) {
153
-
141
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0);
154
switch (memop & MO_SIZE) {
142
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
155
case MO_8:
143
continue;
156
tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
144
}
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
145
break;
158
{
146
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
159
MemOp memop = get_memop(oi);
147
if (arg_is_const(op->args[1])) {
160
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
148
tmp = arg_info(op->args[1])->val;
161
+
149
tmp = dup_const(TCGOP_VECE(op), tmp);
162
+ /* Byte swapping is left to middle-end expansion. */
150
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
163
+ tcg_debug_assert((memop & MO_BSWAP) == 0);
151
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
164
+
152
break;
165
#ifdef CONFIG_SOFTMMU
153
}
166
unsigned mem_index = get_mmuidx(oi);
154
goto do_default;
167
tcg_insn_unit *label_ptr;
155
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
168
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
156
case INDEX_op_dup2_vec:
169
add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
157
assert(TCG_TARGET_REG_BITS == 32);
170
s->code_ptr, label_ptr);
158
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
171
#else /* !CONFIG_SOFTMMU */
159
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
172
+ unsigned a_bits = get_alignment_bits(memop);
160
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0],
173
+ if (a_bits) {
161
deposit64(arg_info(op->args[1])->val, 32, 32,
174
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
162
arg_info(op->args[2])->val));
175
+ }
163
break;
176
if (USE_GUEST_BASE) {
164
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
177
tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
165
case INDEX_op_extrh_i64_i32:
178
TCG_REG_GUEST_BASE, otype, addr_reg);
166
if (arg_is_const(op->args[1])) {
179
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
167
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
180
{
168
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
181
MemOp memop = get_memop(oi);
169
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
182
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
170
break;
183
+
171
}
184
+ /* Byte swapping is left to middle-end expansion. */
172
goto do_default;
185
+ tcg_debug_assert((memop & MO_BSWAP) == 0);
173
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
186
+
174
if (arg_is_const(op->args[1])) {
187
#ifdef CONFIG_SOFTMMU
175
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
188
unsigned mem_index = get_mmuidx(oi);
176
op->args[2]);
189
tcg_insn_unit *label_ptr;
177
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
190
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
178
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
191
add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
179
break;
192
data_reg, addr_reg, s->code_ptr, label_ptr);
180
}
193
#else /* !CONFIG_SOFTMMU */
181
goto do_default;
194
+ unsigned a_bits = get_alignment_bits(memop);
182
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
195
+ if (a_bits) {
183
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
196
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
184
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
197
+ }
185
arg_info(op->args[2])->val);
198
if (USE_GUEST_BASE) {
186
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
199
tcg_out_qemu_st_direct(s, memop, data_reg,
187
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
200
TCG_REG_GUEST_BASE, otype, addr_reg);
188
break;
189
}
190
goto do_default;
191
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
192
TCGArg v = arg_info(op->args[1])->val;
193
if (v != 0) {
194
tmp = do_constant_folding(opc, v, 0);
195
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
196
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
} else {
198
tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
199
}
200
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
201
tmp = deposit64(arg_info(op->args[1])->val,
202
op->args[3], op->args[4],
203
arg_info(op->args[2])->val);
204
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
205
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
206
break;
207
}
208
goto do_default;
209
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
210
if (arg_is_const(op->args[1])) {
211
tmp = extract64(arg_info(op->args[1])->val,
212
op->args[2], op->args[3]);
213
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
214
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
215
break;
216
}
217
goto do_default;
218
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
219
if (arg_is_const(op->args[1])) {
220
tmp = sextract64(arg_info(op->args[1])->val,
221
op->args[2], op->args[3]);
222
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
223
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
224
break;
225
}
226
goto do_default;
227
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
228
tmp = (int32_t)(((uint32_t)v1 >> shr) |
229
((uint32_t)v2 << (32 - shr)));
230
}
231
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
232
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
233
break;
234
}
235
goto do_default;
236
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
237
tmp = do_constant_folding_cond(opc, op->args[1],
238
op->args[2], op->args[3]);
239
if (tmp != 2) {
240
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
241
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
242
break;
243
}
244
goto do_default;
245
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
246
op->args[1], op->args[2]);
247
if (tmp != 2) {
248
if (tmp) {
249
- memset(&temps_used, 0, sizeof(temps_used));
250
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
251
op->opc = INDEX_op_br;
252
op->args[0] = op->args[3];
253
} else {
254
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
255
256
rl = op->args[0];
257
rh = op->args[1];
258
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a);
259
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32));
260
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
261
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
262
break;
263
}
264
goto do_default;
265
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
266
267
rl = op->args[0];
268
rh = op->args[1];
269
- tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r);
270
- tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32));
271
+ tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
272
+ tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
273
break;
274
}
275
goto do_default;
276
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
277
if (tmp != 2) {
278
if (tmp) {
279
do_brcond_true:
280
- memset(&temps_used, 0, sizeof(temps_used));
281
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
282
op->opc = INDEX_op_br;
283
op->args[0] = op->args[5];
284
} else {
285
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
286
/* Simplify LT/GE comparisons vs zero to a single compare
287
vs the high word of the input. */
288
do_brcond_high:
289
- memset(&temps_used, 0, sizeof(temps_used));
290
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
291
op->opc = INDEX_op_brcond_i32;
292
op->args[0] = op->args[1];
293
op->args[1] = op->args[3];
294
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
295
goto do_default;
296
}
297
do_brcond_low:
298
- memset(&temps_used, 0, sizeof(temps_used));
299
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
300
op->opc = INDEX_op_brcond_i32;
301
op->args[1] = op->args[2];
302
op->args[2] = op->args[4];
303
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
304
op->args[5]);
305
if (tmp != 2) {
306
do_setcond_const:
307
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
308
+ tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
309
} else if ((op->args[5] == TCG_COND_LT
310
|| op->args[5] == TCG_COND_GE)
311
&& arg_is_const(op->args[3])
312
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
313
if (!(tcg_call_flags(op)
314
& (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
315
for (i = 0; i < nb_globals; i++) {
316
- if (test_bit(i, temps_used.l)) {
317
+ if (test_bit(i, ctx.temps_used.l)) {
318
reset_ts(&s->temps[i]);
319
}
320
}
321
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
322
block, otherwise we only trash the output args. "z_mask" is
323
the non-zero bits mask for the first output arg. */
324
if (def->flags & TCG_OPF_BB_END) {
325
- memset(&temps_used, 0, sizeof(temps_used));
326
+ memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
327
} else {
328
do_reset_output:
329
for (i = 0; i < nb_oargs; i++) {
330
--
201
--
331
2.25.1
202
2.25.1
332
203
333
204
1
Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 22 deletions(-)

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target.h | 2 -
tcg/ppc/tcg-target.c.inc | 98 ++++++++++++++++++++++++++++++++++++----
2 files changed, 90 insertions(+), 10 deletions(-)
10
7
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
10
--- a/tcg/ppc/tcg-target.h
14
+++ b/tcg/optimize.c
11
+++ b/tcg/ppc/tcg-target.h
15
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
12
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
13
#define TCG_TARGET_DEFAULT_MO (0)
14
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
15
16
-#ifdef CONFIG_SOFTMMU
17
#define TCG_TARGET_NEED_LDST_LABELS
18
-#endif
19
#define TCG_TARGET_NEED_POOL_LABELS
20
21
#endif
22
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/ppc/tcg-target.c.inc
25
+++ b/tcg/ppc/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@
27
28
#include "elf.h"
29
#include "../tcg-pool.c.inc"
30
+#include "../tcg-ldst.c.inc"
31
32
/*
33
* Standardize on the _CALL_FOO symbols used by GCC:
34
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
16
}
35
}
17
}
36
}
18
37
19
+static bool fold_call(OptContext *ctx, TCGOp *op)
38
-static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
20
+{
39
+static void tcg_out_call_int(TCGContext *s, int lk,
21
+ TCGContext *s = ctx->tcg;
40
+ const tcg_insn_unit *target)
22
+ int nb_oargs = TCGOP_CALLO(op);
41
{
23
+ int nb_iargs = TCGOP_CALLI(op);
42
#ifdef _CALL_AIX
24
+ int flags, i;
43
/* Look through the descriptor. If the branch is in range, and we
25
+
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
26
+ init_arguments(ctx, op, nb_oargs + nb_iargs);
45
27
+ copy_propagate(ctx, op, nb_oargs, nb_iargs);
46
if (in_range_b(diff) && toc == (uint32_t)toc) {
28
+
47
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
29
+ /* If the function reads or writes globals, reset temp data. */
48
- tcg_out_b(s, LK, tgt);
30
+ flags = tcg_call_flags(op);
49
+ tcg_out_b(s, lk, tgt);
31
+ if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
50
} else {
32
+ int nb_globals = s->nb_globals;
51
/* Fold the low bits of the constant into the addresses below. */
33
+
52
intptr_t arg = (intptr_t)target;
34
+ for (i = 0; i < nb_globals; i++) {
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
35
+ if (test_bit(i, ctx->temps_used.l)) {
54
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
36
+ reset_ts(&ctx->tcg->temps[i]);
55
tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
37
+ }
56
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
57
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
58
+ tcg_out32(s, BCCTR | BO_ALWAYS | lk);
59
}
60
#elif defined(_CALL_ELF) && _CALL_ELF == 2
61
intptr_t diff;
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
63
64
diff = tcg_pcrel_diff(s, target);
65
if (in_range_b(diff)) {
66
- tcg_out_b(s, LK, target);
67
+ tcg_out_b(s, lk, target);
68
} else {
69
tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
70
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
71
+ tcg_out32(s, BCCTR | BO_ALWAYS | lk);
72
}
73
#else
74
- tcg_out_b(s, LK, target);
75
+ tcg_out_b(s, lk, target);
76
#endif
77
}
78
79
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
80
+{
81
+ tcg_out_call_int(s, LK, target);
82
+}
83
+
84
static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
85
[MO_UB] = LBZX,
86
[MO_UW] = LHZX,
87
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = {
88
};
89
90
#if defined (CONFIG_SOFTMMU)
91
-#include "../tcg-ldst.c.inc"
92
-
93
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
94
* int mmu_idx, uintptr_t ra)
95
*/
96
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
97
tcg_out_b(s, 0, lb->raddr);
98
return true;
99
}
100
+#else
101
+
102
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
103
+ TCGReg addrhi, unsigned a_bits)
104
+{
105
+ unsigned a_mask = (1 << a_bits) - 1;
106
+ TCGLabelQemuLdst *label = new_ldst_label(s);
107
+
108
+ label->is_ld = is_ld;
109
+ label->addrlo_reg = addrlo;
110
+ label->addrhi_reg = addrhi;
111
+
112
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
113
+ tcg_debug_assert(a_bits < 16);
114
+ tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));
115
+
116
+ label->label_ptr[0] = s->code_ptr;
117
+ tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
118
+
119
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr);
120
+}
121
+
122
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
123
+{
124
+ if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
125
+ return false;
126
+ }
127
+
128
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
129
+ TCGReg arg = TCG_REG_R4;
130
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
131
+ arg |= 1;
132
+#endif
133
+ if (l->addrlo_reg != arg) {
134
+ tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
135
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
136
+ } else if (l->addrhi_reg != arg + 1) {
137
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
138
+ tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
139
+ } else {
140
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
141
+ tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
142
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
38
+ }
143
+ }
39
+ }
144
+ } else {
40
+
145
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
41
+ /* Reset temp data for outputs. */
146
+ }
42
+ for (i = 0; i < nb_oargs; i++) {
147
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
43
+ reset_temp(op->args[i]);
148
+
44
+ }
149
+ /* "Tail call" to the helper, with the return address back inline. */
45
+
150
+ tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
46
+ /* Stop optimizing MB across calls. */
151
+ : helper_unaligned_st));
47
+ ctx->prev_mb = NULL;
48
+ return true;
152
+ return true;
49
+}
153
+}
50
+
154
+
51
/* Propagate constants and copies, fold constant expressions. */
155
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
52
void tcg_optimize(TCGContext *s)
156
+{
53
{
157
+ return tcg_out_fail_alignment(s, l);
54
- int nb_temps, nb_globals, i;
158
+}
55
+ int nb_temps, i;
159
+
56
TCGOp *op, *op_next;
160
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
57
OptContext ctx = { .tcg = s };
161
+{
58
162
+ return tcg_out_fail_alignment(s, l);
59
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
163
+}
60
available through the doubly linked circular list. */
164
+
61
165
#endif /* SOFTMMU */
62
nb_temps = s->nb_temps;
166
63
- nb_globals = s->nb_globals;
167
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
64
-
168
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
65
for (i = 0; i < nb_temps; ++i) {
169
#ifdef CONFIG_SOFTMMU
66
s->temps[i].state_ptr = NULL;
170
int mem_index;
67
}
171
tcg_insn_unit *label_ptr;
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
172
+#else
69
uint64_t z_mask, partmask, affected, tmp;
173
+ unsigned a_bits;
70
int nb_oargs, nb_iargs;
174
#endif
71
TCGOpcode opc = op->opc;
175
72
- const TCGOpDef *def = &tcg_op_defs[opc];
176
datalo = *args++;
73
+ const TCGOpDef *def;
177
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
74
178
75
- /* Count the arguments, and initialize the temps that are
179
rbase = TCG_REG_R3;
76
- going to be used */
180
#else /* !CONFIG_SOFTMMU */
77
+ /* Calls are special. */
181
+ a_bits = get_alignment_bits(opc);
78
if (opc == INDEX_op_call) {
182
+ if (a_bits) {
79
- nb_oargs = TCGOP_CALLO(op);
183
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
80
- nb_iargs = TCGOP_CALLI(op);
184
+ }
81
- } else {
185
rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
82
- nb_oargs = def->nb_oargs;
186
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
83
- nb_iargs = def->nb_iargs;
187
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
84
+ fold_call(&ctx, op);
188
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
85
+ continue;
189
#ifdef CONFIG_SOFTMMU
86
}
190
int mem_index;
87
+
191
tcg_insn_unit *label_ptr;
88
+ def = &tcg_op_defs[opc];
192
+#else
89
+ nb_oargs = def->nb_oargs;
193
+ unsigned a_bits;
90
+ nb_iargs = def->nb_iargs;
194
#endif
91
init_arguments(&ctx, op, nb_oargs + nb_iargs);
195
92
copy_propagate(&ctx, op, nb_oargs, nb_iargs);
196
datalo = *args++;
93
197
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
94
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
198
95
if (def->flags & TCG_OPF_BB_END) {
199
rbase = TCG_REG_R3;
96
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
200
#else /* !CONFIG_SOFTMMU */
97
} else {
201
+ a_bits = get_alignment_bits(opc);
98
- if (opc == INDEX_op_call &&
202
+ if (a_bits) {
99
- !(tcg_call_flags(op)
203
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
100
- & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
204
+ }
101
- for (i = 0; i < nb_globals; i++) {
205
rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
102
- if (test_bit(i, ctx.temps_used.l)) {
206
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
103
- reset_ts(&s->temps[i]);
207
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
104
- }
105
- }
106
- }
107
-
108
for (i = 0; i < nb_oargs; i++) {
109
reset_temp(op->args[i]);
110
/* Save the corresponding known-zero bits mask for the
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
112
case INDEX_op_qemu_st_i32:
113
case INDEX_op_qemu_st8_i32:
114
case INDEX_op_qemu_st_i64:
115
- case INDEX_op_call:
116
/* Opcodes that touch guest memory stop the optimization. */
117
ctx.prev_mb = NULL;
118
break;
119
--
208
--
120
2.25.1
209
2.25.1
121
210
122
211
diff view generated by jsdifflib
1
This puts the separate mb optimization into the same framework
2
as the others. While fold_qemu_{ld,st} are currently identical,
3
that won't last as more code gets moved.
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
2
---
9
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
3
tcg/riscv/tcg-target.h | 2 --
10
1 file changed, 51 insertions(+), 38 deletions(-)
4
tcg/riscv/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++++++++++--
5
2 files changed, 61 insertions(+), 4 deletions(-)
11
6
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
13
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
9
--- a/tcg/riscv/tcg-target.h
15
+++ b/tcg/optimize.c
10
+++ b/tcg/riscv/tcg-target.h
16
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
11
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
12
13
#define TCG_TARGET_DEFAULT_MO (0)
14
15
-#ifdef CONFIG_SOFTMMU
16
#define TCG_TARGET_NEED_LDST_LABELS
17
-#endif
18
#define TCG_TARGET_NEED_POOL_LABELS
19
20
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
21
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
22
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/riscv/tcg-target.c.inc
24
+++ b/tcg/riscv/tcg-target.c.inc
25
@@ -XXX,XX +XXX,XX @@
26
* THE SOFTWARE.
27
*/
28
29
+#include "../tcg-ldst.c.inc"
30
#include "../tcg-pool.c.inc"
31
32
#ifdef CONFIG_DEBUG_TCG
33
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
34
*/
35
36
#if defined(CONFIG_SOFTMMU)
37
-#include "../tcg-ldst.c.inc"
38
-
39
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
40
* MemOpIdx oi, uintptr_t ra)
41
*/
42
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
43
tcg_out_goto(s, l->raddr);
17
return true;
44
return true;
18
}
45
}
19
46
+#else
20
+static bool fold_mb(OptContext *ctx, TCGOp *op)
47
+
48
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
49
+ unsigned a_bits)
21
+{
50
+{
22
+ /* Eliminate duplicate and redundant fence instructions. */
51
+ unsigned a_mask = (1 << a_bits) - 1;
23
+ if (ctx->prev_mb) {
52
+ TCGLabelQemuLdst *l = new_ldst_label(s);
24
+ /*
53
+
25
+ * Merge two barriers of the same type into one,
54
+ l->is_ld = is_ld;
26
+ * or a weaker barrier into a stronger one,
55
+ l->addrlo_reg = addr_reg;
27
+ * or two weaker barriers into a stronger one.
56
+
28
+ * mb X; mb Y => mb X|Y
57
+ /* We are expecting a_bits to max out at 7, so we can always use andi. */
29
+ * mb; strl => mb; st
58
+ tcg_debug_assert(a_bits < 12);
30
+ * ldaq; mb => ld; mb
59
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
31
+ * ldaq; strl => ld; mb; st
60
+
32
+ * Other combinations are also merged into a strong
61
+ l->label_ptr[0] = s->code_ptr;
33
+ * barrier. This is stricter than specified but for
62
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
34
+ * the purposes of TCG is better than not optimizing.
63
+
35
+ */
64
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
36
+ ctx->prev_mb->args[0] |= op->args[0];
65
+}
37
+ tcg_op_remove(ctx->tcg, op);
66
+
38
+ } else {
67
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
39
+ ctx->prev_mb = op;
68
+{
69
+ /* resolve label address */
70
+ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
71
+ return false;
40
+ }
72
+ }
73
+
74
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
75
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
76
+
77
+ /* tail call, with the return address back inline. */
78
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
79
+ tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
80
+ : helper_unaligned_st), true);
41
+ return true;
81
+ return true;
42
+}
82
+}
43
+
83
+
44
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
84
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
45
+{
85
+{
46
+ /* Opcodes that touch guest memory stop the mb optimization. */
86
+ return tcg_out_fail_alignment(s, l);
47
+ ctx->prev_mb = NULL;
48
+ return false;
49
+}
87
+}
50
+
88
+
51
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
89
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
52
+{
90
+{
53
+ /* Opcodes that touch guest memory stop the mb optimization. */
91
+ return tcg_out_fail_alignment(s, l);
54
+ ctx->prev_mb = NULL;
55
+ return false;
56
+}
92
+}
57
+
93
+
58
/* Propagate constants and copies, fold constant expressions. */
94
#endif /* CONFIG_SOFTMMU */
59
void tcg_optimize(TCGContext *s)
95
60
{
96
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
97
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
62
}
98
MemOp opc;
63
break;
99
#if defined(CONFIG_SOFTMMU)
64
100
tcg_insn_unit *label_ptr[1];
65
+ case INDEX_op_mb:
101
+#else
66
+ done = fold_mb(&ctx, op);
102
+ unsigned a_bits;
67
+ break;
103
#endif
68
+ case INDEX_op_qemu_ld_i32:
104
TCGReg base = TCG_REG_TMP0;
69
+ case INDEX_op_qemu_ld_i64:
105
70
+ done = fold_qemu_ld(&ctx, op);
106
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
71
+ break;
107
tcg_out_ext32u(s, base, addr_regl);
72
+ case INDEX_op_qemu_st_i32:
108
addr_regl = base;
73
+ case INDEX_op_qemu_st8_i32:
74
+ case INDEX_op_qemu_st_i64:
75
+ done = fold_qemu_st(&ctx, op);
76
+ break;
77
+
78
default:
79
break;
80
}
81
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
82
if (!done) {
83
finish_folding(&ctx, op);
84
}
85
-
86
- /* Eliminate duplicate and redundant fence instructions. */
87
- if (ctx.prev_mb) {
88
- switch (opc) {
89
- case INDEX_op_mb:
90
- /* Merge two barriers of the same type into one,
91
- * or a weaker barrier into a stronger one,
92
- * or two weaker barriers into a stronger one.
93
- * mb X; mb Y => mb X|Y
94
- * mb; strl => mb; st
95
- * ldaq; mb => ld; mb
96
- * ldaq; strl => ld; mb; st
97
- * Other combinations are also merged into a strong
98
- * barrier. This is stricter than specified but for
99
- * the purposes of TCG is better than not optimizing.
100
- */
101
- ctx.prev_mb->args[0] |= op->args[0];
102
- tcg_op_remove(s, op);
103
- break;
104
-
105
- default:
106
- /* Opcodes that end the block stop the optimization. */
107
- if ((def->flags & TCG_OPF_BB_END) == 0) {
108
- break;
109
- }
110
- /* fallthru */
111
- case INDEX_op_qemu_ld_i32:
112
- case INDEX_op_qemu_ld_i64:
113
- case INDEX_op_qemu_st_i32:
114
- case INDEX_op_qemu_st8_i32:
115
- case INDEX_op_qemu_st_i64:
116
- /* Opcodes that touch guest memory stop the optimization. */
117
- ctx.prev_mb = NULL;
118
- break;
119
- }
120
- } else if (opc == INDEX_op_mb) {
121
- ctx.prev_mb = op;
122
- }
123
}
109
}
124
}
110
+ a_bits = get_alignment_bits(opc);
111
+ if (a_bits) {
112
+ tcg_out_test_alignment(s, true, addr_regl, a_bits);
113
+ }
114
if (guest_base != 0) {
115
tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
116
}
117
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
118
MemOp opc;
119
#if defined(CONFIG_SOFTMMU)
120
tcg_insn_unit *label_ptr[1];
121
+#else
122
+ unsigned a_bits;
123
#endif
124
TCGReg base = TCG_REG_TMP0;
125
126
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
127
tcg_out_ext32u(s, base, addr_regl);
128
addr_regl = base;
129
}
130
+ a_bits = get_alignment_bits(opc);
131
+ if (a_bits) {
132
+ tcg_out_test_alignment(s, false, addr_regl, a_bits);
133
+ }
134
if (guest_base != 0) {
135
tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
136
}
125
--
137
--
126
2.25.1
138
2.25.1
127
139
128
140
diff view generated by jsdifflib
1
Even though there is only one user, place this more complex
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
conversion into its own helper.
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
4
tcg/s390x/tcg-target.h | 2 --
8
1 file changed, 47 insertions(+), 42 deletions(-)
5
tcg/s390x/tcg-target.c.inc | 59 ++++++++++++++++++++++++++++++++++++--
6
2 files changed, 57 insertions(+), 4 deletions(-)
9
7
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
11
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
10
--- a/tcg/s390x/tcg-target.h
13
+++ b/tcg/optimize.c
11
+++ b/tcg/s390x/tcg-target.h
14
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
12
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
15
13
/* no need to flush icache explicitly */
16
static bool fold_neg(OptContext *ctx, TCGOp *op)
17
{
18
- return fold_const1(ctx, op);
19
+ if (fold_const1(ctx, op)) {
20
+ return true;
21
+ }
22
+ /*
23
+ * Because of fold_sub_to_neg, we want to always return true,
24
+ * via finish_folding.
25
+ */
26
+ finish_folding(ctx, op);
27
+ return true;
28
}
14
}
29
15
30
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
-#ifdef CONFIG_SOFTMMU
31
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
17
#define TCG_TARGET_NEED_LDST_LABELS
32
return fold_const2(ctx, op);
18
-#endif
19
#define TCG_TARGET_NEED_POOL_LABELS
20
21
#endif
22
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/s390x/tcg-target.c.inc
25
+++ b/tcg/s390x/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@
27
#error "unsupported code generation mode"
28
#endif
29
30
+#include "../tcg-ldst.c.inc"
31
#include "../tcg-pool.c.inc"
32
#include "elf.h"
33
34
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
35
RI_OIHL = 0xa509,
36
RI_OILH = 0xa50a,
37
RI_OILL = 0xa50b,
38
+ RI_TMLL = 0xa701,
39
40
RIE_CGIJ = 0xec7c,
41
RIE_CGRJ = 0xec64,
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
33
}
43
}
34
44
35
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
45
#if defined(CONFIG_SOFTMMU)
46
-#include "../tcg-ldst.c.inc"
47
-
48
/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
49
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
50
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
51
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
52
return true;
53
}
54
#else
55
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld,
56
+ TCGReg addrlo, unsigned a_bits)
36
+{
57
+{
37
+ TCGOpcode neg_op;
58
+ unsigned a_mask = (1 << a_bits) - 1;
38
+ bool have_neg;
59
+ TCGLabelQemuLdst *l = new_ldst_label(s);
39
+
60
+
40
+ if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
61
+ l->is_ld = is_ld;
62
+ l->addrlo_reg = addrlo;
63
+
64
+ /* We are expecting a_bits to max out at 7, much lower than TMLL. */
65
+ tcg_debug_assert(a_bits < 16);
66
+ tcg_out_insn(s, RI, TMLL, addrlo, a_mask);
67
+
68
+ tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
69
+ l->label_ptr[0] = s->code_ptr;
70
+ s->code_ptr += 1;
71
+
72
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
73
+}
74
+
75
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
76
+{
77
+ if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL,
78
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
41
+ return false;
79
+ return false;
42
+ }
80
+ }
43
+
81
+
44
+ switch (ctx->type) {
82
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg);
45
+ case TCG_TYPE_I32:
83
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
46
+ neg_op = INDEX_op_neg_i32;
84
+
47
+ have_neg = TCG_TARGET_HAS_neg_i32;
85
+ /* "Tail call" to the helper, with the return address back inline. */
48
+ break;
86
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr);
49
+ case TCG_TYPE_I64:
87
+ tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld
50
+ neg_op = INDEX_op_neg_i64;
88
+ : helper_unaligned_st));
51
+ have_neg = TCG_TARGET_HAS_neg_i64;
89
+ return true;
52
+ break;
53
+ case TCG_TYPE_V64:
54
+ case TCG_TYPE_V128:
55
+ case TCG_TYPE_V256:
56
+ neg_op = INDEX_op_neg_vec;
57
+ have_neg = (TCG_TARGET_HAS_neg_vec &&
58
+ tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
+ }
63
+ if (have_neg) {
64
+ op->opc = neg_op;
65
+ op->args[1] = op->args[2];
66
+ return fold_neg(ctx, op);
67
+ }
68
+ return false;
69
+}
90
+}
70
+
91
+
71
static bool fold_sub(OptContext *ctx, TCGOp *op)
92
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
93
+{
94
+ return tcg_out_fail_alignment(s, l);
95
+}
96
+
97
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
98
+{
99
+ return tcg_out_fail_alignment(s, l);
100
+}
101
+
102
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
103
TCGReg *index_reg, tcg_target_long *disp)
72
{
104
{
73
if (fold_const2(ctx, op) ||
105
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
74
- fold_xx_to_i(ctx, op, 0)) {
106
#else
75
+ fold_xx_to_i(ctx, op, 0) ||
107
TCGReg index_reg;
76
+ fold_sub_to_neg(ctx, op)) {
108
tcg_target_long disp;
77
return true;
109
+ unsigned a_bits = get_alignment_bits(opc);
78
}
110
79
return false;
111
+ if (a_bits) {
80
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
112
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
81
continue;
113
+ }
82
}
114
tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
83
break;
115
tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
84
- CASE_OP_32_64_VEC(sub):
116
#endif
85
- {
117
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
86
- TCGOpcode neg_op;
118
#else
87
- bool have_neg;
119
TCGReg index_reg;
88
-
120
tcg_target_long disp;
89
- if (arg_is_const(op->args[2])) {
121
+ unsigned a_bits = get_alignment_bits(opc);
90
- /* Proceed with possible constant folding. */
122
91
- break;
123
+ if (a_bits) {
92
- }
124
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
93
- switch (ctx.type) {
125
+ }
94
- case TCG_TYPE_I32:
126
tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
95
- neg_op = INDEX_op_neg_i32;
127
tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
96
- have_neg = TCG_TARGET_HAS_neg_i32;
128
#endif
97
- break;
98
- case TCG_TYPE_I64:
99
- neg_op = INDEX_op_neg_i64;
100
- have_neg = TCG_TARGET_HAS_neg_i64;
101
- break;
102
- case TCG_TYPE_V64:
103
- case TCG_TYPE_V128:
104
- case TCG_TYPE_V256:
105
- neg_op = INDEX_op_neg_vec;
106
- have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
107
- TCGOP_VECE(op)) > 0;
108
- break;
109
- default:
110
- g_assert_not_reached();
111
- }
112
- if (!have_neg) {
113
- break;
114
- }
115
- if (arg_is_const(op->args[1])
116
- && arg_info(op->args[1])->val == 0) {
117
- op->opc = neg_op;
118
- reset_temp(op->args[0]);
119
- op->args[1] = op->args[2];
120
- continue;
121
- }
122
- }
123
- break;
124
default:
125
break;
126
}
127
--
129
--
128
2.25.1
130
2.25.1
129
131
130
132
1
Recognize the identity function for division.

Suggested-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
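The tcg/tci.c hunks below gate each guest access on an alignment mask derived
from the MemOp: any set low bit means the address is misaligned and the
unaligned helper is raised. A small standalone illustration of that mask test
(plain C, names invented here, not the QEMU API):

    #include <stdint.h>
    #include <stdio.h>

    /* Power-of-two alignment check via a low-bit mask.  For an access that
     * must be aligned to (1 << align_bits) bytes, any of the low align_bits
     * bits being set means the address is misaligned. */
    static int is_misaligned(uint64_t addr, unsigned align_bits)
    {
        uint64_t a_mask = (UINT64_C(1) << align_bits) - 1;

        return (addr & a_mask) != 0;
    }

    int main(void)
    {
        printf("%d\n", is_misaligned(0x1004, 2));  /* 4-byte aligned -> 0 */
        printf("%d\n", is_misaligned(0x1005, 2));  /* misaligned     -> 1 */
        return 0;
    }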
10
6
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
diff --git a/tcg/tci.c b/tcg/tci.c
12
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
9
--- a/tcg/tci.c
14
+++ b/tcg/optimize.c
10
+++ b/tcg/tci.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
11
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
16
12
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
17
static bool fold_divide(OptContext *ctx, TCGOp *op)
13
MemOpIdx oi, const void *tb_ptr)
18
{
14
{
19
- return fold_const2(ctx, op);
15
- MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
20
+ if (fold_const2(ctx, op) ||
16
+ MemOp mop = get_memop(oi);
21
+ fold_xi_to_x(ctx, op, 1)) {
17
uintptr_t ra = (uintptr_t)tb_ptr;
22
+ return true;
18
19
#ifdef CONFIG_SOFTMMU
20
- switch (mop) {
21
+ switch (mop & (MO_BSWAP | MO_SSIZE)) {
22
case MO_UB:
23
return helper_ret_ldub_mmu(env, taddr, oi, ra);
24
case MO_SB:
25
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
26
}
27
#else
28
void *haddr = g2h(env_cpu(env), taddr);
29
+ unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
30
uint64_t ret;
31
32
set_helper_retaddr(ra);
33
- switch (mop) {
34
+ if (taddr & a_mask) {
35
+ helper_unaligned_ld(env, taddr);
23
+ }
36
+ }
24
+ return false;
37
+ switch (mop & (MO_BSWAP | MO_SSIZE)) {
25
}
38
case MO_UB:
26
39
ret = ldub_p(haddr);
27
static bool fold_dup(OptContext *ctx, TCGOp *op)
40
break;
41
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
42
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
43
MemOpIdx oi, const void *tb_ptr)
44
{
45
- MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
46
+ MemOp mop = get_memop(oi);
47
uintptr_t ra = (uintptr_t)tb_ptr;
48
49
#ifdef CONFIG_SOFTMMU
50
- switch (mop) {
51
+ switch (mop & (MO_BSWAP | MO_SIZE)) {
52
case MO_UB:
53
helper_ret_stb_mmu(env, taddr, val, oi, ra);
54
break;
55
@@ -XXX,XX +XXX,XX @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
56
}
57
#else
58
void *haddr = g2h(env_cpu(env), taddr);
59
+ unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
60
61
set_helper_retaddr(ra);
62
- switch (mop) {
63
+ if (taddr & a_mask) {
64
+ helper_unaligned_st(env, taddr);
65
+ }
66
+ switch (mop & (MO_BSWAP | MO_SIZE)) {
67
case MO_UB:
68
stb_p(haddr, val);
69
break;
28
--
70
--
29
2.25.1
71
2.25.1
30
72
31
73
1
Split out a whole bunch of placeholder functions, which are
currently identical. That won't last as more code gets moved.

Use CASE_32_64_VEC for some logical operators that previously
missed the addition of vectors.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 271 +++++++++++++++++++++++++++++++++++++++----------
1 file changed, 219 insertions(+), 52 deletions(-)

From: WANG Xuerui <git@xen0n.name>

Signed-off-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220106134238.3936163-1-git@xen0n.name>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 -
tcg/loongarch64/tcg-target.c.inc | 71 +++++++++++++++++++++++++++++++-
2 files changed, 69 insertions(+), 4 deletions(-)
13
11
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
15
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
14
--- a/tcg/loongarch64/tcg-target.h
17
+++ b/tcg/optimize.c
15
+++ b/tcg/loongarch64/tcg-target.h
18
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
16
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
19
}
17
18
#define TCG_TARGET_DEFAULT_MO (0)
19
20
-#ifdef CONFIG_SOFTMMU
21
#define TCG_TARGET_NEED_LDST_LABELS
22
-#endif
23
24
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
25
26
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
27
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/loongarch64/tcg-target.c.inc
29
+++ b/tcg/loongarch64/tcg-target.c.inc
30
@@ -XXX,XX +XXX,XX @@
31
* THE SOFTWARE.
32
*/
33
34
+#include "../tcg-ldst.c.inc"
35
+
36
#ifdef CONFIG_DEBUG_TCG
37
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
38
"zero",
39
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
40
*/
41
42
#if defined(CONFIG_SOFTMMU)
43
-#include "../tcg-ldst.c.inc"
44
-
45
/*
46
* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
47
* MemOpIdx oi, uintptr_t ra)
48
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
49
50
return tcg_out_goto(s, l->raddr);
20
}
51
}
21
52
+#else
53
+
22
+/*
54
+/*
23
+ * The fold_* functions return true when processing is complete,
55
+ * Alignment helpers for user-mode emulation
24
+ * usually by folding the operation to a constant or to a copy,
25
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
26
+ * like collect information about the value produced, for use in
27
+ * optimizing a subsequent operation.
28
+ *
29
+ * These first fold_* functions are all helpers, used by other
30
+ * folders for more specific operations.
31
+ */
56
+ */
32
+
57
+
33
+static bool fold_const1(OptContext *ctx, TCGOp *op)
58
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
59
+ unsigned a_bits)
34
+{
60
+{
35
+ if (arg_is_const(op->args[1])) {
61
+ TCGLabelQemuLdst *l = new_ldst_label(s);
36
+ uint64_t t;
37
+
62
+
38
+ t = arg_info(op->args[1])->val;
63
+ l->is_ld = is_ld;
39
+ t = do_constant_folding(op->opc, t, 0);
64
+ l->addrlo_reg = addr_reg;
40
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
65
+
41
+ }
66
+ /*
42
+ return false;
67
+ * Without micro-architecture details, we don't know which of bstrpick or
68
+ * andi is faster, so use bstrpick as it's not constrained by imm field
69
+ * width. (Not to say alignments >= 2^12 are going to happen any time
70
+ * soon, though)
71
+ */
72
+ tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
73
+
74
+ l->label_ptr[0] = s->code_ptr;
75
+ tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
76
+
77
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
43
+}
78
+}
44
+
79
+
45
+static bool fold_const2(OptContext *ctx, TCGOp *op)
80
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
46
+{
81
+{
47
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
82
+ /* resolve label address */
48
+ uint64_t t1 = arg_info(op->args[1])->val;
83
+ if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
49
+ uint64_t t2 = arg_info(op->args[2])->val;
84
+ return false;
85
+ }
50
+
86
+
51
+ t1 = do_constant_folding(op->opc, t1, t2);
87
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
52
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
88
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
53
+ }
89
+
54
+ return false;
90
+ /* tail call, with the return address back inline. */
91
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
92
+ tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
93
+ : helper_unaligned_st), true);
94
+ return true;
55
+}
95
+}
56
+
96
+
57
+/*
97
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
58
+ * These outermost fold_<op> functions are sorted alphabetically.
59
+ */
60
+
61
+static bool fold_add(OptContext *ctx, TCGOp *op)
62
+{
98
+{
63
+ return fold_const2(ctx, op);
99
+ return tcg_out_fail_alignment(s, l);
64
+}
100
+}
65
+
101
+
66
+static bool fold_and(OptContext *ctx, TCGOp *op)
102
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
67
+{
103
+{
68
+ return fold_const2(ctx, op);
104
+ return tcg_out_fail_alignment(s, l);
69
+}
105
+}
70
+
106
+
71
+static bool fold_andc(OptContext *ctx, TCGOp *op)
107
#endif /* CONFIG_SOFTMMU */
72
+{
108
73
+ return fold_const2(ctx, op);
109
/*
74
+}
110
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
75
+
111
MemOp opc;
76
static bool fold_call(OptContext *ctx, TCGOp *op)
112
#if defined(CONFIG_SOFTMMU)
77
{
113
tcg_insn_unit *label_ptr[1];
78
TCGContext *s = ctx->tcg;
114
+#else
79
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
115
+ unsigned a_bits;
80
return true;
116
#endif
81
}
117
TCGReg base;
82
118
83
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
119
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
84
+{
120
data_regl, addr_regl,
85
+ return fold_const1(ctx, op);
121
s->code_ptr, label_ptr);
86
+}
122
#else
87
+
123
+ a_bits = get_alignment_bits(opc);
88
+static bool fold_divide(OptContext *ctx, TCGOp *op)
124
+ if (a_bits) {
89
+{
125
+ tcg_out_test_alignment(s, true, addr_regl, a_bits);
90
+ return fold_const2(ctx, op);
126
+ }
91
+}
127
base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
92
+
128
TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
93
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
129
tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
94
+{
130
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
95
+ return fold_const2(ctx, op);
131
MemOp opc;
96
+}
132
#if defined(CONFIG_SOFTMMU)
97
+
133
tcg_insn_unit *label_ptr[1];
98
+static bool fold_exts(OptContext *ctx, TCGOp *op)
134
+#else
99
+{
135
+ unsigned a_bits;
100
+ return fold_const1(ctx, op);
136
#endif
101
+}
137
TCGReg base;
102
+
138
103
+static bool fold_extu(OptContext *ctx, TCGOp *op)
139
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
104
+{
140
data_regl, addr_regl,
105
+ return fold_const1(ctx, op);
141
s->code_ptr, label_ptr);
106
+}
142
#else
107
+
143
+ a_bits = get_alignment_bits(opc);
108
static bool fold_mb(OptContext *ctx, TCGOp *op)
144
+ if (a_bits) {
109
{
145
+ tcg_out_test_alignment(s, false, addr_regl, a_bits);
110
/* Eliminate duplicate and redundant fence instructions. */
146
+ }
111
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
147
base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
112
return true;
148
TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
113
}
149
tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
114
115
+static bool fold_mul(OptContext *ctx, TCGOp *op)
116
+{
117
+ return fold_const2(ctx, op);
118
+}
119
+
120
+static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
121
+{
122
+ return fold_const2(ctx, op);
123
+}
124
+
125
+static bool fold_nand(OptContext *ctx, TCGOp *op)
126
+{
127
+ return fold_const2(ctx, op);
128
+}
129
+
130
+static bool fold_neg(OptContext *ctx, TCGOp *op)
131
+{
132
+ return fold_const1(ctx, op);
133
+}
134
+
135
+static bool fold_nor(OptContext *ctx, TCGOp *op)
136
+{
137
+ return fold_const2(ctx, op);
138
+}
139
+
140
+static bool fold_not(OptContext *ctx, TCGOp *op)
141
+{
142
+ return fold_const1(ctx, op);
143
+}
144
+
145
+static bool fold_or(OptContext *ctx, TCGOp *op)
146
+{
147
+ return fold_const2(ctx, op);
148
+}
149
+
150
+static bool fold_orc(OptContext *ctx, TCGOp *op)
151
+{
152
+ return fold_const2(ctx, op);
153
+}
154
+
155
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
156
{
157
/* Opcodes that touch guest memory stop the mb optimization. */
158
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
159
return false;
160
}
161
162
+static bool fold_remainder(OptContext *ctx, TCGOp *op)
163
+{
164
+ return fold_const2(ctx, op);
165
+}
166
+
167
+static bool fold_shift(OptContext *ctx, TCGOp *op)
168
+{
169
+ return fold_const2(ctx, op);
170
+}
171
+
172
+static bool fold_sub(OptContext *ctx, TCGOp *op)
173
+{
174
+ return fold_const2(ctx, op);
175
+}
176
+
177
+static bool fold_xor(OptContext *ctx, TCGOp *op)
178
+{
179
+ return fold_const2(ctx, op);
180
+}
181
+
182
/* Propagate constants and copies, fold constant expressions. */
183
void tcg_optimize(TCGContext *s)
184
{
185
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
186
}
187
break;
188
189
- CASE_OP_32_64(not):
190
- CASE_OP_32_64(neg):
191
- CASE_OP_32_64(ext8s):
192
- CASE_OP_32_64(ext8u):
193
- CASE_OP_32_64(ext16s):
194
- CASE_OP_32_64(ext16u):
195
- CASE_OP_32_64(ctpop):
196
- case INDEX_op_ext32s_i64:
197
- case INDEX_op_ext32u_i64:
198
- case INDEX_op_ext_i32_i64:
199
- case INDEX_op_extu_i32_i64:
200
- case INDEX_op_extrl_i64_i32:
201
- case INDEX_op_extrh_i64_i32:
202
- if (arg_is_const(op->args[1])) {
203
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
204
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
205
- continue;
206
- }
207
- break;
208
-
209
CASE_OP_32_64(bswap16):
210
CASE_OP_32_64(bswap32):
211
case INDEX_op_bswap64_i64:
212
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
213
}
214
break;
215
216
- CASE_OP_32_64(add):
217
- CASE_OP_32_64(sub):
218
- CASE_OP_32_64(mul):
219
- CASE_OP_32_64(or):
220
- CASE_OP_32_64(and):
221
- CASE_OP_32_64(xor):
222
- CASE_OP_32_64(shl):
223
- CASE_OP_32_64(shr):
224
- CASE_OP_32_64(sar):
225
- CASE_OP_32_64(rotl):
226
- CASE_OP_32_64(rotr):
227
- CASE_OP_32_64(andc):
228
- CASE_OP_32_64(orc):
229
- CASE_OP_32_64(eqv):
230
- CASE_OP_32_64(nand):
231
- CASE_OP_32_64(nor):
232
- CASE_OP_32_64(muluh):
233
- CASE_OP_32_64(mulsh):
234
- CASE_OP_32_64(div):
235
- CASE_OP_32_64(divu):
236
- CASE_OP_32_64(rem):
237
- CASE_OP_32_64(remu):
238
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
239
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
240
- arg_info(op->args[2])->val);
241
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
242
- continue;
243
- }
244
- break;
245
-
246
CASE_OP_32_64(clz):
247
CASE_OP_32_64(ctz):
248
if (arg_is_const(op->args[1])) {
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
250
}
251
break;
252
253
+ default:
254
+ break;
255
+
256
+ /* ---------------------------------------------------------- */
257
+ /* Sorted alphabetically by opcode as much as possible. */
258
+
259
+ CASE_OP_32_64_VEC(add):
260
+ done = fold_add(&ctx, op);
261
+ break;
262
+ CASE_OP_32_64_VEC(and):
263
+ done = fold_and(&ctx, op);
264
+ break;
265
+ CASE_OP_32_64_VEC(andc):
266
+ done = fold_andc(&ctx, op);
267
+ break;
268
+ CASE_OP_32_64(ctpop):
269
+ done = fold_ctpop(&ctx, op);
270
+ break;
271
+ CASE_OP_32_64(div):
272
+ CASE_OP_32_64(divu):
273
+ done = fold_divide(&ctx, op);
274
+ break;
275
+ CASE_OP_32_64(eqv):
276
+ done = fold_eqv(&ctx, op);
277
+ break;
278
+ CASE_OP_32_64(ext8s):
279
+ CASE_OP_32_64(ext16s):
280
+ case INDEX_op_ext32s_i64:
281
+ case INDEX_op_ext_i32_i64:
282
+ done = fold_exts(&ctx, op);
283
+ break;
284
+ CASE_OP_32_64(ext8u):
285
+ CASE_OP_32_64(ext16u):
286
+ case INDEX_op_ext32u_i64:
287
+ case INDEX_op_extu_i32_i64:
288
+ case INDEX_op_extrl_i64_i32:
289
+ case INDEX_op_extrh_i64_i32:
290
+ done = fold_extu(&ctx, op);
291
+ break;
292
case INDEX_op_mb:
293
done = fold_mb(&ctx, op);
294
break;
295
+ CASE_OP_32_64(mul):
296
+ done = fold_mul(&ctx, op);
297
+ break;
298
+ CASE_OP_32_64(mulsh):
299
+ CASE_OP_32_64(muluh):
300
+ done = fold_mul_highpart(&ctx, op);
301
+ break;
302
+ CASE_OP_32_64(nand):
303
+ done = fold_nand(&ctx, op);
304
+ break;
305
+ CASE_OP_32_64(neg):
306
+ done = fold_neg(&ctx, op);
307
+ break;
308
+ CASE_OP_32_64(nor):
309
+ done = fold_nor(&ctx, op);
310
+ break;
311
+ CASE_OP_32_64_VEC(not):
312
+ done = fold_not(&ctx, op);
313
+ break;
314
+ CASE_OP_32_64_VEC(or):
315
+ done = fold_or(&ctx, op);
316
+ break;
317
+ CASE_OP_32_64_VEC(orc):
318
+ done = fold_orc(&ctx, op);
319
+ break;
320
case INDEX_op_qemu_ld_i32:
321
case INDEX_op_qemu_ld_i64:
322
done = fold_qemu_ld(&ctx, op);
323
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
324
case INDEX_op_qemu_st_i64:
325
done = fold_qemu_st(&ctx, op);
326
break;
327
-
328
- default:
329
+ CASE_OP_32_64(rem):
330
+ CASE_OP_32_64(remu):
331
+ done = fold_remainder(&ctx, op);
332
+ break;
333
+ CASE_OP_32_64(rotl):
334
+ CASE_OP_32_64(rotr):
335
+ CASE_OP_32_64(sar):
336
+ CASE_OP_32_64(shl):
337
+ CASE_OP_32_64(shr):
338
+ done = fold_shift(&ctx, op);
339
+ break;
340
+ CASE_OP_32_64_VEC(sub):
341
+ done = fold_sub(&ctx, op);
342
+ break;
343
+ CASE_OP_32_64_VEC(xor):
344
+ done = fold_xor(&ctx, op);
345
break;
346
}
347
348
--
150
--
349
2.25.1
151
2.25.1
350
152
351
153
1
The result is either 0 or 1, which means that we have
1
Support for unaligned accesses is difficult for pre-v6 hosts.
2
a 2-bit signed result, and thus 62 bits of sign.
2
While Debian still builds for armv4, we cannot use a compile
3
For clarity, use the smask_from_zmask function.
3
time test, so test the architecture at runtime and error out.
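
For the optimize-side message above, the "62 bits of sign" arithmetic can be
sketched in isolation. The helper name below is hypothetical; the real
smask_from_zmask lives in tcg/optimize.c and may differ in detail, so treat
this only as an illustration of the claim.

    #include <assert.h>
    #include <stdint.h>

    /* Illustration only: derive a sign-replication mask from a mask of
     * possibly-nonzero bits (z_mask).  If the top N bits are known zero,
     * the value fits in 64 - N unsigned bits, hence 64 - N + 1 signed
     * bits, and the top N - 1 bits are copies of the (zero) sign bit. */
    static uint64_t sketch_smask_from_zmask(uint64_t z_mask)
    {
        int rep = __builtin_clzll(z_mask);   /* z_mask must be nonzero */
        if (rep == 0) {
            return 0;                        /* msb may be set: no sign info */
        }
        return ~(~0ull >> (rep - 1));
    }

    int main(void)
    {
        /* setcond produces 0 or 1, so z_mask == 1 and the top 62 bits
         * repeat the sign, as stated in the message above. */
        assert(sketch_smask_from_zmask(1) == ~(~0ull >> 62));
        return 0;
    }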
4
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
tcg/optimize.c | 2 ++
8
tcg/arm/tcg-target.c.inc | 5 +++++
10
1 file changed, 2 insertions(+)
9
1 file changed, 5 insertions(+)
11
10
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
13
--- a/tcg/arm/tcg-target.c.inc
15
+++ b/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
15
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
16
if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
17
arm_arch = pl[1] - '0';
18
}
19
+
20
+ if (arm_arch < 6) {
21
+ error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
22
+ exit(EXIT_FAILURE);
23
+ }
17
}
24
}
18
25
19
ctx->z_mask = 1;
26
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
20
+ ctx->s_mask = smask_from_zmask(1);
21
return false;
22
}
23
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
25
}
26
27
ctx->z_mask = 1;
28
+ ctx->s_mask = smask_from_zmask(1);
29
return false;
30
31
do_setcond_const:
32
--
27
--
33
2.25.1
28
2.25.1
34
29
35
30
1
Most of these are handled by creating a fold_const2_commutative
1
This is now always true, since we require armv6.
2
to handle all of the binary operators. The rest were already
3
handled on a case-by-case basis in the switch, and have their
4
own fold function in which to place the call.
5
2
6
We now have only one major switch on TCGOpcode.
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
8
Introduce NO_DEST and a block comment for swap_commutative in
9
order to make the handling of brcond and movcond opcodes cleaner.
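
A rough standalone sketch of the canonicalization that swap_commutative
performs, using simplified stand-in types. The names sketch_swap_commutative
and Arg are made up for the example; the real code operates on TCGArg and
TempOptInfo, so this only mirrors the block comment introduced by the patch.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for a TCG argument. */
    typedef struct {
        int id;
        bool is_const;
    } Arg;

    /* If *p1 is a constant and *p2 is not, swap.
     * If *p2 matches the destination, swap.
     * Return true if a swap was performed. */
    static bool sketch_swap_commutative(const Arg *dest, Arg *p1, Arg *p2)
    {
        if ((p1->is_const && !p2->is_const) ||
            (dest && p2->id == dest->id)) {
            Arg t = *p1;
            *p1 = *p2;
            *p2 = t;
            return true;
        }
        return false;
    }

    int main(void)
    {
        /* brcond with a constant first operand: the arguments swap and
         * the condition must be swapped with them, e.g. lt becomes gt. */
        Arg c5 = { .id = -1, .is_const = true };
        Arg t1 = { .id = 1, .is_const = false };
        if (sketch_swap_commutative(NULL, &c5, &t1)) {   /* NULL ~ NO_DEST */
            printf("args swapped; condition lt -> gt\n");
        }
        return 0;
    }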
10
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
5
---
14
tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
6
tcg/arm/tcg-target.h | 3 +--
15
1 file changed, 70 insertions(+), 72 deletions(-)
7
tcg/arm/tcg-target.c.inc | 35 ++++++-----------------------------
8
2 files changed, 7 insertions(+), 31 deletions(-)
16
9
17
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/optimize.c
12
--- a/tcg/arm/tcg-target.h
20
+++ b/tcg/optimize.c
13
+++ b/tcg/arm/tcg-target.h
21
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
14
@@ -XXX,XX +XXX,XX @@
22
return -1;
15
16
extern int arm_arch;
17
18
-#define use_armv5t_instructions (__ARM_ARCH >= 5 || arm_arch >= 5)
19
#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
20
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
21
22
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
23
#define TCG_TARGET_HAS_eqv_i32 0
24
#define TCG_TARGET_HAS_nand_i32 0
25
#define TCG_TARGET_HAS_nor_i32 0
26
-#define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions
27
+#define TCG_TARGET_HAS_clz_i32 1
28
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
29
#define TCG_TARGET_HAS_ctpop_i32 0
30
#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
31
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/arm/tcg-target.c.inc
34
+++ b/tcg/arm/tcg-target.c.inc
35
@@ -XXX,XX +XXX,XX @@ static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
36
* Unless the C portion of QEMU is compiled as thumb, we don't need
37
* true BX semantics; merely a branch to an address held in a register.
38
*/
39
- if (use_armv5t_instructions) {
40
- tcg_out_bx_reg(s, cond, rn);
41
- } else {
42
- tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
43
- }
44
+ tcg_out_bx_reg(s, cond, rn);
23
}
45
}
24
46
25
+/**
47
static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
26
+ * swap_commutative:
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
27
+ * @dest: TCGArg of the destination argument, or NO_DEST.
49
}
28
+ * @p1: first paired argument
50
29
+ * @p2: second paired argument
51
/* LDR is interworking from v5t. */
30
+ *
52
- if (arm_mode || use_armv5t_instructions) {
31
+ * If *@p1 is a constant and *@p2 is not, swap.
53
- tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
32
+ * If *@p2 matches @dest, swap.
54
- return;
33
+ * Return true if a swap was performed.
55
- }
34
+ */
56
-
35
+
57
- /* else v4t */
36
+#define NO_DEST temp_arg(NULL)
58
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
37
+
59
- tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
38
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
60
+ tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
39
{
40
TCGArg a1 = *p1, a2 = *p2;
41
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
42
return false;
43
}
61
}
44
62
45
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
63
/*
46
+{
64
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
47
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
65
if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
48
+ return fold_const2(ctx, op);
66
if (arm_mode) {
49
+}
67
tcg_out_bl_imm(s, COND_AL, disp);
50
+
68
- return;
51
static bool fold_masks(OptContext *ctx, TCGOp *op)
69
- }
52
{
70
- if (use_armv5t_instructions) {
53
uint64_t a_mask = ctx->a_mask;
71
+ } else {
54
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
72
tcg_out_blx_imm(s, disp);
55
73
- return;
56
static bool fold_add(OptContext *ctx, TCGOp *op)
74
}
57
{
75
+ return;
58
- if (fold_const2(ctx, op) ||
59
+ if (fold_const2_commutative(ctx, op) ||
60
fold_xi_to_x(ctx, op, 0)) {
61
return true;
62
}
76
}
63
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
77
64
78
- if (use_armv5t_instructions) {
65
static bool fold_add2(OptContext *ctx, TCGOp *op)
79
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
66
{
80
- tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
67
+ /* Note that the high and low parts may be independently swapped. */
81
- } else if (arm_mode) {
68
+ swap_commutative(op->args[0], &op->args[2], &op->args[4]);
82
- /* ??? Know that movi_pool emits exactly 1 insn. */
69
+ swap_commutative(op->args[1], &op->args[3], &op->args[5]);
83
- tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
70
+
84
- tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
71
return fold_addsub2(ctx, op, true);
85
- } else {
86
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
87
- tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
88
- tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
89
- }
90
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
91
+ tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
72
}
92
}
73
93
74
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
94
static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
75
{
76
uint64_t z1, z2;
77
78
- if (fold_const2(ctx, op) ||
79
+ if (fold_const2_commutative(ctx, op) ||
80
fold_xi_to_i(ctx, op, 0) ||
81
fold_xi_to_x(ctx, op, -1) ||
82
fold_xx_to_x(ctx, op)) {
83
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
TCGCond cond = op->args[2];
87
- int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
88
+ int i;
89
90
+ if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
91
+ op->args[2] = cond = tcg_swap_cond(cond);
92
+ }
93
+
94
+ i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
95
if (i == 0) {
96
tcg_op_remove(ctx->tcg, op);
97
return true;
98
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
99
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
100
{
101
TCGCond cond = op->args[4];
102
- int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
103
TCGArg label = op->args[5];
104
- int inv = 0;
105
+ int i, inv = 0;
106
107
+ if (swap_commutative2(&op->args[0], &op->args[2])) {
108
+ op->args[4] = cond = tcg_swap_cond(cond);
109
+ }
110
+
111
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
112
if (i >= 0) {
113
goto do_brcond_const;
114
}
115
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
116
117
static bool fold_eqv(OptContext *ctx, TCGOp *op)
118
{
119
- if (fold_const2(ctx, op) ||
120
+ if (fold_const2_commutative(ctx, op) ||
121
fold_xi_to_x(ctx, op, -1) ||
122
fold_xi_to_not(ctx, op, 0)) {
123
return true;
124
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
125
static bool fold_movcond(OptContext *ctx, TCGOp *op)
126
{
127
TCGCond cond = op->args[5];
128
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
129
+ int i;
130
131
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
132
+ op->args[5] = cond = tcg_swap_cond(cond);
133
+ }
134
+ /*
135
+ * Canonicalize the "false" input reg to match the destination reg so
136
+ * that the tcg backend can implement a "move if true" operation.
137
+ */
138
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
139
+ op->args[5] = cond = tcg_invert_cond(cond);
140
+ }
141
+
142
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
143
if (i >= 0) {
144
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
145
}
146
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
147
148
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
149
{
150
- if (fold_const2(ctx, op) ||
151
+ if (fold_const2_commutative(ctx, op) ||
152
fold_xi_to_i(ctx, op, 0)) {
153
return true;
154
}
155
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
156
157
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
158
{
159
+ swap_commutative(op->args[0], &op->args[2], &op->args[3]);
160
+
161
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
162
uint64_t a = arg_info(op->args[2])->val;
163
uint64_t b = arg_info(op->args[3])->val;
164
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
165
166
static bool fold_nand(OptContext *ctx, TCGOp *op)
167
{
168
- if (fold_const2(ctx, op) ||
169
+ if (fold_const2_commutative(ctx, op) ||
170
fold_xi_to_not(ctx, op, -1)) {
171
return true;
172
}
173
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
174
175
static bool fold_nor(OptContext *ctx, TCGOp *op)
176
{
177
- if (fold_const2(ctx, op) ||
178
+ if (fold_const2_commutative(ctx, op) ||
179
fold_xi_to_not(ctx, op, 0)) {
180
return true;
181
}
182
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
183
184
static bool fold_or(OptContext *ctx, TCGOp *op)
185
{
186
- if (fold_const2(ctx, op) ||
187
+ if (fold_const2_commutative(ctx, op) ||
188
fold_xi_to_x(ctx, op, 0) ||
189
fold_xx_to_x(ctx, op)) {
190
return true;
191
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
192
static bool fold_setcond(OptContext *ctx, TCGOp *op)
193
{
194
TCGCond cond = op->args[3];
195
- int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
196
+ int i;
197
198
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
199
+ op->args[3] = cond = tcg_swap_cond(cond);
200
+ }
201
+
202
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
203
if (i >= 0) {
204
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
205
}
206
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
207
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
208
{
209
TCGCond cond = op->args[5];
210
- int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
211
- int inv = 0;
212
+ int i, inv = 0;
213
214
+ if (swap_commutative2(&op->args[1], &op->args[3])) {
215
+ op->args[5] = cond = tcg_swap_cond(cond);
216
+ }
217
+
218
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
219
if (i >= 0) {
220
goto do_setcond_const;
221
}
222
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
223
224
static bool fold_xor(OptContext *ctx, TCGOp *op)
225
{
226
- if (fold_const2(ctx, op) ||
227
+ if (fold_const2_commutative(ctx, op) ||
228
fold_xx_to_i(ctx, op, 0) ||
229
fold_xi_to_x(ctx, op, 0) ||
230
fold_xi_to_not(ctx, op, -1)) {
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
ctx.type = TCG_TYPE_I32;
233
}
234
235
- /* For commutative operations make constant second argument */
236
- switch (opc) {
237
- CASE_OP_32_64_VEC(add):
238
- CASE_OP_32_64_VEC(mul):
239
- CASE_OP_32_64_VEC(and):
240
- CASE_OP_32_64_VEC(or):
241
- CASE_OP_32_64_VEC(xor):
242
- CASE_OP_32_64(eqv):
243
- CASE_OP_32_64(nand):
244
- CASE_OP_32_64(nor):
245
- CASE_OP_32_64(muluh):
246
- CASE_OP_32_64(mulsh):
247
- swap_commutative(op->args[0], &op->args[1], &op->args[2]);
248
- break;
249
- CASE_OP_32_64(brcond):
250
- if (swap_commutative(-1, &op->args[0], &op->args[1])) {
251
- op->args[2] = tcg_swap_cond(op->args[2]);
252
- }
253
- break;
254
- CASE_OP_32_64(setcond):
255
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
256
- op->args[3] = tcg_swap_cond(op->args[3]);
257
- }
258
- break;
259
- CASE_OP_32_64(movcond):
260
- if (swap_commutative(-1, &op->args[1], &op->args[2])) {
261
- op->args[5] = tcg_swap_cond(op->args[5]);
262
- }
263
- /* For movcond, we canonicalize the "false" input reg to match
264
- the destination reg so that the tcg backend can implement
265
- a "move if true" operation. */
266
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
267
- op->args[5] = tcg_invert_cond(op->args[5]);
268
- }
269
- break;
270
- CASE_OP_32_64(add2):
271
- swap_commutative(op->args[0], &op->args[2], &op->args[4]);
272
- swap_commutative(op->args[1], &op->args[3], &op->args[5]);
273
- break;
274
- CASE_OP_32_64(mulu2):
275
- CASE_OP_32_64(muls2):
276
- swap_commutative(op->args[0], &op->args[2], &op->args[3]);
277
- break;
278
- case INDEX_op_brcond2_i32:
279
- if (swap_commutative2(&op->args[0], &op->args[2])) {
280
- op->args[4] = tcg_swap_cond(op->args[4]);
281
- }
282
- break;
283
- case INDEX_op_setcond2_i32:
284
- if (swap_commutative2(&op->args[1], &op->args[3])) {
285
- op->args[5] = tcg_swap_cond(op->args[5]);
286
- }
287
- break;
288
- default:
289
- break;
290
- }
291
-
292
/* Assume all bits affected, and no bits known zero. */
293
ctx.a_mask = -1;
294
ctx.z_mask = -1;
295
--
95
--
296
2.25.1
96
2.25.1
297
97
298
98
diff view generated by jsdifflib
1
Adjust the interface to take the OptContext parameter instead
1
This is now always true, since we require armv6.
2
of TCGContext or both.
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
6
tcg/arm/tcg-target.h | 1 -
9
1 file changed, 34 insertions(+), 33 deletions(-)
7
tcg/arm/tcg-target.c.inc | 192 ++++++---------------------------------
8
2 files changed, 27 insertions(+), 166 deletions(-)
10
9
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
12
--- a/tcg/arm/tcg-target.h
14
+++ b/tcg/optimize.c
13
+++ b/tcg/arm/tcg-target.h
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
14
@@ -XXX,XX +XXX,XX @@
16
} TempOptInfo;
15
17
16
extern int arm_arch;
18
typedef struct OptContext {
17
19
+ TCGContext *tcg;
18
-#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
20
TCGTempSet temps_used;
19
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
21
} OptContext;
20
22
21
#undef TCG_TARGET_STACK_GROWSUP
23
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
22
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
24
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
23
index XXXXXXX..XXXXXXX 100644
25
}
24
--- a/tcg/arm/tcg-target.c.inc
26
25
+++ b/tcg/arm/tcg-target.c.inc
27
-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
26
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
28
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
27
static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
29
{
28
TCGReg rn, TCGReg rm)
30
TCGTemp *dst_ts = arg_temp(dst);
29
{
31
TCGTemp *src_ts = arg_temp(src);
30
- /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
32
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
31
- if (!use_armv6_instructions && rd == rn) {
33
TCGOpcode new_op;
32
- if (rd == rm) {
34
33
- /* rd == rn == rm; copy an input to tmp first. */
35
if (ts_are_copies(dst_ts, src_ts)) {
34
- tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
36
- tcg_op_remove(s, op);
35
- rm = rn = TCG_REG_TMP;
37
+ tcg_op_remove(ctx->tcg, op);
36
- } else {
37
- rn = rm;
38
- rm = rd;
39
- }
40
- }
41
/* mul */
42
tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
43
}
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
45
static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
46
TCGReg rd1, TCGReg rn, TCGReg rm)
47
{
48
- /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
49
- if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
50
- if (rd0 == rm || rd1 == rm) {
51
- tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
52
- rn = TCG_REG_TMP;
53
- } else {
54
- TCGReg t = rn;
55
- rn = rm;
56
- rm = t;
57
- }
58
- }
59
/* umull */
60
tcg_out32(s, (cond << 28) | 0x00800090 |
61
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
63
static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
64
TCGReg rd1, TCGReg rn, TCGReg rm)
65
{
66
- /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
67
- if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
68
- if (rd0 == rm || rd1 == rm) {
69
- tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
70
- rn = TCG_REG_TMP;
71
- } else {
72
- TCGReg t = rn;
73
- rn = rm;
74
- rm = t;
75
- }
76
- }
77
/* smull */
78
tcg_out32(s, (cond << 28) | 0x00c00090 |
79
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
80
@@ -XXX,XX +XXX,XX @@ static void tcg_out_udiv(TCGContext *s, ARMCond cond,
81
82
static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
83
{
84
- if (use_armv6_instructions) {
85
- /* sxtb */
86
- tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
87
- } else {
88
- tcg_out_dat_reg(s, cond, ARITH_MOV,
89
- rd, 0, rn, SHIFT_IMM_LSL(24));
90
- tcg_out_dat_reg(s, cond, ARITH_MOV,
91
- rd, 0, rd, SHIFT_IMM_ASR(24));
92
- }
93
+ /* sxtb */
94
+ tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
95
}
96
97
static void __attribute__((unused))
98
@@ -XXX,XX +XXX,XX @@ tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
99
100
static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
101
{
102
- if (use_armv6_instructions) {
103
- /* sxth */
104
- tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
105
- } else {
106
- tcg_out_dat_reg(s, cond, ARITH_MOV,
107
- rd, 0, rn, SHIFT_IMM_LSL(16));
108
- tcg_out_dat_reg(s, cond, ARITH_MOV,
109
- rd, 0, rd, SHIFT_IMM_ASR(16));
110
- }
111
+ /* sxth */
112
+ tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
113
}
114
115
static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
116
{
117
- if (use_armv6_instructions) {
118
- /* uxth */
119
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
120
- } else {
121
- tcg_out_dat_reg(s, cond, ARITH_MOV,
122
- rd, 0, rn, SHIFT_IMM_LSL(16));
123
- tcg_out_dat_reg(s, cond, ARITH_MOV,
124
- rd, 0, rd, SHIFT_IMM_LSR(16));
125
- }
126
+ /* uxth */
127
+ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
128
}
129
130
static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
131
TCGReg rd, TCGReg rn, int flags)
132
{
133
- if (use_armv6_instructions) {
134
- if (flags & TCG_BSWAP_OS) {
135
- /* revsh */
136
- tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
137
- return;
138
- }
139
-
140
- /* rev16 */
141
- tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
142
- if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
143
- /* uxth */
144
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
145
- }
146
+ if (flags & TCG_BSWAP_OS) {
147
+ /* revsh */
148
+ tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
38
return;
149
return;
39
}
150
}
40
151
41
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
152
- if (flags == 0) {
42
}
153
- /*
43
}
154
- * For stores, no input or output extension:
44
155
- * rn = xxAB
45
-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
156
- * lsr tmp, rn, #8 tmp = 0xxA
46
- TCGOp *op, TCGArg dst, uint64_t val)
157
- * and tmp, tmp, #0xff tmp = 000A
47
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
158
- * orr rd, tmp, rn, lsl #8 rd = xABA
48
+ TCGArg dst, uint64_t val)
159
- */
49
{
160
- tcg_out_dat_reg(s, cond, ARITH_MOV,
50
const TCGOpDef *def = &tcg_op_defs[op->opc];
161
- TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
51
TCGType type;
162
- tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
52
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
163
- tcg_out_dat_reg(s, cond, ARITH_ORR,
53
/* Convert movi to mov with constant temp. */
164
- rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
54
tv = tcg_constant_internal(type, val);
165
- return;
55
init_ts_info(ctx, tv);
166
+ /* rev16 */
56
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
167
+ tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
57
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
168
+ if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
58
}
169
+ /* uxth */
59
170
+ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
60
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
171
}
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
172
-
62
{
173
- /*
63
int nb_temps, nb_globals, i;
174
- * Byte swap, leaving the result at the top of the register.
64
TCGOp *op, *op_next, *prev_mb = NULL;
175
- * We will then shift down, zero or sign-extending.
65
- OptContext ctx = {};
176
- */
66
+ OptContext ctx = { .tcg = s };
177
- if (flags & TCG_BSWAP_IZ) {
67
178
- /*
68
/* Array VALS has an element for each temp.
179
- * rn = 00AB
69
If this temp holds a constant then its value is kept in VALS' element.
180
- * ror tmp, rn, #8 tmp = B00A
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
181
- * orr tmp, tmp, tmp, lsl #16 tmp = BA00
71
CASE_OP_32_64(rotr):
182
- */
72
if (arg_is_const(op->args[1])
183
- tcg_out_dat_reg(s, cond, ARITH_MOV,
73
&& arg_info(op->args[1])->val == 0) {
184
- TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8));
74
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
185
- tcg_out_dat_reg(s, cond, ARITH_ORR,
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
186
- TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP,
76
continue;
187
- SHIFT_IMM_LSL(16));
77
}
188
- } else {
78
break;
189
- /*
79
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
190
- * rn = xxAB
80
if (!arg_is_const(op->args[1])
191
- * and tmp, rn, #0xff00 tmp = 00A0
81
&& arg_is_const(op->args[2])
192
- * lsl tmp, tmp, #8 tmp = 0A00
82
&& arg_info(op->args[2])->val == 0) {
193
- * orr tmp, tmp, rn, lsl #24 tmp = BA00
83
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
194
- */
84
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
195
- tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1);
85
continue;
196
- tcg_out_dat_reg(s, cond, ARITH_MOV,
86
}
197
- TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8));
87
break;
198
- tcg_out_dat_reg(s, cond, ARITH_ORR,
88
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
199
- TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24));
89
if (!arg_is_const(op->args[1])
200
- }
90
&& arg_is_const(op->args[2])
201
- tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP,
91
&& arg_info(op->args[2])->val == -1) {
202
- (flags & TCG_BSWAP_OS
92
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
203
- ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));
93
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
204
}
94
continue;
205
95
}
206
static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
96
break;
207
{
97
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
208
- if (use_armv6_instructions) {
98
209
- /* rev */
99
if (partmask == 0) {
210
- tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
100
tcg_debug_assert(nb_oargs == 1);
211
- } else {
101
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
212
- tcg_out_dat_reg(s, cond, ARITH_EOR,
102
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
213
- TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
103
continue;
214
- tcg_out_dat_imm(s, cond, ARITH_BIC,
215
- TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
216
- tcg_out_dat_reg(s, cond, ARITH_MOV,
217
- rd, 0, rn, SHIFT_IMM_ROR(8));
218
- tcg_out_dat_reg(s, cond, ARITH_EOR,
219
- rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
220
- }
221
+ /* rev */
222
+ tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
223
}
224
225
static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
226
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
227
{
228
if (use_armv7_instructions) {
229
tcg_out32(s, INSN_DMB_ISH);
230
- } else if (use_armv6_instructions) {
231
+ } else {
232
tcg_out32(s, INSN_DMB_MCR);
233
}
234
}
235
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
236
if (argreg & 1) {
237
argreg++;
238
}
239
- if (use_armv6_instructions && argreg >= 4
240
- && (arglo & 1) == 0 && arghi == arglo + 1) {
241
+ if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
242
tcg_out_strd_8(s, COND_AL, arglo,
243
TCG_REG_CALL_STACK, (argreg - 4) * 4);
244
return argreg + 2;
245
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
246
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
247
: offsetof(CPUTLBEntry, addr_write));
248
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
249
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
250
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
251
unsigned s_bits = opc & MO_SIZE;
252
unsigned a_bits = get_alignment_bits(opc);
253
254
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
255
}
256
257
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
258
- if (use_armv6_instructions) {
259
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
260
- } else {
261
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
262
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
263
- }
264
+ tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
265
266
/* Extract the tlb index from the address into R0. */
267
tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
268
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
269
* Load the tlb comparator into R2/R3 and the fast path addend into R1.
270
*/
271
if (cmp_off == 0) {
272
- if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
273
+ if (TARGET_LONG_BITS == 64) {
274
tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
275
} else {
276
tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
277
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
278
} else {
279
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
280
TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
281
- if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
282
+ if (TARGET_LONG_BITS == 64) {
283
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
284
} else {
285
tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
104
}
286
}
105
if (affected == 0) {
287
}
106
tcg_debug_assert(nb_oargs == 1);
288
- if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
107
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
289
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
108
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
290
- }
109
continue;
291
110
}
292
/* Load the tlb addend. */
111
293
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
294
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
113
CASE_OP_32_64(mulsh):
295
TCGReg argreg, datalo, datahi;
114
if (arg_is_const(op->args[2])
296
MemOpIdx oi = lb->oi;
115
&& arg_info(op->args[2])->val == 0) {
297
MemOp opc = get_memop(oi);
116
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
298
- void *func;
117
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
299
118
continue;
300
if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
119
}
301
return false;
120
break;
302
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
121
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
303
argreg = tcg_out_arg_imm32(s, argreg, oi);
122
CASE_OP_32_64_VEC(or):
304
argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
123
CASE_OP_32_64_VEC(and):
305
124
if (args_are_copies(op->args[1], op->args[2])) {
306
- /* For armv6 we can use the canonical unsigned helpers and minimize
125
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
307
- icache usage. For pre-armv6, use the signed helpers since we do
126
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
308
- not have a single insn sign-extend. */
127
continue;
309
- if (use_armv6_instructions) {
128
}
310
- func = qemu_ld_helpers[opc & MO_SIZE];
129
break;
311
- } else {
130
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
312
- func = qemu_ld_helpers[opc & MO_SSIZE];
131
CASE_OP_32_64_VEC(sub):
313
- if (opc & MO_SIGN) {
132
CASE_OP_32_64_VEC(xor):
314
- opc = MO_UL;
133
if (args_are_copies(op->args[1], op->args[2])) {
315
- }
134
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
316
- }
135
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
317
- tcg_out_call(s, func);
136
continue;
318
+ /* Use the canonical unsigned helpers and minimize icache usage. */
137
}
319
+ tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]);
138
break;
320
139
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
321
datalo = lb->datalo_reg;
140
allocator where needed and possible. Also detect copies. */
322
datahi = lb->datahi_reg;
141
switch (opc) {
323
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
142
CASE_OP_32_64_VEC(mov):
324
break;
143
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
325
case MO_UQ:
144
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
326
/* Avoid ldrd for user-only emulation, to handle unaligned. */
145
continue;
327
- if (USING_SOFTMMU && use_armv6_instructions
146
328
+ if (USING_SOFTMMU
147
case INDEX_op_dup_vec:
329
&& (datalo & 1) == 0 && datahi == datalo + 1) {
148
if (arg_is_const(op->args[1])) {
330
tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
149
tmp = arg_info(op->args[1])->val;
331
} else if (datalo != addend) {
150
tmp = dup_const(TCGOP_VECE(op), tmp);
332
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
151
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
333
break;
152
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
334
case MO_UQ:
153
continue;
335
/* Avoid ldrd for user-only emulation, to handle unaligned. */
154
}
336
- if (USING_SOFTMMU && use_armv6_instructions
155
break;
337
+ if (USING_SOFTMMU
156
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
338
&& (datalo & 1) == 0 && datahi == datalo + 1) {
157
case INDEX_op_dup2_vec:
339
tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
158
assert(TCG_TARGET_REG_BITS == 32);
340
} else if (datalo == addrlo) {
159
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
341
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
160
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
342
break;
161
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
343
case MO_64:
162
deposit64(arg_info(op->args[1])->val, 32, 32,
344
/* Avoid strd for user-only emulation, to handle unaligned. */
163
arg_info(op->args[2])->val));
345
- if (USING_SOFTMMU && use_armv6_instructions
164
continue;
346
+ if (USING_SOFTMMU
165
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
347
&& (datalo & 1) == 0 && datahi == datalo + 1) {
166
case INDEX_op_extrh_i64_i32:
348
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
167
if (arg_is_const(op->args[1])) {
349
} else {
168
tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
350
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
169
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
351
break;
170
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
352
case MO_64:
171
continue;
353
/* Avoid strd for user-only emulation, to handle unaligned. */
172
}
354
- if (USING_SOFTMMU && use_armv6_instructions
173
break;
355
+ if (USING_SOFTMMU
174
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
356
&& (datalo & 1) == 0 && datahi == datalo + 1) {
175
if (arg_is_const(op->args[1])) {
357
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
176
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
358
} else {
177
op->args[2]);
178
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
179
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
180
continue;
181
}
182
break;
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
185
tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
186
arg_info(op->args[2])->val);
187
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
188
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
189
continue;
190
}
191
break;
192
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
193
TCGArg v = arg_info(op->args[1])->val;
194
if (v != 0) {
195
tmp = do_constant_folding(opc, v, 0);
196
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
197
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
198
} else {
199
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
200
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
201
}
202
continue;
203
}
204
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
205
tmp = deposit64(arg_info(op->args[1])->val,
206
op->args[3], op->args[4],
207
arg_info(op->args[2])->val);
208
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
209
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
210
continue;
211
}
212
break;
213
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
214
if (arg_is_const(op->args[1])) {
215
tmp = extract64(arg_info(op->args[1])->val,
216
op->args[2], op->args[3]);
217
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
218
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
219
continue;
220
}
221
break;
222
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
223
if (arg_is_const(op->args[1])) {
224
tmp = sextract64(arg_info(op->args[1])->val,
225
op->args[2], op->args[3]);
226
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
227
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
228
continue;
229
}
230
break;
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
tmp = (int32_t)(((uint32_t)v1 >> shr) |
233
((uint32_t)v2 << (32 - shr)));
234
}
235
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
236
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
237
continue;
238
}
239
break;
240
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
241
tmp = do_constant_folding_cond(opc, op->args[1],
242
op->args[2], op->args[3]);
243
if (tmp != 2) {
244
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
245
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
246
continue;
247
}
248
break;
249
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
250
tmp = do_constant_folding_cond(opc, op->args[1],
251
op->args[2], op->args[5]);
252
if (tmp != 2) {
253
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
254
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
255
continue;
256
}
257
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
258
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
259
260
rl = op->args[0];
261
rh = op->args[1];
262
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
263
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
264
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
265
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
266
continue;
267
}
268
break;
269
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
270
271
rl = op->args[0];
272
rh = op->args[1];
273
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
274
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
275
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
276
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
277
continue;
278
}
279
break;
280
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
281
op->args[5]);
282
if (tmp != 2) {
283
do_setcond_const:
284
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
285
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
286
continue;
287
}
288
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
289
--
359
--
290
2.25.1
360
2.25.1
291
361
292
362
1
Recognize the constant function for or-complement.
1
We will shortly allow the use of unaligned memory accesses,
2
and these require proper alignment. Use get_alignment_bits
3
to verify and remove USING_SOFTMMU.
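
The identity behind the or-complement fold on the optimize side: x OR NOT x
is all ones for any x, so orc with both inputs copies of the same value folds
to the constant -1, which is what the added fold_xx_to_i(ctx, op, -1) call
recognizes. A quick standalone check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* orc(x, x) = x | ~x = -1 for every x. */
        for (uint64_t x = 0; x < 4096; x++) {
            assert((x | ~x) == UINT64_MAX);
        }
        return 0;
    }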
2
4
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/optimize.c | 1 +
8
tcg/arm/tcg-target.c.inc | 23 ++++++++---------------
9
1 file changed, 1 insertion(+)
9
1 file changed, 8 insertions(+), 15 deletions(-)
10
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
13
--- a/tcg/arm/tcg-target.c.inc
14
+++ b/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
15
@@ -XXX,XX +XXX,XX @@ bool use_idiv_instructions;
16
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
bool use_neon_instructions;
17
{
17
#endif
18
if (fold_const2(ctx, op) ||
18
19
+ fold_xx_to_i(ctx, op, -1) ||
19
-/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */
20
fold_xi_to_x(ctx, op, -1) ||
20
-#ifdef CONFIG_SOFTMMU
21
fold_ix_to_not(ctx, op, 0)) {
21
-# define USING_SOFTMMU 1
22
return true;
22
-#else
23
-# define USING_SOFTMMU 0
24
-#endif
25
-
26
#ifdef CONFIG_DEBUG_TCG
27
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
28
"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
30
tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
31
break;
32
case MO_UQ:
33
- /* Avoid ldrd for user-only emulation, to handle unaligned. */
34
- if (USING_SOFTMMU
35
+ /* LDRD requires alignment; double-check that. */
36
+ if (get_alignment_bits(opc) >= MO_64
37
&& (datalo & 1) == 0 && datahi == datalo + 1) {
38
tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
39
} else if (datalo != addend) {
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
41
tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
42
break;
43
case MO_UQ:
44
- /* Avoid ldrd for user-only emulation, to handle unaligned. */
45
- if (USING_SOFTMMU
46
+ /* LDRD requires alignment; double-check that. */
47
+ if (get_alignment_bits(opc) >= MO_64
48
&& (datalo & 1) == 0 && datahi == datalo + 1) {
49
tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
50
} else if (datalo == addrlo) {
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
52
tcg_out_st32_r(s, cond, datalo, addrlo, addend);
53
break;
54
case MO_64:
55
- /* Avoid strd for user-only emulation, to handle unaligned. */
56
- if (USING_SOFTMMU
57
+ /* STRD requires alignment; double-check that. */
58
+ if (get_alignment_bits(opc) >= MO_64
59
&& (datalo & 1) == 0 && datahi == datalo + 1) {
60
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
61
} else {
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
63
tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
64
break;
65
case MO_64:
66
- /* Avoid strd for user-only emulation, to handle unaligned. */
67
- if (USING_SOFTMMU
68
+ /* STRD requires alignment; double-check that. */
69
+ if (get_alignment_bits(opc) >= MO_64
70
&& (datalo & 1) == 0 && datahi == datalo + 1) {
71
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
72
} else {
23
--
73
--
24
2.25.1
74
2.25.1
25
75
26
76
1
Rename to fold_addsub2.
1
From armv6, the architecture supports unaligned accesses.
2
Use Int128 to implement the wider operation.
2
All we need to do is perform the correct alignment check
3
in tcg_out_tlb_read.
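
For the tcg-target change above, a worked example of the page check with
illustrative values only: the page size, address and macro names here are
assumptions for the sketch, not taken from the patch.

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BITS 12                                 /* assumed 4 KiB pages */
    #define PAGE_MASK (~((uint32_t)(1u << PAGE_BITS) - 1))

    int main(void)
    {
        unsigned s_mask = 7;        /* 8-byte access: (1 << MO_64) - 1        */
        unsigned a_mask = 0;        /* guest guarantees only byte alignment   */
        uint32_t addr   = 0x11ffa;  /* access covers 0x11ffa .. 0x12001       */

        /* Test the page of the last byte of the access; the low alignment
         * bits (none here) are left to be checked against zero. */
        uint32_t t_addr = addr + (s_mask - a_mask);
        uint32_t cmp    = t_addr & (PAGE_MASK | a_mask);

        /* The access crosses into page 0x12000, so the TLB comparator for
         * the page containing addr (0x11000) cannot match and the slow
         * path is taken. */
        assert(cmp == 0x12000);
        return 0;
    }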
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
8
tcg/arm/tcg-target.c.inc | 41 ++++++++++++++++++++--------------------
10
1 file changed, 44 insertions(+), 21 deletions(-)
9
1 file changed, 21 insertions(+), 20 deletions(-)
11
10
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
13
--- a/tcg/arm/tcg-target.c.inc
15
+++ b/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
17
*/
16
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
18
17
: offsetof(CPUTLBEntry, addr_write));
19
#include "qemu/osdep.h"
18
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
20
+#include "qemu/int128.h"
19
- unsigned s_bits = opc & MO_SIZE;
21
#include "tcg/tcg-op.h"
20
- unsigned a_bits = get_alignment_bits(opc);
22
#include "tcg-internal.h"
21
-
23
22
- /*
24
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
23
- * We don't support inline unaligned acceses, but we can easily
25
return false;
24
- * support overalignment checks.
26
}
25
- */
27
26
- if (a_bits < s_bits) {
28
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
27
- a_bits = s_bits;
29
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
28
- }
30
{
29
+ unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
31
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
30
+ unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
32
arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
31
+ TCGReg t_addr;
33
- uint32_t al = arg_info(op->args[2])->val;
32
34
- uint32_t ah = arg_info(op->args[3])->val;
33
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
35
- uint32_t bl = arg_info(op->args[4])->val;
34
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
36
- uint32_t bh = arg_info(op->args[5])->val;
35
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
37
- uint64_t a = ((uint64_t)ah << 32) | al;
36
38
- uint64_t b = ((uint64_t)bh << 32) | bl;
37
/*
39
+ uint64_t al = arg_info(op->args[2])->val;
38
* Check alignment, check comparators.
40
+ uint64_t ah = arg_info(op->args[3])->val;
39
- * Do this in no more than 3 insns. Use MOVW for v7, if possible,
41
+ uint64_t bl = arg_info(op->args[4])->val;
40
+ * Do this in 2-4 insns. Use MOVW for v7, if possible,
42
+ uint64_t bh = arg_info(op->args[5])->val;
41
* to reduce the number of sequential conditional instructions.
43
TCGArg rl, rh;
42
* Almost all guests have at least 4k pages, which means that we need
44
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
43
* to clear at least 9 bits even for an 8-byte memory, which means it
45
+ TCGOp *op2;
44
* isn't worth checking for an immediate operand for BIC.
46
45
+ *
47
- if (add) {
46
+ * For unaligned accesses, test the page of the last unit of alignment.
48
- a += b;
47
+ * This leaves the least significant alignment bits unchanged, and of
49
+ if (ctx->type == TCG_TYPE_I32) {
48
+ * course must be zero.
50
+ uint64_t a = deposit64(al, 32, 32, ah);
49
*/
51
+ uint64_t b = deposit64(bl, 32, 32, bh);
50
+ t_addr = addrlo;
52
+
51
+ if (a_mask < s_mask) {
53
+ if (add) {
52
+ t_addr = TCG_REG_R0;
54
+ a += b;
53
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
55
+ } else {
54
+ addrlo, s_mask - a_mask);
56
+ a -= b;
55
+ }
57
+ }
56
if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
58
+
57
- tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));
59
+ al = sextract64(a, 0, 32);
58
-
60
+ ah = sextract64(a, 32, 32);
59
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
61
} else {
60
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
62
- a -= b;
61
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
63
+ Int128 a = int128_make128(al, ah);
62
- addrlo, TCG_REG_TMP, 0);
64
+ Int128 b = int128_make128(bl, bh);
63
+ t_addr, TCG_REG_TMP, 0);
65
+
64
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
66
+ if (add) {
65
} else {
67
+ a = int128_add(a, b);
66
- if (a_bits) {
68
+ } else {
67
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
69
+ a = int128_sub(a, b);
68
- (1 << a_bits) - 1);
70
+ }
69
+ if (a_mask) {
71
+
70
+ tcg_debug_assert(a_mask <= 0xff);
72
+ al = int128_getlo(a);
71
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
73
+ ah = int128_gethi(a);
74
}
72
}
75
73
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
76
rl = op->args[0];
74
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
77
rh = op->args[1];
75
SHIFT_IMM_LSR(TARGET_PAGE_BITS));
78
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
76
- tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
79
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
77
+ tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
80
+
78
0, TCG_REG_R2, TCG_REG_TMP,
81
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
79
SHIFT_IMM_LSL(TARGET_PAGE_BITS));
82
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
83
+
84
+ tcg_opt_gen_movi(ctx, op, rl, al);
85
+ tcg_opt_gen_movi(ctx, op2, rh, ah);
86
return true;
87
}
80
}
88
return false;
89
}
90
91
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
92
+static bool fold_add2(OptContext *ctx, TCGOp *op)
93
{
94
- return fold_addsub2_i32(ctx, op, true);
95
+ return fold_addsub2(ctx, op, true);
96
}
97
98
static bool fold_and(OptContext *ctx, TCGOp *op)
99
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
100
return false;
101
}
102
103
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
104
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
105
{
106
- return fold_addsub2_i32(ctx, op, false);
107
+ return fold_addsub2(ctx, op, false);
108
}
109
110
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
111
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
112
CASE_OP_32_64_VEC(add):
113
done = fold_add(&ctx, op);
114
break;
115
- case INDEX_op_add2_i32:
116
- done = fold_add2_i32(&ctx, op);
117
+ CASE_OP_32_64(add2):
118
+ done = fold_add2(&ctx, op);
119
break;
120
CASE_OP_32_64_VEC(and):
121
done = fold_and(&ctx, op);
122
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
123
CASE_OP_32_64_VEC(sub):
124
done = fold_sub(&ctx, op);
125
break;
126
- case INDEX_op_sub2_i32:
127
- done = fold_sub2_i32(&ctx, op);
128
+ CASE_OP_32_64(sub2):
129
+ done = fold_sub2(&ctx, op);
130
break;
131
CASE_OP_32_64_VEC(xor):
132
done = fold_xor(&ctx, op);
133
--
81
--
134
2.25.1
82
2.25.1
135
83
136
84
1
This "garbage" setting pre-dates the addition of the type
1
Reserve a register for the guest_base using aarch64 for reference.
2
changing opcodes INDEX_op_ext_i32_i64, INDEX_op_extu_i32_i64,
2
By doing so, we do not have to recompute it for every memory load.
3
and INDEX_op_extr{l,h}_i64_i32.
4
3
5
So now we have definitive points at which to adjust z_mask
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
to eliminate such bits from the 32-bit operands.
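
A small standalone illustration of the convention described above: for 32-bit
ops the tracked masks are kept sign-extended, matching how 32-bit constants
are stored in the 64-bit fields. The values are made up for the example and
assume the usual two's-complement hosts.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* All 32 result bits unknown: the sign-extended representation
         * marks the high half unknown as well, just as the constant -1
         * is stored as all ones. */
        uint64_t z_mask = 0xffffffffu;
        z_mask = (int32_t)z_mask;
        assert(z_mask == UINT64_MAX);

        /* A result known to be non-negative keeps a clear high half. */
        z_mask = 0x7fffffffu;
        z_mask = (int32_t)z_mask;
        assert(z_mask == 0x7fffffff);
        return 0;
    }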
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
6
---
12
tcg/optimize.c | 35 ++++++++++++++++-------------------
7
tcg/arm/tcg-target.c.inc | 39 ++++++++++++++++++++++++++++-----------
13
1 file changed, 16 insertions(+), 19 deletions(-)
8
1 file changed, 28 insertions(+), 11 deletions(-)
14
9
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/optimize.c
12
--- a/tcg/arm/tcg-target.c.inc
18
+++ b/tcg/optimize.c
13
+++ b/tcg/arm/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
14
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[2] = {
20
ti->is_const = true;
15
21
ti->val = ts->val;
16
#define TCG_REG_TMP TCG_REG_R12
22
ti->z_mask = ts->val;
17
#define TCG_VEC_TMP TCG_REG_Q15
23
- if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
18
+#ifndef CONFIG_SOFTMMU
24
- /* High bits of a 32-bit quantity are garbage. */
19
+#define TCG_REG_GUEST_BASE TCG_REG_R11
25
- ti->z_mask |= ~0xffffffffull;
20
+#endif
26
- }
21
22
typedef enum {
23
COND_EQ = 0x0,
24
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
25
26
static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
27
TCGReg datalo, TCGReg datahi,
28
- TCGReg addrlo, TCGReg addend)
29
+ TCGReg addrlo, TCGReg addend,
30
+ bool scratch_addend)
31
{
32
/* Byte swapping is left to middle-end expansion. */
33
tcg_debug_assert((opc & MO_BSWAP) == 0);
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
35
if (get_alignment_bits(opc) >= MO_64
36
&& (datalo & 1) == 0 && datahi == datalo + 1) {
37
tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
38
- } else if (datalo != addend) {
39
+ } else if (scratch_addend) {
40
tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
41
tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
42
} else {
43
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
44
label_ptr = s->code_ptr;
45
tcg_out_bl_imm(s, COND_NE, 0);
46
47
- tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
48
+ tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
49
50
add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
51
s->code_ptr, label_ptr);
52
#else /* !CONFIG_SOFTMMU */
53
if (guest_base) {
54
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
55
- tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
56
+ tcg_out_qemu_ld_index(s, opc, datalo, datahi,
57
+ addrlo, TCG_REG_GUEST_BASE, false);
27
} else {
58
} else {
28
ti->is_const = false;
59
tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
29
ti->z_mask = -1;
60
}
30
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
31
TCGTemp *src_ts = arg_temp(src);
62
32
TempOptInfo *di;
63
static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
33
TempOptInfo *si;
64
TCGReg datalo, TCGReg datahi,
34
- uint64_t z_mask;
65
- TCGReg addrlo, TCGReg addend)
35
TCGOpcode new_op;
66
+ TCGReg addrlo, TCGReg addend,
36
67
+ bool scratch_addend)
37
if (ts_are_copies(dst_ts, src_ts)) {
38
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
39
op->args[0] = dst;
40
op->args[1] = src;
41
42
- z_mask = si->z_mask;
43
- if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
44
- /* High bits of the destination are now garbage. */
45
- z_mask |= ~0xffffffffull;
46
- }
47
- di->z_mask = z_mask;
48
+ di->z_mask = si->z_mask;
49
50
if (src_ts->type == dst_ts->type) {
51
TempOptInfo *ni = ts_info(si->next_copy);
52
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
53
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
54
TCGArg dst, uint64_t val)
55
{
68
{
56
- /* Convert movi to mov with constant temp. */
69
/* Byte swapping is left to middle-end expansion. */
57
- TCGTemp *tv = tcg_constant_internal(ctx->type, val);
70
tcg_debug_assert((opc & MO_BSWAP) == 0);
58
+ TCGTemp *tv;
71
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
59
72
if (get_alignment_bits(opc) >= MO_64
60
+ if (ctx->type == TCG_TYPE_I32) {
73
&& (datalo & 1) == 0 && datahi == datalo + 1) {
61
+ val = (int32_t)val;
74
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
75
- } else {
76
+ } else if (scratch_addend) {
77
tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
78
tcg_out_st32_12(s, cond, datahi, addend, 4);
79
+ } else {
80
+ tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
81
+ addend, addrlo, SHIFT_IMM_LSL(0));
82
+ tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
83
+ tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
84
}
85
break;
86
default:
87
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
88
mem_index = get_mmuidx(oi);
89
addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
90
91
- tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);
92
+ tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
93
+ addrlo, addend, true);
94
95
/* The conditional call must come last, as we're going to return here. */
96
label_ptr = s->code_ptr;
97
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
98
s->code_ptr, label_ptr);
99
#else /* !CONFIG_SOFTMMU */
100
if (guest_base) {
101
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
102
- tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
103
- datahi, addrlo, TCG_REG_TMP);
104
+ tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
105
+ addrlo, TCG_REG_GUEST_BASE, false);
106
} else {
107
tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
108
}
109
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
110
111
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
112
113
+#ifndef CONFIG_SOFTMMU
114
+ if (guest_base) {
115
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
116
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
62
+ }
117
+ }
118
+#endif
63
+
119
+
64
+ /* Convert movi to mov with constant temp. */
120
tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
65
+ tv = tcg_constant_internal(ctx->type, val);
66
init_ts_info(ctx, tv);
67
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
68
}
69
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
70
uint64_t z_mask = ctx->z_mask;
71
121
72
/*
122
/*
73
- * 32-bit ops generate 32-bit results. For the result is zero test
74
- * below, we can ignore high bits, but for further optimizations we
75
- * need to record that the high bits contain garbage.
76
+ * 32-bit ops generate 32-bit results, which for the purpose of
77
+ * simplifying tcg are sign-extended. Certainly that's how we
78
+ * represent our constants elsewhere. Note that the bits will
79
+ * be reset properly for a 64-bit value when encountering the
80
+ * type changing opcodes.
81
*/
82
if (ctx->type == TCG_TYPE_I32) {
83
- ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
84
- a_mask &= MAKE_64BIT_MASK(0, 32);
85
- z_mask &= MAKE_64BIT_MASK(0, 32);
86
+ a_mask = (int32_t)a_mask;
87
+ z_mask = (int32_t)z_mask;
88
+ ctx->z_mask = z_mask;
89
}
90
91
if (z_mask == 0) {
92
--
123
--
93
2.25.1
124
2.25.1
94
125
95
126
Split out the conditional conversion from a more complex logical
operation to a simple NOT. Create a couple more helpers to make
this easy for the outer-most logical operations.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
1 file changed, 86 insertions(+), 72 deletions(-)

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/arm/tcg-target.h | 2 -
tcg/arm/tcg-target.c.inc | 83 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 81 insertions(+), 4 deletions(-)
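For reference, the identities the new helpers catch, summarised by hand
from the tcg/optimize.c hunks below (a sketch, not part of the patch):

    /*
     * Folds performed via fold_xi_to_not() / fold_ix_to_not():
     *   xor  t, x, -1  -> not t, x       nand t, x, -1  -> not t, x
     *   nor  t, x,  0  -> not t, x       eqv  t, x,  0  -> not t, x
     *   andc t, -1, x  -> not t, x       orc  t,  0, x  -> not t, x
     * fold_to_not() only rewrites the op when the host advertises
     * TCG_TARGET_HAS_not_{i32,i64,vec}; otherwise the op is left as is.
     */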
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
10
--- a/tcg/arm/tcg-target.h
14
+++ b/tcg/optimize.c
11
+++ b/tcg/arm/tcg-target.h
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
12
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
16
return false;
13
/* not defined -- call should be eliminated at compile time */
14
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
15
16
-#ifdef CONFIG_SOFTMMU
17
#define TCG_TARGET_NEED_LDST_LABELS
18
-#endif
19
#define TCG_TARGET_NEED_POOL_LABELS
20
21
#endif
22
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/arm/tcg-target.c.inc
25
+++ b/tcg/arm/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@
27
*/
28
29
#include "elf.h"
30
+#include "../tcg-ldst.c.inc"
31
#include "../tcg-pool.c.inc"
32
33
int arm_arch = __ARM_ARCH;
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
17
}
35
}
18
36
19
+/*
37
#ifdef CONFIG_SOFTMMU
20
+ * Convert @op to NOT, if NOT is supported by the host.
38
-#include "../tcg-ldst.c.inc"
21
+ * Return true if the conversion is successful, which will still
39
-
22
+ * indicate that the processing is complete.
40
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
23
+ */
41
* int mmu_idx, uintptr_t ra)
24
+static bool fold_not(OptContext *ctx, TCGOp *op);
42
*/
25
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
43
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
44
tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
45
return true;
46
}
47
+#else
48
+
49
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
50
+ TCGReg addrhi, unsigned a_bits)
26
+{
51
+{
27
+ TCGOpcode not_op;
52
+ unsigned a_mask = (1 << a_bits) - 1;
28
+ bool have_not;
53
+ TCGLabelQemuLdst *label = new_ldst_label(s);
29
+
54
+
30
+ switch (ctx->type) {
55
+ label->is_ld = is_ld;
31
+ case TCG_TYPE_I32:
56
+ label->addrlo_reg = addrlo;
32
+ not_op = INDEX_op_not_i32;
57
+ label->addrhi_reg = addrhi;
33
+ have_not = TCG_TARGET_HAS_not_i32;
58
+
34
+ break;
59
+ /* We are expecting a_bits to max out at 7, and can easily support 8. */
35
+ case TCG_TYPE_I64:
60
+ tcg_debug_assert(a_mask <= 0xff);
36
+ not_op = INDEX_op_not_i64;
61
+ /* tst addr, #mask */
37
+ have_not = TCG_TARGET_HAS_not_i64;
62
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
38
+ break;
63
+
39
+ case TCG_TYPE_V64:
64
+ /* blne slow_path */
40
+ case TCG_TYPE_V128:
65
+ label->label_ptr[0] = s->code_ptr;
41
+ case TCG_TYPE_V256:
66
+ tcg_out_bl_imm(s, COND_NE, 0);
42
+ not_op = INDEX_op_not_vec;
67
+
43
+ have_not = TCG_TARGET_HAS_not_vec;
68
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr);
44
+ break;
45
+ default:
46
+ g_assert_not_reached();
47
+ }
48
+ if (have_not) {
49
+ op->opc = not_op;
50
+ op->args[1] = op->args[idx];
51
+ return fold_not(ctx, op);
52
+ }
53
+ return false;
54
+}
69
+}
55
+
70
+
56
+/* If the binary operation has first argument @i, fold to NOT. */
71
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
57
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
58
+{
72
+{
59
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
73
+ if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
60
+ return fold_to_not(ctx, op, 2);
74
+ return false;
61
+ }
75
+ }
62
+ return false;
76
+
77
+ if (TARGET_LONG_BITS == 64) {
78
+ /* 64-bit target address is aligned into R2:R3. */
79
+ if (l->addrhi_reg != TCG_REG_R2) {
80
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
81
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
82
+ } else if (l->addrlo_reg != TCG_REG_R3) {
83
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
84
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
85
+ } else {
86
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
87
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
88
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
89
+ }
90
+ } else {
91
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
92
+ }
93
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0);
94
+
95
+ /*
96
+ * Tail call to the helper, with the return address back inline,
97
+ * just for the clarity of the debugging traceback -- the helper
98
+ * cannot return. We have used BLNE to arrive here, so LR is
99
+ * already set.
100
+ */
101
+ tcg_out_goto(s, COND_AL, (const void *)
102
+ (l->is_ld ? helper_unaligned_ld : helper_unaligned_st));
103
+ return true;
63
+}
104
+}
64
+
105
+
65
/* If the binary operation has second argument @i, fold to @i. */
106
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
66
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
67
{
68
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
69
return false;
70
}
71
72
+/* If the binary operation has second argument @i, fold to NOT. */
73
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
74
+{
107
+{
75
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
108
+ return tcg_out_fail_alignment(s, l);
76
+ return fold_to_not(ctx, op, 1);
77
+ }
78
+ return false;
79
+}
109
+}
80
+
110
+
81
/* If the binary operation has both arguments equal, fold to @i. */
111
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
82
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
112
+{
83
{
113
+ return tcg_out_fail_alignment(s, l);
84
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
114
+}
85
static bool fold_andc(OptContext *ctx, TCGOp *op)
115
#endif /* SOFTMMU */
86
{
116
87
if (fold_const2(ctx, op) ||
117
static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
88
- fold_xx_to_i(ctx, op, 0)) {
118
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
89
+ fold_xx_to_i(ctx, op, 0) ||
119
int mem_index;
90
+ fold_ix_to_not(ctx, op, -1)) {
120
TCGReg addend;
91
return true;
121
tcg_insn_unit *label_ptr;
92
}
122
+#else
93
return false;
123
+ unsigned a_bits;
94
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
124
#endif
95
125
96
static bool fold_eqv(OptContext *ctx, TCGOp *op)
126
datalo = *args++;
97
{
127
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
98
- return fold_const2(ctx, op);
128
add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
99
+ if (fold_const2(ctx, op) ||
129
s->code_ptr, label_ptr);
100
+ fold_xi_to_not(ctx, op, 0)) {
130
#else /* !CONFIG_SOFTMMU */
101
+ return true;
131
+ a_bits = get_alignment_bits(opc);
132
+ if (a_bits) {
133
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
102
+ }
134
+ }
103
+ return false;
135
if (guest_base) {
104
}
136
tcg_out_qemu_ld_index(s, opc, datalo, datahi,
105
137
addrlo, TCG_REG_GUEST_BASE, false);
106
static bool fold_extract(OptContext *ctx, TCGOp *op)
138
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
107
@@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
139
int mem_index;
108
140
TCGReg addend;
109
static bool fold_nand(OptContext *ctx, TCGOp *op)
141
tcg_insn_unit *label_ptr;
110
{
142
+#else
111
- return fold_const2(ctx, op);
143
+ unsigned a_bits;
112
+ if (fold_const2(ctx, op) ||
144
#endif
113
+ fold_xi_to_not(ctx, op, -1)) {
145
114
+ return true;
146
datalo = *args++;
147
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
148
add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
149
s->code_ptr, label_ptr);
150
#else /* !CONFIG_SOFTMMU */
151
+ a_bits = get_alignment_bits(opc);
152
+ if (a_bits) {
153
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
115
+ }
154
+ }
116
+ return false;
155
if (guest_base) {
117
}
156
tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
118
157
addrlo, TCG_REG_GUEST_BASE, false);
119
static bool fold_neg(OptContext *ctx, TCGOp *op)
120
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
121
122
static bool fold_nor(OptContext *ctx, TCGOp *op)
123
{
124
- return fold_const2(ctx, op);
125
+ if (fold_const2(ctx, op) ||
126
+ fold_xi_to_not(ctx, op, 0)) {
127
+ return true;
128
+ }
129
+ return false;
130
}
131
132
static bool fold_not(OptContext *ctx, TCGOp *op)
133
{
134
- return fold_const1(ctx, op);
135
+ if (fold_const1(ctx, op)) {
136
+ return true;
137
+ }
138
+
139
+ /* Because of fold_to_not, we want to always return true, via finish. */
140
+ finish_folding(ctx, op);
141
+ return true;
142
}
143
144
static bool fold_or(OptContext *ctx, TCGOp *op)
145
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
146
147
static bool fold_orc(OptContext *ctx, TCGOp *op)
148
{
149
- return fold_const2(ctx, op);
150
+ if (fold_const2(ctx, op) ||
151
+ fold_ix_to_not(ctx, op, 0)) {
152
+ return true;
153
+ }
154
+ return false;
155
}
156
157
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
158
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
159
static bool fold_xor(OptContext *ctx, TCGOp *op)
160
{
161
if (fold_const2(ctx, op) ||
162
- fold_xx_to_i(ctx, op, 0)) {
163
+ fold_xx_to_i(ctx, op, 0) ||
164
+ fold_xi_to_not(ctx, op, -1)) {
165
return true;
166
}
167
return false;
168
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
169
}
170
}
171
break;
172
- CASE_OP_32_64_VEC(xor):
173
- CASE_OP_32_64(nand):
174
- if (!arg_is_const(op->args[1])
175
- && arg_is_const(op->args[2])
176
- && arg_info(op->args[2])->val == -1) {
177
- i = 1;
178
- goto try_not;
179
- }
180
- break;
181
- CASE_OP_32_64(nor):
182
- if (!arg_is_const(op->args[1])
183
- && arg_is_const(op->args[2])
184
- && arg_info(op->args[2])->val == 0) {
185
- i = 1;
186
- goto try_not;
187
- }
188
- break;
189
- CASE_OP_32_64_VEC(andc):
190
- if (!arg_is_const(op->args[2])
191
- && arg_is_const(op->args[1])
192
- && arg_info(op->args[1])->val == -1) {
193
- i = 2;
194
- goto try_not;
195
- }
196
- break;
197
- CASE_OP_32_64_VEC(orc):
198
- CASE_OP_32_64(eqv):
199
- if (!arg_is_const(op->args[2])
200
- && arg_is_const(op->args[1])
201
- && arg_info(op->args[1])->val == 0) {
202
- i = 2;
203
- goto try_not;
204
- }
205
- break;
206
- try_not:
207
- {
208
- TCGOpcode not_op;
209
- bool have_not;
210
-
211
- switch (ctx.type) {
212
- case TCG_TYPE_I32:
213
- not_op = INDEX_op_not_i32;
214
- have_not = TCG_TARGET_HAS_not_i32;
215
- break;
216
- case TCG_TYPE_I64:
217
- not_op = INDEX_op_not_i64;
218
- have_not = TCG_TARGET_HAS_not_i64;
219
- break;
220
- case TCG_TYPE_V64:
221
- case TCG_TYPE_V128:
222
- case TCG_TYPE_V256:
223
- not_op = INDEX_op_not_vec;
224
- have_not = TCG_TARGET_HAS_not_vec;
225
- break;
226
- default:
227
- g_assert_not_reached();
228
- }
229
- if (!have_not) {
230
- break;
231
- }
232
- op->opc = not_op;
233
- reset_temp(op->args[0]);
234
- op->args[1] = op->args[i];
235
- continue;
236
- }
237
default:
238
break;
239
}
240
--
158
--
241
2.25.1
159
2.25.1
242
160
243
161
There was no real reason for calls to have separate code here.
Unify init for calls vs non-calls using the call path, which
handles TCG_CALL_DUMMY_ARG.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)

This is kinda sorta the opposite of the other tcg hosts, where
we get (normal) alignment checks for free with host SIGBUS and
need to add code to support unaligned accesses.

Fortunately, the ISA contains pairs of instructions that are
used to implement unaligned memory accesses. Use them.

Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target.h | 2 -
tcg/mips/tcg-target.c.inc | 334 +++++++++++++++++++++++++++++++++++++-
2 files changed, 328 insertions(+), 8 deletions(-)
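For readers unfamiliar with the pre-R6 idiom the MIPS patch relies on,
this is roughly what the new unaligned helpers emit for a 32-bit
big-endian load (a hand-written sketch; offsets taken from the hunks below):

    /*
     * Sketch of the lwl/lwr idiom used by tcg_out_qemu_ld_unalign():
     *     lwl  t0, 0(base)      bytes at the most-significant end
     *     lwr  t0, 3(base)      remaining least-significant bytes
     * Stores use the matching swl/swr pair; 64-bit accesses use
     * ldl/ldr and sdl/sdr.  R6 dropped these opcodes but requires
     * ordinary lw/sw to handle misalignment, hence the separate R6
     * path that only emits the alignment test the guest asked for.
     */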
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
19
--- a/tcg/mips/tcg-target.h
16
+++ b/tcg/optimize.c
20
+++ b/tcg/mips/tcg-target.h
17
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
21
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
22
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t)
23
QEMU_ERROR("code path is reachable");
24
25
-#ifdef CONFIG_SOFTMMU
26
#define TCG_TARGET_NEED_LDST_LABELS
27
-#endif
28
29
#endif
30
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/mips/tcg-target.c.inc
33
+++ b/tcg/mips/tcg-target.c.inc
34
@@ -XXX,XX +XXX,XX @@
35
* THE SOFTWARE.
36
*/
37
38
+#include "../tcg-ldst.c.inc"
39
+
40
#ifdef HOST_WORDS_BIGENDIAN
41
# define MIPS_BE 1
42
#else
43
@@ -XXX,XX +XXX,XX @@ typedef enum {
44
OPC_ORI = 015 << 26,
45
OPC_XORI = 016 << 26,
46
OPC_LUI = 017 << 26,
47
+ OPC_BNEL = 025 << 26,
48
+ OPC_BNEZALC_R6 = 030 << 26,
49
OPC_DADDIU = 031 << 26,
50
+ OPC_LDL = 032 << 26,
51
+ OPC_LDR = 033 << 26,
52
OPC_LB = 040 << 26,
53
OPC_LH = 041 << 26,
54
+ OPC_LWL = 042 << 26,
55
OPC_LW = 043 << 26,
56
OPC_LBU = 044 << 26,
57
OPC_LHU = 045 << 26,
58
+ OPC_LWR = 046 << 26,
59
OPC_LWU = 047 << 26,
60
OPC_SB = 050 << 26,
61
OPC_SH = 051 << 26,
62
+ OPC_SWL = 052 << 26,
63
OPC_SW = 053 << 26,
64
+ OPC_SDL = 054 << 26,
65
+ OPC_SDR = 055 << 26,
66
+ OPC_SWR = 056 << 26,
67
OPC_LD = 067 << 26,
68
OPC_SD = 077 << 26,
69
70
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
71
}
72
73
#if defined(CONFIG_SOFTMMU)
74
-#include "../tcg-ldst.c.inc"
75
-
76
static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
77
[MO_UB] = helper_ret_ldub_mmu,
78
[MO_SB] = helper_ret_ldsb_mmu,
79
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
80
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
81
return true;
82
}
83
-#endif
84
+
85
+#else
86
+
87
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
88
+ TCGReg addrhi, unsigned a_bits)
89
+{
90
+ unsigned a_mask = (1 << a_bits) - 1;
91
+ TCGLabelQemuLdst *l = new_ldst_label(s);
92
+
93
+ l->is_ld = is_ld;
94
+ l->addrlo_reg = addrlo;
95
+ l->addrhi_reg = addrhi;
96
+
97
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
98
+ tcg_debug_assert(a_bits < 16);
99
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
100
+
101
+ l->label_ptr[0] = s->code_ptr;
102
+ if (use_mips32r6_instructions) {
103
+ tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
104
+ } else {
105
+ tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
106
+ tcg_out_nop(s);
107
+ }
108
+
109
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
110
+}
111
+
112
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
113
+{
114
+ void *target;
115
+
116
+ if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
117
+ return false;
118
+ }
119
+
120
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
121
+ /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */
122
+ TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg;
123
+ TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg;
124
+
125
+ if (a3 != TCG_REG_A2) {
126
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
127
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
128
+ } else if (a2 != TCG_REG_A3) {
129
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
130
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
131
+ } else {
132
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2);
133
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3);
134
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0);
135
+ }
136
+ } else {
137
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
138
+ }
139
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
140
+
141
+ /*
142
+ * Tail call to the helper, with the return address back inline.
143
+ * We have arrived here via BNEL, so $31 is already set.
144
+ */
145
+ target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st);
146
+ tcg_out_call_int(s, target, true);
147
+ return true;
148
+}
149
+
150
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
151
+{
152
+ return tcg_out_fail_alignment(s, l);
153
+}
154
+
155
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
156
+{
157
+ return tcg_out_fail_alignment(s, l);
158
+}
159
+#endif /* SOFTMMU */
160
161
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
162
TCGReg base, MemOp opc, bool is_64)
163
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
18
}
164
}
19
}
165
}
20
166
21
-static void init_arg_info(OptContext *ctx, TCGArg arg)
167
+static void __attribute__((unused))
22
-{
168
+tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
23
- init_ts_info(ctx, arg_temp(arg));
169
+ TCGReg base, MemOp opc, bool is_64)
24
-}
170
+{
25
-
171
+ const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
26
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
172
+ const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
173
+ const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR;
174
+ const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL;
175
+
176
+ bool sgn = (opc & MO_SIGN);
177
+
178
+ switch (opc & (MO_SSIZE | MO_BSWAP)) {
179
+ case MO_SW | MO_BE:
180
+ case MO_UW | MO_BE:
181
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0);
182
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 1);
183
+ if (use_mips32r2_instructions) {
184
+ tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
185
+ } else {
186
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
187
+ tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
188
+ }
189
+ break;
190
+
191
+ case MO_SW | MO_LE:
192
+ case MO_UW | MO_LE:
193
+ if (use_mips32r2_instructions && lo != base) {
194
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
195
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1);
196
+ tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
197
+ } else {
198
+ tcg_out_opc_imm(s, OPC_LBU, TCG_TMP0, base, 0);
199
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP1, base, 1);
200
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP1, TCG_TMP1, 8);
201
+ tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
202
+ }
203
+ break;
204
+
205
+ case MO_SL:
206
+ case MO_UL:
207
+ tcg_out_opc_imm(s, lw1, lo, base, 0);
208
+ tcg_out_opc_imm(s, lw2, lo, base, 3);
209
+ if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
210
+ tcg_out_ext32u(s, lo, lo);
211
+ }
212
+ break;
213
+
214
+ case MO_UL | MO_BSWAP:
215
+ case MO_SL | MO_BSWAP:
216
+ if (use_mips32r2_instructions) {
217
+ tcg_out_opc_imm(s, lw1, lo, base, 0);
218
+ tcg_out_opc_imm(s, lw2, lo, base, 3);
219
+ tcg_out_bswap32(s, lo, lo,
220
+ TCG_TARGET_REG_BITS == 64 && is_64
221
+ ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
222
+ } else {
223
+ const tcg_insn_unit *subr =
224
+ (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
225
+ ? bswap32u_addr : bswap32_addr);
226
+
227
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
228
+ tcg_out_bswap_subr(s, subr);
229
+ /* delay slot */
230
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
231
+ tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
232
+ }
233
+ break;
234
+
235
+ case MO_UQ:
236
+ if (TCG_TARGET_REG_BITS == 64) {
237
+ tcg_out_opc_imm(s, ld1, lo, base, 0);
238
+ tcg_out_opc_imm(s, ld2, lo, base, 7);
239
+ } else {
240
+ tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0);
241
+ tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3);
242
+ tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0);
243
+ tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3);
244
+ }
245
+ break;
246
+
247
+ case MO_UQ | MO_BSWAP:
248
+ if (TCG_TARGET_REG_BITS == 64) {
249
+ if (use_mips32r2_instructions) {
250
+ tcg_out_opc_imm(s, ld1, lo, base, 0);
251
+ tcg_out_opc_imm(s, ld2, lo, base, 7);
252
+ tcg_out_bswap64(s, lo, lo);
253
+ } else {
254
+ tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0);
255
+ tcg_out_bswap_subr(s, bswap64_addr);
256
+ /* delay slot */
257
+ tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7);
258
+ tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
259
+ }
260
+ } else if (use_mips32r2_instructions) {
261
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
262
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
263
+ tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0);
264
+ tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3);
265
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
266
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
267
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
268
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
269
+ } else {
270
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
271
+ tcg_out_bswap_subr(s, bswap32_addr);
272
+ /* delay slot */
273
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
274
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0);
275
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
276
+ tcg_out_bswap_subr(s, bswap32_addr);
277
+ /* delay slot */
278
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3);
279
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
280
+ }
281
+ break;
282
+
283
+ default:
284
+ g_assert_not_reached();
285
+ }
286
+}
287
+
288
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
27
{
289
{
28
TCGTemp *i, *g, *l;
290
TCGReg addr_regl, addr_regh __attribute__((unused));
29
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
291
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
30
return false;
292
MemOp opc;
31
}
293
#if defined(CONFIG_SOFTMMU)
32
294
tcg_insn_unit *label_ptr[2];
33
+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
295
+#else
34
+{
296
+ unsigned a_bits, s_bits;
35
+ for (int i = 0; i < nb_args; i++) {
297
#endif
36
+ TCGTemp *ts = arg_temp(op->args[i]);
298
TCGReg base = TCG_REG_A0;
37
+ if (ts) {
299
38
+ init_ts_info(ctx, ts);
300
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
39
+ }
301
} else {
40
+ }
302
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
41
+}
303
}
42
+
304
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
43
/* Propagate constants and copies, fold constant expressions. */
305
+ a_bits = get_alignment_bits(opc);
44
void tcg_optimize(TCGContext *s)
306
+ s_bits = opc & MO_SIZE;
307
+ /*
308
+ * R6 removes the left/right instructions but requires the
309
+ * system to support misaligned memory accesses.
310
+ */
311
+ if (use_mips32r6_instructions) {
312
+ if (a_bits) {
313
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
314
+ }
315
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
316
+ } else {
317
+ if (a_bits && a_bits != s_bits) {
318
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
319
+ }
320
+ if (a_bits >= s_bits) {
321
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
322
+ } else {
323
+ tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
324
+ }
325
+ }
326
#endif
327
}
328
329
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
330
}
331
}
332
333
+static void __attribute__((unused))
334
+tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
335
+ TCGReg base, MemOp opc)
336
+{
337
+ const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
338
+ const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL;
339
+ const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR;
340
+ const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL;
341
+
342
+ /* Don't clutter the code below with checks to avoid bswapping ZERO. */
343
+ if ((lo | hi) == 0) {
344
+ opc &= ~MO_BSWAP;
345
+ }
346
+
347
+ switch (opc & (MO_SIZE | MO_BSWAP)) {
348
+ case MO_16 | MO_BE:
349
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
350
+ tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0);
351
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 1);
352
+ break;
353
+
354
+ case MO_16 | MO_LE:
355
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
356
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
357
+ tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1);
358
+ break;
359
+
360
+ case MO_32 | MO_BSWAP:
361
+ tcg_out_bswap32(s, TCG_TMP3, lo, 0);
362
+ lo = TCG_TMP3;
363
+ /* fall through */
364
+ case MO_32:
365
+ tcg_out_opc_imm(s, sw1, lo, base, 0);
366
+ tcg_out_opc_imm(s, sw2, lo, base, 3);
367
+ break;
368
+
369
+ case MO_64 | MO_BSWAP:
370
+ if (TCG_TARGET_REG_BITS == 64) {
371
+ tcg_out_bswap64(s, TCG_TMP3, lo);
372
+ lo = TCG_TMP3;
373
+ } else if (use_mips32r2_instructions) {
374
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? hi : lo);
375
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? lo : hi);
376
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
377
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
378
+ hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1;
379
+ lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0;
380
+ } else {
381
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0);
382
+ tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0);
383
+ tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3);
384
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0);
385
+ tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0);
386
+ tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3);
387
+ break;
388
+ }
389
+ /* fall through */
390
+ case MO_64:
391
+ if (TCG_TARGET_REG_BITS == 64) {
392
+ tcg_out_opc_imm(s, sd1, lo, base, 0);
393
+ tcg_out_opc_imm(s, sd2, lo, base, 7);
394
+ } else {
395
+ tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0);
396
+ tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3);
397
+ tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0);
398
+ tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3);
399
+ }
400
+ break;
401
+
402
+ default:
403
+ tcg_abort();
404
+ }
405
+}
406
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
45
{
407
{
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
408
TCGReg addr_regl, addr_regh __attribute__((unused));
47
if (opc == INDEX_op_call) {
409
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
48
nb_oargs = TCGOP_CALLO(op);
410
MemOp opc;
49
nb_iargs = TCGOP_CALLI(op);
411
#if defined(CONFIG_SOFTMMU)
50
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
412
tcg_insn_unit *label_ptr[2];
51
- TCGTemp *ts = arg_temp(op->args[i]);
413
+#else
52
- if (ts) {
414
+ unsigned a_bits, s_bits;
53
- init_ts_info(&ctx, ts);
415
#endif
54
- }
416
TCGReg base = TCG_REG_A0;
55
- }
417
56
} else {
418
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
57
nb_oargs = def->nb_oargs;
419
data_regl, data_regh, addr_regl, addr_regh,
58
nb_iargs = def->nb_iargs;
420
s->code_ptr, label_ptr);
59
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
421
#else
60
- init_arg_info(&ctx, op->args[i]);
422
- base = TCG_REG_A0;
61
- }
423
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
62
}
424
tcg_out_ext32u(s, base, addr_regl);
63
+ init_arguments(&ctx, op, nb_oargs + nb_iargs);
425
addr_regl = base;
64
426
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
65
/* Do copy propagation */
427
} else {
66
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
428
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
429
}
430
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
431
+ a_bits = get_alignment_bits(opc);
432
+ s_bits = opc & MO_SIZE;
433
+ /*
434
+ * R6 removes the left/right instructions but requires the
435
+ * system to support misaligned memory accesses.
436
+ */
437
+ if (use_mips32r6_instructions) {
438
+ if (a_bits) {
439
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
440
+ }
441
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
442
+ } else {
443
+ if (a_bits && a_bits != s_bits) {
444
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
445
+ }
446
+ if (a_bits >= s_bits) {
447
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
448
+ } else {
449
+ tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
450
+ }
451
+ }
452
#endif
453
}
454
67
--
455
--
68
2.25.1
456
2.25.1
69
457
70
458
Return -1 instead of 2 for failure, so that we can
use comparisons against 0 for all cases.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
1 file changed, 74 insertions(+), 71 deletions(-)

We can use the routines just added for user-only to emit
unaligned accesses in softmmu mode too.

Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target.c.inc | 91 ++++++++++++++++++++++-----------------
1 file changed, 51 insertions(+), 40 deletions(-)
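A worked example of the page-crossing check that the softmmu patch adds
to tcg_out_tlb_load() (a sketch; the masks are taken from the hunk below):

    /*
     * 8-byte access with no alignment requirement: a_mask = 0, s_mask = 7.
     *     tlb_mask = TARGET_PAGE_MASK | a_mask
     *     cmp      = (addr + (s_mask - a_mask)) & tlb_mask
     * If addr + 7 lands on the next page, cmp no longer matches the TLB
     * comparator for addr's page and the access takes the slow path, so
     * an unaligned access is only handled inline when it does not cross
     * a page boundary.
     */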
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
--- a/tcg/mips/tcg-target.c.inc
14
+++ b/tcg/optimize.c
15
+++ b/tcg/mips/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
16
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
17
tcg_insn_unit *label_ptr[2], bool is_load)
18
{
19
MemOp opc = get_memop(oi);
20
- unsigned s_bits = opc & MO_SIZE;
21
unsigned a_bits = get_alignment_bits(opc);
22
+ unsigned s_bits = opc & MO_SIZE;
23
+ unsigned a_mask = (1 << a_bits) - 1;
24
+ unsigned s_mask = (1 << s_bits) - 1;
25
int mem_index = get_mmuidx(oi);
26
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
27
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
29
int add_off = offsetof(CPUTLBEntry, addend);
30
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
31
: offsetof(CPUTLBEntry, addr_write));
32
- target_ulong mask;
33
+ target_ulong tlb_mask;
34
35
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
36
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
37
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
38
/* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
39
tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
40
41
- /* We don't currently support unaligned accesses.
42
- We could do so with mips32r6. */
43
- if (a_bits < s_bits) {
44
- a_bits = s_bits;
45
- }
46
-
47
- /* Mask the page bits, keeping the alignment bits to compare against. */
48
- mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
49
-
50
/* Load the (low-half) tlb comparator. */
51
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
52
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
53
- tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
54
+ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
55
} else {
56
tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD
57
: TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
58
TCG_TMP0, TCG_TMP3, cmp_off);
59
- tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
60
- /* No second compare is required here;
61
- load the tlb addend for the fast path. */
62
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
63
}
64
65
/* Zero extend a 32-bit guest address for a 64-bit host. */
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
67
tcg_out_ext32u(s, base, addrl);
68
addrl = base;
69
}
70
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
71
+
72
+ /*
73
+ * Mask the page bits, keeping the alignment bits to compare against.
74
+ * For unaligned accesses, compare against the end of the access to
75
+ * verify that it does not cross a page boundary.
76
+ */
77
+ tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
78
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask);
79
+ if (a_mask >= s_mask) {
80
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
81
+ } else {
82
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask);
83
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
84
+ }
85
+
86
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
87
+ /* Load the tlb addend for the fast path. */
88
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
89
+ }
90
91
label_ptr[0] = s->code_ptr;
92
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
93
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
94
/* Load and test the high half tlb comparator. */
95
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
96
/* delay slot */
97
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
98
+ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
99
100
/* Load the tlb addend for the fast path. */
101
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
102
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
16
}
103
}
17
}
104
}
18
105
19
-/* Return 2 if the condition can't be simplified, and the result
106
-static void __attribute__((unused))
20
- of the condition (0 or 1) if it can */
107
-tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
21
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
108
+static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
22
- TCGArg y, TCGCond c)
109
TCGReg base, MemOp opc, bool is_64)
23
+/*
24
+ * Return -1 if the condition can't be simplified,
25
+ * and the result of the condition (0 or 1) if it can.
26
+ */
27
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
28
+ TCGArg y, TCGCond c)
29
{
110
{
30
uint64_t xv = arg_info(x)->val;
111
const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
31
uint64_t yv = arg_info(y)->val;
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
32
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
113
#if defined(CONFIG_SOFTMMU)
33
case TCG_COND_GEU:
114
tcg_insn_unit *label_ptr[2];
34
return 1;
115
#else
35
default:
116
- unsigned a_bits, s_bits;
36
- return 2;
117
#endif
37
+ return -1;
118
+ unsigned a_bits, s_bits;
38
}
119
TCGReg base = TCG_REG_A0;
39
}
120
40
- return 2;
121
data_regl = *args++;
41
+ return -1;
122
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
123
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
124
oi = *args++;
125
opc = get_memop(oi);
126
+ a_bits = get_alignment_bits(opc);
127
+ s_bits = opc & MO_SIZE;
128
129
+ /*
130
+ * R6 removes the left/right instructions but requires the
131
+ * system to support misaligned memory accesses.
132
+ */
133
#if defined(CONFIG_SOFTMMU)
134
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
135
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
136
+ if (use_mips32r6_instructions || a_bits >= s_bits) {
137
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
138
+ } else {
139
+ tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
140
+ }
141
add_qemu_ldst_label(s, 1, oi,
142
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
143
data_regl, data_regh, addr_regl, addr_regh,
144
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
145
} else {
146
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
147
}
148
- a_bits = get_alignment_bits(opc);
149
- s_bits = opc & MO_SIZE;
150
- /*
151
- * R6 removes the left/right instructions but requires the
152
- * system to support misaligned memory accesses.
153
- */
154
if (use_mips32r6_instructions) {
155
if (a_bits) {
156
tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
158
}
42
}
159
}
43
160
44
-/* Return 2 if the condition can't be simplified, and the result
161
-static void __attribute__((unused))
45
- of the condition (0 or 1) if it can */
162
-tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
46
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
163
+static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
47
+/*
164
TCGReg base, MemOp opc)
48
+ * Return -1 if the condition can't be simplified,
49
+ * and the result of the condition (0 or 1) if it can.
50
+ */
51
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
52
{
165
{
53
TCGArg al = p1[0], ah = p1[1];
166
const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
54
TCGArg bl = p2[0], bh = p2[1];
167
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
55
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
168
MemOp opc;
56
if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
169
#if defined(CONFIG_SOFTMMU)
57
return do_constant_folding_cond_eq(c);
170
tcg_insn_unit *label_ptr[2];
58
}
171
-#else
59
- return 2;
172
- unsigned a_bits, s_bits;
60
+ return -1;
173
#endif
61
}
174
+ unsigned a_bits, s_bits;
62
175
TCGReg base = TCG_REG_A0;
63
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
176
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
177
data_regl = *args++;
65
break;
178
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
66
179
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
67
CASE_OP_32_64(setcond):
180
oi = *args++;
68
- tmp = do_constant_folding_cond(opc, op->args[1],
181
opc = get_memop(oi);
69
- op->args[2], op->args[3]);
182
+ a_bits = get_alignment_bits(opc);
70
- if (tmp != 2) {
183
+ s_bits = opc & MO_SIZE;
71
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
184
72
+ i = do_constant_folding_cond(opc, op->args[1],
185
+ /*
73
+ op->args[2], op->args[3]);
186
+ * R6 removes the left/right instructions but requires the
74
+ if (i >= 0) {
187
+ * system to support misaligned memory accesses.
75
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
188
+ */
76
continue;
189
#if defined(CONFIG_SOFTMMU)
77
}
190
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
78
break;
191
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
79
192
+ if (use_mips32r6_instructions || a_bits >= s_bits) {
80
CASE_OP_32_64(brcond):
193
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
81
- tmp = do_constant_folding_cond(opc, op->args[0],
194
+ } else {
82
- op->args[1], op->args[2]);
195
+ tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
83
- switch (tmp) {
196
+ }
84
- case 0:
197
add_qemu_ldst_label(s, 0, oi,
85
+ i = do_constant_folding_cond(opc, op->args[0],
198
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
86
+ op->args[1], op->args[2]);
199
data_regl, data_regh, addr_regl, addr_regh,
87
+ if (i == 0) {
200
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
88
tcg_op_remove(s, op);
201
} else {
89
continue;
202
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
90
- case 1:
203
}
91
+ } else if (i > 0) {
204
- a_bits = get_alignment_bits(opc);
92
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
205
- s_bits = opc & MO_SIZE;
93
op->opc = opc = INDEX_op_br;
206
- /*
94
op->args[0] = op->args[3];
207
- * R6 removes the left/right instructions but requires the
95
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
208
- * system to support misaligned memory accesses.
96
break;
209
- */
97
210
if (use_mips32r6_instructions) {
98
CASE_OP_32_64(movcond):
211
if (a_bits) {
99
- tmp = do_constant_folding_cond(opc, op->args[1],
212
tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
100
- op->args[2], op->args[5]);
101
- if (tmp != 2) {
102
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
103
+ i = do_constant_folding_cond(opc, op->args[1],
104
+ op->args[2], op->args[5]);
105
+ if (i >= 0) {
106
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
107
continue;
108
}
109
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
break;
112
113
case INDEX_op_brcond2_i32:
114
- tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
115
- op->args[4]);
116
- if (tmp == 0) {
117
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2],
118
+ op->args[4]);
119
+ if (i == 0) {
120
do_brcond_false:
121
tcg_op_remove(s, op);
122
continue;
123
}
124
- if (tmp == 1) {
125
+ if (i > 0) {
126
do_brcond_true:
127
op->opc = opc = INDEX_op_br;
128
op->args[0] = op->args[5];
129
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
if (op->args[4] == TCG_COND_EQ) {
131
/* Simplify EQ comparisons where one of the pairs
132
can be simplified. */
133
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
134
- op->args[0], op->args[2],
135
- TCG_COND_EQ);
136
- if (tmp == 0) {
137
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
138
+ op->args[0], op->args[2],
139
+ TCG_COND_EQ);
140
+ if (i == 0) {
141
goto do_brcond_false;
142
- } else if (tmp == 1) {
143
+ } else if (i > 0) {
144
goto do_brcond_high;
145
}
146
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
147
- op->args[1], op->args[3],
148
- TCG_COND_EQ);
149
- if (tmp == 0) {
150
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
151
+ op->args[1], op->args[3],
152
+ TCG_COND_EQ);
153
+ if (i == 0) {
154
goto do_brcond_false;
155
- } else if (tmp != 1) {
156
+ } else if (i < 0) {
157
break;
158
}
159
do_brcond_low:
160
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
161
if (op->args[4] == TCG_COND_NE) {
162
/* Simplify NE comparisons where one of the pairs
163
can be simplified. */
164
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
165
- op->args[0], op->args[2],
166
- TCG_COND_NE);
167
- if (tmp == 0) {
168
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
169
+ op->args[0], op->args[2],
170
+ TCG_COND_NE);
171
+ if (i == 0) {
172
goto do_brcond_high;
173
- } else if (tmp == 1) {
174
+ } else if (i > 0) {
175
goto do_brcond_true;
176
}
177
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
178
- op->args[1], op->args[3],
179
- TCG_COND_NE);
180
- if (tmp == 0) {
181
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
182
+ op->args[1], op->args[3],
183
+ TCG_COND_NE);
184
+ if (i == 0) {
185
goto do_brcond_low;
186
- } else if (tmp == 1) {
187
+ } else if (i > 0) {
188
goto do_brcond_true;
189
}
190
}
191
break;
192
193
case INDEX_op_setcond2_i32:
194
- tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
195
- op->args[5]);
196
- if (tmp != 2) {
197
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3],
198
+ op->args[5]);
199
+ if (i >= 0) {
200
do_setcond_const:
201
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
202
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
203
continue;
204
}
205
if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
206
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
207
if (op->args[5] == TCG_COND_EQ) {
208
/* Simplify EQ comparisons where one of the pairs
209
can be simplified. */
210
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
211
- op->args[1], op->args[3],
212
- TCG_COND_EQ);
213
- if (tmp == 0) {
214
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
215
+ op->args[1], op->args[3],
216
+ TCG_COND_EQ);
217
+ if (i == 0) {
218
goto do_setcond_const;
219
- } else if (tmp == 1) {
220
+ } else if (i > 0) {
221
goto do_setcond_high;
222
}
223
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
224
- op->args[2], op->args[4],
225
- TCG_COND_EQ);
226
- if (tmp == 0) {
227
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
228
+ op->args[2], op->args[4],
229
+ TCG_COND_EQ);
230
+ if (i == 0) {
231
goto do_setcond_high;
232
- } else if (tmp != 1) {
233
+ } else if (i < 0) {
234
break;
235
}
236
do_setcond_low:
237
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
238
if (op->args[5] == TCG_COND_NE) {
239
/* Simplify NE comparisons where one of the pairs
240
can be simplified. */
241
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
242
- op->args[1], op->args[3],
243
- TCG_COND_NE);
244
- if (tmp == 0) {
245
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
246
+ op->args[1], op->args[3],
247
+ TCG_COND_NE);
248
+ if (i == 0) {
249
goto do_setcond_high;
250
- } else if (tmp == 1) {
251
+ } else if (i > 0) {
252
goto do_setcond_const;
253
}
254
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
255
- op->args[2], op->args[4],
256
- TCG_COND_NE);
257
- if (tmp == 0) {
258
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
259
+ op->args[2], op->args[4],
260
+ TCG_COND_NE);
261
+ if (i == 0) {
262
goto do_setcond_low;
263
- } else if (tmp == 1) {
264
+ } else if (i > 0) {
265
goto do_setcond_const;
266
}
267
}
268
--
213
--
269
2.25.1
214
2.25.1
270
215
271
216
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
and muls2_i64.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
1 file changed, 35 insertions(+), 9 deletions(-)

When BH is constant, it is constrained to 11 bits for use in MOVCC.
For the cases in which we must load the constant BH into a register,
we do not need the full logic of tcg_out_movi; we can use the simpler
function for emitting a 13 bit constant.

This eliminates the only case in which TCG_REG_T2 was passed to
tcg_out_movi, which will shortly become invalid.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/sparc/tcg-target.c.inc | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
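A quick range check behind the "simpler function" claim in the sparc
patch; the 11-bit and 13-bit widths come from the commit message, the
signed ranges spelled out here are an assumption about the encodings:

    /*
     * MOVCC takes a signed 11-bit immediate, so a constant BH lies in
     * [-1024, 1023].  The adjusted BH +/- 1 then lies in [-1025, 1024],
     * comfortably inside the signed 13-bit range [-4096, 4095] that
     * tcg_out_movi_imm13() emits.
     */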
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
17
--- a/tcg/sparc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
18
+++ b/tcg/sparc/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
19
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
16
return false;
20
if (use_vis3_instructions && !is_sub) {
17
}
21
/* Note that ADDXC doesn't accept immediates. */
18
22
if (bhconst && bh != 0) {
19
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
23
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
20
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
24
+ tcg_out_movi_imm13(s, TCG_REG_T2, bh);
21
{
25
bh = TCG_REG_T2;
22
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
26
}
23
- uint32_t a = arg_info(op->args[2])->val;
27
tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
24
- uint32_t b = arg_info(op->args[3])->val;
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
25
- uint64_t r = (uint64_t)a * b;
29
     tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
26
+ uint64_t a = arg_info(op->args[2])->val;
30
    }
27
+ uint64_t b = arg_info(op->args[3])->val;
31
} else {
28
+ uint64_t h, l;
32
- /* Otherwise adjust BH as if there is carry into T2 ... */
29
TCGArg rl, rh;
33
+ /*
30
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
34
+ * Otherwise adjust BH as if there is carry into T2.
31
+ TCGOp *op2;
35
+ * Note that constant BH is constrained to 11 bits for the MOVCC,
32
+
36
+ * so the adjustment fits 12 bits.
33
+ switch (op->opc) {
37
+ */
34
+ case INDEX_op_mulu2_i32:
38
if (bhconst) {
35
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
39
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
36
+ h = (int32_t)(l >> 32);
40
+ tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
37
+ l = (int32_t)l;
41
} else {
38
+ break;
42
tcg_out_arithi(s, TCG_REG_T2, bh, 1,
39
+ case INDEX_op_muls2_i32:
43
is_sub ? ARITH_SUB : ARITH_ADD);
40
+ l = (int64_t)(int32_t)a * (int32_t)b;
41
+ h = l >> 32;
42
+ l = (int32_t)l;
43
+ break;
44
+ case INDEX_op_mulu2_i64:
45
+ mulu64(&l, &h, a, b);
46
+ break;
47
+ case INDEX_op_muls2_i64:
48
+ muls64(&l, &h, a, b);
49
+ break;
50
+ default:
51
+ g_assert_not_reached();
52
+ }
53
54
rl = op->args[0];
55
rh = op->args[1];
56
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
57
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
58
+
59
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
60
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
61
+
62
+ tcg_opt_gen_movi(ctx, op, rl, l);
63
+ tcg_opt_gen_movi(ctx, op2, rh, h);
64
return true;
65
}
66
return false;
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
CASE_OP_32_64(muluh):
69
done = fold_mul_highpart(&ctx, op);
70
break;
71
- case INDEX_op_mulu2_i32:
72
- done = fold_mulu2_i32(&ctx, op);
73
+ CASE_OP_32_64(muls2):
74
+ CASE_OP_32_64(mulu2):
75
+ done = fold_multiply2(&ctx, op);
76
break;
77
CASE_OP_32_64(nand):
78
done = fold_nand(&ctx, op);
79
--
44
--
80
2.25.1
45
2.25.1
81
46
82
47
Continue splitting tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 22 ++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

Handle 32-bit constants with a separate function, so that
tcg_out_movi_int does not need to recurse. This slightly
rearranges the order of tests for small constants, but
produces the same output.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/sparc/tcg-target.c.inc | 36 +++++++++++++++++++++---------------
1 file changed, 21 insertions(+), 15 deletions(-)
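For reference, the instruction sequences the new tcg_out_movi_imm32()
expands to (hand-assembled sketch from the hunk below):

    /*
     *   value fits in simm13:    or    %g0, imm, ret
     *   otherwise:               sethi %hi(imm), ret
     *                            or    ret, imm & 0x3ff, ret
     * sethi supplies bits 31..10 and the final or fills in the low 10
     * bits; in the sethi/or case the result is zero-extended to 64 bits,
     * matching the comment in the patch.
     */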
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
--- a/tcg/sparc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
15
+++ b/tcg/sparc/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
16
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
16
}
17
tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
17
}
18
}
18
19
19
+static void copy_propagate(OptContext *ctx, TCGOp *op,
20
+static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
20
+ int nb_oargs, int nb_iargs)
21
+{
21
+{
22
+ TCGContext *s = ctx->tcg;
22
+ if (check_fit_i32(arg, 13)) {
23
+
23
+ /* A 13-bit constant sign-extended to 64-bits. */
24
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
24
+ tcg_out_movi_imm13(s, ret, arg);
25
+ TCGTemp *ts = arg_temp(op->args[i]);
25
+ } else {
26
+ if (ts && ts_is_copy(ts)) {
26
+ /* A 32-bit constant zero-extended to 64 bits. */
27
+ op->args[i] = temp_arg(find_better_copy(s, ts));
27
+ tcg_out_sethi(s, ret, arg);
28
+ if (arg & 0x3ff) {
29
+ tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
28
+ }
30
+ }
29
+ }
31
+ }
30
+}
32
+}
31
+
33
+
32
/* Propagate constants and copies, fold constant expressions. */
34
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
33
void tcg_optimize(TCGContext *s)
35
tcg_target_long arg, bool in_prologue)
34
{
36
{
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
tcg_target_long hi, lo = (int32_t)arg;
36
nb_iargs = def->nb_iargs;
38
tcg_target_long test, lsb;
39
40
- /* Make sure we test 32-bit constants for imm13 properly. */
41
- if (type == TCG_TYPE_I32) {
42
- arg = lo;
43
+ /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
44
+ if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
45
+ tcg_out_movi_imm32(s, ret, arg);
46
+ return;
47
}
48
49
/* A 13-bit constant sign-extended to 64-bits. */
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
37
}
51
}
38
init_arguments(&ctx, op, nb_oargs + nb_iargs);
52
}
53
54
- /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
55
- if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
56
- tcg_out_sethi(s, ret, arg);
57
- if (arg & 0x3ff) {
58
- tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
59
- }
60
- return;
61
- }
39
-
62
-
40
- /* Do copy propagation */
63
/* A 32-bit constant sign-extended to 64-bits. */
41
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
64
if (arg == lo) {
42
- TCGTemp *ts = arg_temp(op->args[i]);
65
tcg_out_sethi(s, ret, ~arg);
43
- if (ts && ts_is_copy(ts)) {
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
44
- op->args[i] = temp_arg(find_better_copy(s, ts));
67
/* A 64-bit constant decomposed into 2 32-bit pieces. */
45
- }
68
if (check_fit_i32(lo, 13)) {
46
- }
69
hi = (arg - lo) >> 32;
47
+ copy_propagate(&ctx, op, nb_oargs, nb_iargs);
70
- tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
48
71
+ tcg_out_movi_imm32(s, ret, hi);
49
/* For commutative operations make constant second argument */
72
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
50
switch (opc) {
73
tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
74
} else {
75
hi = arg >> 32;
76
- tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
77
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
78
+ tcg_out_movi_imm32(s, ret, hi);
79
+ tcg_out_movi_imm32(s, TCG_REG_T2, lo);
80
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
81
tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
82
}
51
--
83
--
52
2.25.1
84
2.25.1
53
85
54
86
1
From: Luis Pires <luis.pires@eldorado.org.br>
1
This will allow us to control exactly what scratch register is
2
used for loading the constant.
2
3
3
These will be used to implement new decimal floating point
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
instructions from Power ISA 3.1.
5
6
The remainder is now returned directly by divu128/divs128,
7
freeing up phigh to receive the high 64 bits of the quotient.
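
The new calling convention is easiest to see from a small, hypothetical caller
(the prototype is the one this patch adds to include/qemu/host-utils.h; the
wrapper name div128_by_10 is made up for illustration):

    #include "qemu/host-utils.h"

    /*
     * Sketch only: divide the 128-bit value *hi:*lo by 10.
     * On return *hi:*lo hold the full 128-bit quotient and the
     * remainder is passed back as the return value of divu128().
     */
    static uint64_t div128_by_10(uint64_t *lo, uint64_t *hi)
    {
        return divu128(lo, hi, 10);
    }

Callers that previously read the remainder from *phigh now take it from the
return value instead.
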
8
9
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-Id: <20211025191154.350831-4-luis.pires@eldorado.org.br>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
6
---
14
include/hw/clock.h | 6 +-
7
tcg/sparc/tcg-target.c.inc | 15 +++++++++------
15
include/qemu/host-utils.h | 20 ++++--
8
1 file changed, 9 insertions(+), 6 deletions(-)
16
target/ppc/int_helper.c | 9 +--
17
util/host-utils.c | 133 +++++++++++++++++++++++++-------------
18
4 files changed, 108 insertions(+), 60 deletions(-)
19
9
20
diff --git a/include/hw/clock.h b/include/hw/clock.h
10
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
22
--- a/include/hw/clock.h
12
--- a/tcg/sparc/tcg-target.c.inc
23
+++ b/include/hw/clock.h
13
+++ b/tcg/sparc/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
25
if (clk->period == 0) {
26
return 0;
27
}
28
- /*
29
- * BUG: when CONFIG_INT128 is not defined, the current implementation of
30
- * divu128 does not return a valid truncated quotient, so the result will
31
- * be wrong.
32
- */
33
+
34
divu128(&lo, &hi, clk->period);
35
return lo;
36
}
15
}
37
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
16
38
index XXXXXXX..XXXXXXX 100644
17
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
39
--- a/include/qemu/host-utils.h
18
- tcg_target_long arg, bool in_prologue)
40
+++ b/include/qemu/host-utils.h
19
+ tcg_target_long arg, bool in_prologue,
41
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
20
+ TCGReg scratch)
42
return (__int128_t)a * b / c;
43
}
44
45
-static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
46
+static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
47
+ uint64_t divisor)
48
{
21
{
49
__uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
22
tcg_target_long hi, lo = (int32_t)arg;
50
__uint128_t result = dividend / divisor;
23
tcg_target_long test, lsb;
51
+
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
52
*plow = result;
53
- *phigh = dividend % divisor;
54
+ *phigh = result >> 64;
55
+ return dividend % divisor;
56
}
57
58
-static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
59
+static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
60
+ int64_t divisor)
61
{
62
- __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
63
+ __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
64
__int128_t result = dividend / divisor;
65
+
66
*plow = result;
67
- *phigh = dividend % divisor;
68
+ *phigh = result >> 64;
69
+ return dividend % divisor;
70
}
71
#else
72
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
73
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
74
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
75
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
76
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
77
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);
78
79
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
80
{
81
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
82
index XXXXXXX..XXXXXXX 100644
83
--- a/target/ppc/int_helper.c
84
+++ b/target/ppc/int_helper.c
85
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
86
87
uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
88
{
89
- int64_t rt = 0;
90
+ uint64_t rt = 0;
91
int64_t ra = (int64_t)rau;
92
int64_t rb = (int64_t)rbu;
93
int overflow = 0;
94
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
95
int cr;
96
uint64_t lo_value;
97
uint64_t hi_value;
98
+ uint64_t rem;
99
ppc_avr_t ret = { .u64 = { 0, 0 } };
100
101
if (b->VsrSD(0) < 0) {
102
@@ -XXX,XX +XXX,XX @@ uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
103
* In that case, we leave r unchanged.
104
*/
105
} else {
25
} else {
106
- divu128(&lo_value, &hi_value, 1000000000000000ULL);
26
hi = arg >> 32;
107
+ rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
27
tcg_out_movi_imm32(s, ret, hi);
108
28
- tcg_out_movi_imm32(s, TCG_REG_T2, lo);
109
- for (i = 1; i < 16; hi_value /= 10, i++) {
29
+ tcg_out_movi_imm32(s, scratch, lo);
110
- bcd_put_digit(&ret, hi_value % 10, i);
30
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
111
+ for (i = 1; i < 16; rem /= 10, i++) {
31
- tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
112
+ bcd_put_digit(&ret, rem % 10, i);
32
+ tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
113
}
114
115
for (; i < 32; lo_value /= 10, i++) {
116
diff --git a/util/host-utils.c b/util/host-utils.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/util/host-utils.c
119
+++ b/util/host-utils.c
120
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
121
}
122
123
/*
124
- * Unsigned 128-by-64 division. Returns quotient via plow and
125
- * remainder via phigh.
126
- * The result must fit in 64 bits (plow) - otherwise, the result
127
- * is undefined.
128
- * This function will cause a division by zero if passed a zero divisor.
129
+ * Unsigned 128-by-64 division.
130
+ * Returns the remainder.
131
+ * Returns quotient via plow and phigh.
132
+ * Also returns the remainder via the function return value.
133
*/
134
-void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
135
+uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
136
{
137
uint64_t dhi = *phigh;
138
uint64_t dlo = *plow;
139
- unsigned i;
140
- uint64_t carry = 0;
141
+ uint64_t rem, dhighest;
142
+ int sh;
143
144
if (divisor == 0 || dhi == 0) {
145
*plow = dlo / divisor;
146
- *phigh = dlo % divisor;
147
+ *phigh = 0;
148
+ return dlo % divisor;
149
} else {
150
+ sh = clz64(divisor);
151
152
- for (i = 0; i < 64; i++) {
153
- carry = dhi >> 63;
154
- dhi = (dhi << 1) | (dlo >> 63);
155
- if (carry || (dhi >= divisor)) {
156
- dhi -= divisor;
157
- carry = 1;
158
- } else {
159
- carry = 0;
160
+ if (dhi < divisor) {
161
+ if (sh != 0) {
162
+ /* normalize the divisor, shifting the dividend accordingly */
163
+ divisor <<= sh;
164
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
165
+ dlo <<= sh;
166
}
167
- dlo = (dlo << 1) | carry;
168
+
169
+ *phigh = 0;
170
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
171
+ } else {
172
+ if (sh != 0) {
173
+ /* normalize the divisor, shifting the dividend accordingly */
174
+ divisor <<= sh;
175
+ dhighest = dhi >> (64 - sh);
176
+ dhi = (dhi << sh) | (dlo >> (64 - sh));
177
+ dlo <<= sh;
178
+
179
+ *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
180
+ } else {
181
+ /**
182
+ * dhi >= divisor
183
+ * Since the MSB of divisor is set (sh == 0),
184
+ * (dhi - divisor) < divisor
185
+ *
186
+ * Thus, the high part of the quotient is 1, and we can
187
+ * calculate the low part with a single call to udiv_qrnnd
188
+ * after subtracting divisor from dhi
189
+ */
190
+ dhi -= divisor;
191
+ *phigh = 1;
192
+ }
193
+
194
+ *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
195
}
196
197
- *plow = dlo;
198
- *phigh = dhi;
199
+ /*
200
+ * since the dividend/divisor might have been normalized,
201
+ * the remainder might also have to be shifted back
202
+ */
203
+ return rem >> sh;
204
}
33
}
205
}
34
}
206
35
207
/*
36
static void tcg_out_movi(TCGContext *s, TCGType type,
208
- * Signed 128-by-64 division. Returns quotient via plow and
37
TCGReg ret, tcg_target_long arg)
209
- * remainder via phigh.
210
- * The result must fit in 64 bits (plow) - otherwise, the result
211
- * is undefined.
212
- * This function will cause a division by zero if passed a zero divisor.
213
+ * Signed 128-by-64 division.
214
+ * Returns quotient via plow and phigh.
215
+ * Also returns the remainder via the function return value.
216
*/
217
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
218
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
219
{
38
{
220
- int sgn_dvdnd = *phigh < 0;
39
- tcg_out_movi_int(s, type, ret, arg, false);
221
- int sgn_divsr = divisor < 0;
40
+ tcg_debug_assert(ret != TCG_REG_T2);
222
+ bool neg_quotient = false, neg_remainder = false;
41
+ tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
223
+ uint64_t unsig_hi = *phigh, unsig_lo = *plow;
42
}
224
+ uint64_t rem;
43
225
44
static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
226
- if (sgn_dvdnd) {
45
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
227
- *plow = ~(*plow);
46
} else {
228
- *phigh = ~(*phigh);
47
uintptr_t desti = (uintptr_t)dest;
229
- if (*plow == (int64_t)-1) {
48
tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
230
+ if (*phigh < 0) {
49
- desti & ~0xfff, in_prologue);
231
+ neg_quotient = !neg_quotient;
50
+ desti & ~0xfff, in_prologue, TCG_REG_O7);
232
+ neg_remainder = !neg_remainder;
51
tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
233
+
234
+ if (unsig_lo == 0) {
235
+ unsig_hi = -unsig_hi;
236
+ } else {
237
+ unsig_hi = ~unsig_hi;
238
+ unsig_lo = -unsig_lo;
239
+ }
240
+ }
241
+
242
+ if (divisor < 0) {
243
+ neg_quotient = !neg_quotient;
244
+
245
+ divisor = -divisor;
246
+ }
247
+
248
+ rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
249
+
250
+ if (neg_quotient) {
251
+ if (unsig_lo == 0) {
252
+ *phigh = -unsig_hi;
253
*plow = 0;
254
- (*phigh)++;
255
- } else {
256
- (*plow)++;
257
- }
258
+ } else {
259
+ *phigh = ~unsig_hi;
260
+ *plow = -unsig_lo;
261
+ }
262
+ } else {
263
+ *phigh = unsig_hi;
264
+ *plow = unsig_lo;
265
}
266
267
- if (sgn_divsr) {
268
- divisor = 0 - divisor;
269
- }
270
-
271
- divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
272
-
273
- if (sgn_dvdnd ^ sgn_divsr) {
274
- *plow = 0 - *plow;
275
+ if (neg_remainder) {
276
+ return -rem;
277
+ } else {
278
+ return rem;
279
}
52
}
280
}
53
}
54
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
55
56
#ifndef CONFIG_SOFTMMU
57
if (guest_base != 0) {
58
- tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
59
+ tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
60
+ guest_base, true, TCG_REG_T1);
61
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
62
}
281
#endif
63
#endif
282
--
64
--
283
2.25.1
65
2.25.1
284
66
285
67
1
This will allow callers to tail call to these functions
1
We had code for checking for 13 and 21-bit shifted constants,
2
and return true indicating processing complete.
2
but we can do better and allow 32-bit shifted constants.
3
This is still 2 insns shorter than the full 64-bit sequence.
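
The classification this enables can be sketched on its own (a hypothetical
helper, not the actual tcg/sparc code; QEMU's ctz64() is approximated with
the GCC builtin, and a non-zero argument is assumed, as on the real path):

    #include <stdint.h>
    #include <stdbool.h>

    /*
     * A 64-bit immediate qualifies for the new case when, after stripping
     * its trailing zero bits, the remaining value fits in 32 bits either
     * zero- or sign-extended; it is then loaded as a 32-bit constant and
     * shifted left into place.
     */
    static bool is_shifted_32bit_const(uint64_t arg, int *lsb, int64_t *test)
    {
        *lsb = __builtin_ctzll(arg);
        *test = (int64_t)arg >> *lsb;
        return *test == (uint32_t)*test || *test == (int32_t)*test;
    }
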
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
tcg/optimize.c | 9 +++++----
9
tcg/sparc/tcg-target.c.inc | 12 ++++++------
10
1 file changed, 5 insertions(+), 4 deletions(-)
10
1 file changed, 6 insertions(+), 6 deletions(-)
11
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
14
--- a/tcg/sparc/tcg-target.c.inc
15
+++ b/tcg/optimize.c
15
+++ b/tcg/sparc/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
16
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
17
return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
17
return;
18
}
18
}
19
19
20
-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
20
- /* A 21-bit constant, shifted. */
21
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
21
+ /* A 32-bit constant, shifted. */
22
{
22
lsb = ctz64(arg);
23
TCGTemp *dst_ts = arg_temp(dst);
23
test = (tcg_target_long)arg >> lsb;
24
TCGTemp *src_ts = arg_temp(src);
24
- if (check_fit_tl(test, 13)) {
25
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
25
- tcg_out_movi_imm13(s, ret, test);
26
26
- tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
27
if (ts_are_copies(dst_ts, src_ts)) {
28
tcg_op_remove(ctx->tcg, op);
29
- return;
27
- return;
30
+ return true;
28
- } else if (lsb > 10 && test == extract64(test, 0, 21)) {
29
+ if (lsb > 10 && test == extract64(test, 0, 21)) {
30
tcg_out_sethi(s, ret, test << 10);
31
tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
32
return;
33
+ } else if (test == (uint32_t)test || test == (int32_t)test) {
34
+ tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
35
+ tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
36
+ return;
31
}
37
}
32
38
33
reset_ts(dst_ts);
39
/* A 64-bit constant decomposed into 2 32-bit pieces. */
34
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
35
di->is_const = si->is_const;
36
di->val = si->val;
37
}
38
+ return true;
39
}
40
41
-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
42
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
43
TCGArg dst, uint64_t val)
44
{
45
const TCGOpDef *def = &tcg_op_defs[op->opc];
46
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
47
/* Convert movi to mov with constant temp. */
48
tv = tcg_constant_internal(type, val);
49
init_ts_info(ctx, tv);
50
- tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
51
+ return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
52
}
53
54
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
55
--
40
--
56
2.25.1
41
2.25.1
57
42
58
43
1
Pull the "op r, 0, b => movi r, 0" optimization into a function,
1
Since 7ecd02a06f8, if patch_reloc fails we restart translation
2
and use it in fold_shift.
2
with a smaller TB. SPARC had its function signature changed,
3
but not the logic. Replace assert with return false.
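
The failure path can be pictured with a stand-alone sketch (hypothetical
helper names; the real INSN_OFF19 field handling is in the hunk below):

    #include <stdint.h>
    #include <stdbool.h>

    /* Does val fit in a signed field of the given width? (check_fit_ptr) */
    static bool fits_signed(intptr_t val, unsigned bits)
    {
        return val >= -((intptr_t)1 << (bits - 1)) &&
               val <  ((intptr_t)1 << (bits - 1));
    }

    /* Instead of asserting, report failure so that the caller can restart
     * translation with a smaller TB, as introduced by 7ecd02a06f8. */
    static bool patch_wdisp19(uint32_t *insn, intptr_t pcrel)
    {
        if (!fits_signed(pcrel >> 2, 19)) {
            return false;
        }
        *insn = (*insn & ~0x7ffffu) | ((uint32_t)(pcrel >> 2) & 0x7ffffu);
        return true;
    }
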
3
4
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
tcg/optimize.c | 28 ++++++++++------------------
9
tcg/sparc/tcg-target.c.inc | 8 ++++++--
9
1 file changed, 10 insertions(+), 18 deletions(-)
10
1 file changed, 6 insertions(+), 2 deletions(-)
10
11
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
--- a/tcg/sparc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
15
+++ b/tcg/sparc/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
16
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
16
return false;
17
17
}
18
switch (type) {
18
19
case R_SPARC_WDISP16:
19
+/* If the binary operation has first argument @i, fold to @i. */
20
- assert(check_fit_ptr(pcrel >> 2, 16));
20
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
+ if (!check_fit_ptr(pcrel >> 2, 16)) {
21
+{
22
+ return false;
22
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
23
+ }
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
insn &= ~INSN_OFF16(-1);
24
+ }
25
insn |= INSN_OFF16(pcrel);
25
+ return false;
26
break;
26
+}
27
case R_SPARC_WDISP19:
27
+
28
- assert(check_fit_ptr(pcrel >> 2, 19));
28
/* If the binary operation has first argument @i, fold to NOT. */
29
+ if (!check_fit_ptr(pcrel >> 2, 19)) {
29
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
30
+ return false;
30
{
31
+ }
31
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
32
insn &= ~INSN_OFF19(-1);
32
static bool fold_shift(OptContext *ctx, TCGOp *op)
33
insn |= INSN_OFF19(pcrel);
33
{
34
break;
34
if (fold_const2(ctx, op) ||
35
+ fold_ix_to_i(ctx, op, 0) ||
36
fold_xi_to_x(ctx, op, 0)) {
37
return true;
38
}
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
40
break;
41
}
42
43
- /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
44
- and "sub r, 0, a => neg r, a" case. */
45
- switch (opc) {
46
- CASE_OP_32_64(shl):
47
- CASE_OP_32_64(shr):
48
- CASE_OP_32_64(sar):
49
- CASE_OP_32_64(rotl):
50
- CASE_OP_32_64(rotr):
51
- if (arg_is_const(op->args[1])
52
- && arg_info(op->args[1])->val == 0) {
53
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
54
- continue;
55
- }
56
- break;
57
- default:
58
- break;
59
- }
60
-
61
/* Simplify using known-zero bits. Currently only ops with a single
62
output argument is supported. */
63
z_mask = -1;
64
--
35
--
65
2.25.1
36
2.25.1
66
37
67
38
1
Certain targets, like riscv, produce signed 32-bit results.
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
This can lead to lots of redundant extensions as values are
3
manipulated.
4
5
Begin by tracking only the obvious sign-extensions, and
6
converting them to simple copies when possible.
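
The tracked quantity can be illustrated in isolation: a stand-alone
approximation of the smask_from_value() helper added below, using the GCC
builtin that QEMU's clrsb64() wraps:

    #include <stdint.h>

    /*
     * A left-aligned mask covering the bits that are guaranteed to be
     * copies of the sign bit (the "redundant sign bits" of the value).
     */
    static uint64_t sign_rep_mask(uint64_t value)
    {
        int rep = __builtin_clrsbll((int64_t)value);
        return ~(~0ull >> rep);
    }
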
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
3
---
12
tcg/optimize.c | 123 ++++++++++++++++++++++++++++++++++++++++---------
4
tcg/sparc/tcg-target.c.inc | 15 +++++++++++++++
13
1 file changed, 102 insertions(+), 21 deletions(-)
5
1 file changed, 15 insertions(+)
14
6
15
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/optimize.c
9
--- a/tcg/sparc/tcg-target.c.inc
18
+++ b/tcg/optimize.c
10
+++ b/tcg/sparc/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
11
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
20
TCGTemp *next_copy;
12
insn &= ~INSN_OFF19(-1);
21
uint64_t val;
13
insn |= INSN_OFF19(pcrel);
22
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
14
break;
23
+ uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
15
+ case R_SPARC_13:
24
} TempOptInfo;
16
+ if (!check_fit_ptr(value, 13)) {
25
17
+ return false;
26
typedef struct OptContext {
18
+ }
27
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
19
+ insn &= ~INSN_IMM13(-1);
28
/* In flight values from optimization. */
20
+ insn |= INSN_IMM13(value);
29
uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
21
+ break;
30
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
31
+ uint64_t s_mask; /* mask of clrsb(value) bits */
32
TCGType type;
33
} OptContext;
34
35
+/* Calculate the smask for a specific value. */
36
+static uint64_t smask_from_value(uint64_t value)
37
+{
38
+ int rep = clrsb64(value);
39
+ return ~(~0ull >> rep);
40
+}
41
+
42
+/*
43
+ * Calculate the smask for a given set of known-zeros.
44
+ * If there are lots of zeros on the left, we can consider the remainder
45
+ * an unsigned field, and thus the corresponding signed field is one bit
46
+ * larger.
47
+ */
48
+static uint64_t smask_from_zmask(uint64_t zmask)
49
+{
50
+ /*
51
+ * Only the 0 bits are significant for zmask, thus the msb itself
52
+ * must be zero, else we have no sign information.
53
+ */
54
+ int rep = clz64(zmask);
55
+ if (rep == 0) {
56
+ return 0;
57
+ }
58
+ rep -= 1;
59
+ return ~(~0ull >> rep);
60
+}
61
+
62
static inline TempOptInfo *ts_info(TCGTemp *ts)
63
{
64
return ts->state_ptr;
65
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
66
ti->prev_copy = ts;
67
ti->is_const = false;
68
ti->z_mask = -1;
69
+ ti->s_mask = 0;
70
}
71
72
static void reset_temp(TCGArg arg)
73
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
74
ti->is_const = true;
75
ti->val = ts->val;
76
ti->z_mask = ts->val;
77
+ ti->s_mask = smask_from_value(ts->val);
78
} else {
79
ti->is_const = false;
80
ti->z_mask = -1;
81
+ ti->s_mask = 0;
82
}
83
}
84
85
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
86
op->args[1] = src;
87
88
di->z_mask = si->z_mask;
89
+ di->s_mask = si->s_mask;
90
91
if (src_ts->type == dst_ts->type) {
92
TempOptInfo *ni = ts_info(si->next_copy);
93
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
94
95
nb_oargs = def->nb_oargs;
96
for (i = 0; i < nb_oargs; i++) {
97
- reset_temp(op->args[i]);
98
+ TCGTemp *ts = arg_temp(op->args[i]);
99
+ reset_ts(ts);
100
/*
101
- * Save the corresponding known-zero bits mask for the
102
+ * Save the corresponding known-zero/sign bits mask for the
103
* first output argument (only one supported so far).
104
*/
105
if (i == 0) {
106
- arg_info(op->args[i])->z_mask = ctx->z_mask;
107
+ ts_info(ts)->z_mask = ctx->z_mask;
108
+ ts_info(ts)->s_mask = ctx->s_mask;
109
}
110
}
111
}
112
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
113
{
114
uint64_t a_mask = ctx->a_mask;
115
uint64_t z_mask = ctx->z_mask;
116
+ uint64_t s_mask = ctx->s_mask;
117
118
/*
119
* 32-bit ops generate 32-bit results, which for the purpose of
120
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
121
if (ctx->type == TCG_TYPE_I32) {
122
a_mask = (int32_t)a_mask;
123
z_mask = (int32_t)z_mask;
124
+ s_mask |= MAKE_64BIT_MASK(32, 32);
125
ctx->z_mask = z_mask;
126
+ ctx->s_mask = s_mask;
127
}
128
129
if (z_mask == 0) {
130
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
131
132
static bool fold_bswap(OptContext *ctx, TCGOp *op)
133
{
134
- uint64_t z_mask, sign;
135
+ uint64_t z_mask, s_mask, sign;
136
137
if (arg_is_const(op->args[1])) {
138
uint64_t t = arg_info(op->args[1])->val;
139
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
140
}
141
142
z_mask = arg_info(op->args[1])->z_mask;
143
+
144
switch (op->opc) {
145
case INDEX_op_bswap16_i32:
146
case INDEX_op_bswap16_i64:
147
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
148
default:
22
default:
149
g_assert_not_reached();
23
g_assert_not_reached();
150
}
24
}
151
+ s_mask = smask_from_zmask(z_mask);
25
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
152
26
return;
153
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
154
case TCG_BSWAP_OZ:
155
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
156
/* If the sign bit may be 1, force all the bits above to 1. */
157
if (z_mask & sign) {
158
z_mask |= sign;
159
+ s_mask = sign << 1;
160
}
161
break;
162
default:
163
/* The high bits are undefined: force all bits above the sign to 1. */
164
z_mask |= sign << 1;
165
+ s_mask = 0;
166
break;
167
}
27
}
168
ctx->z_mask = z_mask;
28
169
+ ctx->s_mask = s_mask;
29
+ /* Use the constant pool, if possible. */
170
30
+ if (!in_prologue && USE_REG_TB) {
171
return fold_masks(ctx, op);
31
+ new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
172
}
32
+ tcg_tbrel_diff(s, NULL));
173
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
33
+ tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
174
static bool fold_extract(OptContext *ctx, TCGOp *op)
34
+ return;
175
{
176
uint64_t z_mask_old, z_mask;
177
+ int pos = op->args[2];
178
+ int len = op->args[3];
179
180
if (arg_is_const(op->args[1])) {
181
uint64_t t;
182
183
t = arg_info(op->args[1])->val;
184
- t = extract64(t, op->args[2], op->args[3]);
185
+ t = extract64(t, pos, len);
186
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
187
}
188
189
z_mask_old = arg_info(op->args[1])->z_mask;
190
- z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
191
- if (op->args[2] == 0) {
192
+ z_mask = extract64(z_mask_old, pos, len);
193
+ if (pos == 0) {
194
ctx->a_mask = z_mask_old ^ z_mask;
195
}
196
ctx->z_mask = z_mask;
197
+ ctx->s_mask = smask_from_zmask(z_mask);
198
199
return fold_masks(ctx, op);
200
}
201
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
202
203
static bool fold_exts(OptContext *ctx, TCGOp *op)
204
{
205
- uint64_t z_mask_old, z_mask, sign;
206
+ uint64_t s_mask_old, s_mask, z_mask, sign;
207
bool type_change = false;
208
209
if (fold_const1(ctx, op)) {
210
return true;
211
}
212
213
- z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
214
+ z_mask = arg_info(op->args[1])->z_mask;
215
+ s_mask = arg_info(op->args[1])->s_mask;
216
+ s_mask_old = s_mask;
217
218
switch (op->opc) {
219
CASE_OP_32_64(ext8s):
220
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
221
222
if (z_mask & sign) {
223
z_mask |= sign;
224
- } else if (!type_change) {
225
- ctx->a_mask = z_mask_old ^ z_mask;
226
}
227
+ s_mask |= sign << 1;
228
+
229
ctx->z_mask = z_mask;
230
+ ctx->s_mask = s_mask;
231
+ if (!type_change) {
232
+ ctx->a_mask = s_mask & ~s_mask_old;
233
+ }
234
235
return fold_masks(ctx, op);
236
}
237
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
238
}
239
240
ctx->z_mask = z_mask;
241
+ ctx->s_mask = smask_from_zmask(z_mask);
242
if (!type_change) {
243
ctx->a_mask = z_mask_old ^ z_mask;
244
}
245
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
246
MemOp mop = get_memop(oi);
247
int width = 8 * memop_size(mop);
248
249
- if (!(mop & MO_SIGN) && width < 64) {
250
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
251
+ if (width < 64) {
252
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
253
+ if (!(mop & MO_SIGN)) {
254
+ ctx->z_mask = MAKE_64BIT_MASK(0, width);
255
+ ctx->s_mask <<= 1;
256
+ }
257
}
258
259
/* Opcodes that touch guest memory stop the mb optimization. */
260
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
261
262
static bool fold_sextract(OptContext *ctx, TCGOp *op)
263
{
264
- int64_t z_mask_old, z_mask;
265
+ uint64_t z_mask, s_mask, s_mask_old;
266
+ int pos = op->args[2];
267
+ int len = op->args[3];
268
269
if (arg_is_const(op->args[1])) {
270
uint64_t t;
271
272
t = arg_info(op->args[1])->val;
273
- t = sextract64(t, op->args[2], op->args[3]);
274
+ t = sextract64(t, pos, len);
275
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
276
}
277
278
- z_mask_old = arg_info(op->args[1])->z_mask;
279
- z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
280
- if (op->args[2] == 0 && z_mask >= 0) {
281
- ctx->a_mask = z_mask_old ^ z_mask;
282
- }
283
+ z_mask = arg_info(op->args[1])->z_mask;
284
+ z_mask = sextract64(z_mask, pos, len);
285
ctx->z_mask = z_mask;
286
287
+ s_mask_old = arg_info(op->args[1])->s_mask;
288
+ s_mask = sextract64(s_mask_old, pos, len);
289
+ s_mask |= MAKE_64BIT_MASK(len, 64 - len);
290
+ ctx->s_mask = s_mask;
291
+
292
+ if (pos == 0) {
293
+ ctx->a_mask = s_mask & ~s_mask_old;
294
+ }
35
+ }
295
+
36
+
296
return fold_masks(ctx, op);
37
/* A 64-bit constant decomposed into 2 32-bit pieces. */
297
}
38
if (check_fit_i32(lo, 13)) {
298
39
hi = (arg - lo) >> 32;
299
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
300
{
301
/* We can't do any folding with a load, but we can record bits. */
302
switch (op->opc) {
303
+ CASE_OP_32_64(ld8s):
304
+ ctx->s_mask = MAKE_64BIT_MASK(8, 56);
305
+ break;
306
CASE_OP_32_64(ld8u):
307
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
308
+ ctx->s_mask = MAKE_64BIT_MASK(9, 55);
309
+ break;
310
+ CASE_OP_32_64(ld16s):
311
+ ctx->s_mask = MAKE_64BIT_MASK(16, 48);
312
break;
313
CASE_OP_32_64(ld16u):
314
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
315
+ ctx->s_mask = MAKE_64BIT_MASK(17, 47);
316
+ break;
317
+ case INDEX_op_ld32s_i64:
318
+ ctx->s_mask = MAKE_64BIT_MASK(32, 32);
319
break;
320
case INDEX_op_ld32u_i64:
321
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
322
+ ctx->s_mask = MAKE_64BIT_MASK(33, 31);
323
break;
324
default:
325
g_assert_not_reached();
326
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
327
ctx.type = TCG_TYPE_I32;
328
}
329
330
- /* Assume all bits affected, and no bits known zero. */
331
+ /* Assume all bits affected, no bits known zero, no sign reps. */
332
ctx.a_mask = -1;
333
ctx.z_mask = -1;
334
+ ctx.s_mask = 0;
335
336
/*
337
* Process each opcode.
338
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
339
case INDEX_op_extrh_i64_i32:
340
done = fold_extu(&ctx, op);
341
break;
342
+ CASE_OP_32_64(ld8s):
343
CASE_OP_32_64(ld8u):
344
+ CASE_OP_32_64(ld16s):
345
CASE_OP_32_64(ld16u):
346
+ case INDEX_op_ld32s_i64:
347
case INDEX_op_ld32u_i64:
348
done = fold_tcg_ld(&ctx, op);
349
break;
350
--
40
--
351
2.25.1
41
2.25.1
352
42
353
43
1
From: Luis Pires <luis.pires@eldorado.org.br>
1
Due to mapping changes, we now rarely place the code_gen_buffer
2
near the main executable, which means that direct calls will
3
now rarely be in range.
2
4
3
In preparation for changing the divu128/divs128 implementations
5
So, always use indirect calls for tail calls, which allows us to
4
to allow for quotients larger than 64 bits, move the div-by-zero
6
avoid clobbering %o7, and therefore we need not save and restore it.
5
and overflow checks to the callers.
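
A caller-side check, modelled on the helper_divdeu() change in this patch,
might look like this (sketch only; the wrapper name is hypothetical and
divu128() has the void-returning prototype this patch introduces):

    #include "qemu/host-utils.h"

    /*
     * Compute (ra << 64) / rb, which is only defined when the quotient
     * fits in 64 bits: reject a zero divisor and a high part >= divisor
     * before calling divu128().  Returns true on overflow.
     */
    static bool divdeu_like(uint64_t *rt, uint64_t ra, uint64_t rb)
    {
        if (rb == 0 || ra >= rb) {
            *rt = 0;                  /* result undefined, flag overflow */
            return true;
        }
        *rt = 0;                      /* low 64 bits of the dividend */
        divu128(rt, &ra, rb);         /* quotient comes back in *rt */
        return false;
    }
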
6
7
7
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-Id: <20211025191154.350831-2-luis.pires@eldorado.org.br>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
10
---
12
include/hw/clock.h | 5 +++--
11
tcg/sparc/tcg-target.c.inc | 37 +++++++++++++++++++++++--------------
13
include/qemu/host-utils.h | 34 ++++++++++++---------------------
12
1 file changed, 23 insertions(+), 14 deletions(-)
14
target/ppc/int_helper.c | 14 +++++++++-----
15
util/host-utils.c | 40 ++++++++++++++++++---------------------
16
4 files changed, 42 insertions(+), 51 deletions(-)
17
13
18
diff --git a/include/hw/clock.h b/include/hw/clock.h
14
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
19
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
20
--- a/include/hw/clock.h
16
--- a/tcg/sparc/tcg-target.c.inc
21
+++ b/include/hw/clock.h
17
+++ b/tcg/sparc/tcg-target.c.inc
22
@@ -XXX,XX +XXX,XX @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns)
18
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
23
return 0;
19
tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
24
}
25
/*
26
- * Ignore divu128() return value as we've caught div-by-zero and don't
27
- * need different behaviour for overflow.
28
+ * BUG: when CONFIG_INT128 is not defined, the current implementation of
29
+ * divu128 does not return a valid truncated quotient, so the result will
30
+ * be wrong.
31
*/
32
divu128(&lo, &hi, clk->period);
33
return lo;
34
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/qemu/host-utils.h
37
+++ b/include/qemu/host-utils.h
38
@@ -XXX,XX +XXX,XX @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
39
return (__int128_t)a * b / c;
40
}
20
}
41
21
42
-static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
22
+static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
43
+static inline void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
23
+ bool in_prologue, bool tail_call)
24
+{
25
+ uintptr_t desti = (uintptr_t)dest;
26
+
27
+ /* Be careful not to clobber %o7 for a tail call. */
28
+ tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
29
+ desti & ~0xfff, in_prologue,
30
+ tail_call ? TCG_REG_G2 : TCG_REG_O7);
31
+ tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
32
+ TCG_REG_T1, desti & 0xfff, JMPL);
33
+}
34
+
35
static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
36
bool in_prologue)
44
{
37
{
45
- if (divisor == 0) {
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
46
- return 1;
39
if (disp == (int32_t)disp) {
47
- } else {
40
tcg_out32(s, CALL | (uint32_t)disp >> 2);
48
- __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
49
- __uint128_t result = dividend / divisor;
50
- *plow = result;
51
- *phigh = dividend % divisor;
52
- return result > UINT64_MAX;
53
- }
54
+ __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
55
+ __uint128_t result = dividend / divisor;
56
+ *plow = result;
57
+ *phigh = dividend % divisor;
58
}
59
60
-static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
61
+static inline void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
62
{
63
- if (divisor == 0) {
64
- return 1;
65
- } else {
66
- __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
67
- __int128_t result = dividend / divisor;
68
- *plow = result;
69
- *phigh = dividend % divisor;
70
- return result != *plow;
71
- }
72
+ __int128_t dividend = ((__int128_t)*phigh << 64) | (uint64_t)*plow;
73
+ __int128_t result = dividend / divisor;
74
+ *plow = result;
75
+ *phigh = dividend % divisor;
76
}
77
#else
78
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
79
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
80
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
81
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
82
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
83
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor);
84
85
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
86
{
87
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/ppc/int_helper.c
90
+++ b/target/ppc/int_helper.c
91
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
92
uint64_t rt = 0;
93
int overflow = 0;
94
95
- overflow = divu128(&rt, &ra, rb);
96
-
97
- if (unlikely(overflow)) {
98
+ if (unlikely(rb == 0 || ra >= rb)) {
99
+ overflow = 1;
100
rt = 0; /* Undefined */
101
+ } else {
102
+ divu128(&rt, &ra, rb);
103
}
104
105
if (oe) {
106
@@ -XXX,XX +XXX,XX @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
107
int64_t rt = 0;
108
int64_t ra = (int64_t)rau;
109
int64_t rb = (int64_t)rbu;
110
- int overflow = divs128(&rt, &ra, rb);
111
+ int overflow = 0;
112
113
- if (unlikely(overflow)) {
114
+ if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
115
+ overflow = 1;
116
rt = 0; /* Undefined */
117
+ } else {
118
+ divs128(&rt, &ra, rb);
119
}
120
121
if (oe) {
122
diff --git a/util/host-utils.c b/util/host-utils.c
123
index XXXXXXX..XXXXXXX 100644
124
--- a/util/host-utils.c
125
+++ b/util/host-utils.c
126
@@ -XXX,XX +XXX,XX @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
127
*phigh = rh;
128
}
129
130
-/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */
131
-/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */
132
-/* remainder via phigh. */
133
-int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
134
+/*
135
+ * Unsigned 128-by-64 division. Returns quotient via plow and
136
+ * remainder via phigh.
137
+ * The result must fit in 64 bits (plow) - otherwise, the result
138
+ * is undefined.
139
+ * This function will cause a division by zero if passed a zero divisor.
140
+ */
141
+void divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
142
{
143
uint64_t dhi = *phigh;
144
uint64_t dlo = *plow;
145
unsigned i;
146
uint64_t carry = 0;
147
148
- if (divisor == 0) {
149
- return 1;
150
- } else if (dhi == 0) {
151
+ if (divisor == 0 || dhi == 0) {
152
*plow = dlo / divisor;
153
*phigh = dlo % divisor;
154
- return 0;
155
- } else if (dhi >= divisor) {
156
- return 1;
157
} else {
41
} else {
158
42
- uintptr_t desti = (uintptr_t)dest;
159
for (i = 0; i < 64; i++) {
43
- tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
160
@@ -XXX,XX +XXX,XX @@ int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
44
- desti & ~0xfff, in_prologue, TCG_REG_O7);
161
45
- tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
162
*plow = dlo;
46
+ tcg_out_jmpl_const(s, dest, in_prologue, false);
163
*phigh = dhi;
164
- return 0;
165
}
47
}
166
}
48
}
167
49
168
-int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
50
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
169
+/*
51
170
+ * Signed 128-by-64 division. Returns quotient via plow and
52
/* Set the retaddr operand. */
171
+ * remainder via phigh.
53
tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
172
+ * The result must fit in 64 bits (plow) - otherwise, the result
54
- /* Set the env operand. */
173
+ * is undefined.
55
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
174
+ * This function will cause a division by zero if passed a zero divisor.
56
/* Tail call. */
175
+ */
57
- tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
176
+void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
58
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
177
{
59
+ tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
178
int sgn_dvdnd = *phigh < 0;
60
+ /* delay slot -- set the env argument */
179
int sgn_divsr = divisor < 0;
61
+ tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
180
- int overflow = 0;
181
182
if (sgn_dvdnd) {
183
*plow = ~(*plow);
184
@@ -XXX,XX +XXX,XX @@ int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
185
divisor = 0 - divisor;
186
}
62
}
187
63
188
- overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
64
for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
189
+ divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
65
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
190
66
if (ra >= TCG_REG_O6) {
191
if (sgn_dvdnd ^ sgn_divsr) {
67
tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
192
*plow = 0 - *plow;
68
TCG_TARGET_CALL_STACK_OFFSET);
69
- ra = TCG_REG_G1;
70
+ } else {
71
+ tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
72
}
73
- tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
74
- /* Set the env operand. */
75
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
76
+
77
/* Tail call. */
78
- tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
79
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
80
+ tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
81
+ /* delay slot -- set the env argument */
82
+ tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
193
}
83
}
194
-
195
- if (!overflow) {
196
- if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
197
- overflow = 1;
198
- }
199
- }
200
-
201
- return overflow;
202
}
84
}
203
#endif
85
#endif
204
205
--
86
--
206
2.25.1
87
2.25.1
207
88
208
89
1
Copy z_mask into OptContext, for writeback to the
1
This is more or less the opposite of the other tcg hosts, where
2
first output within the new function.
2
we get (normal) alignment checks for free with host SIGBUS and
3
3
need to add code to support unaligned accesses.
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
This inline code expansion is somewhat large, but it takes quite
6
a few instructions to make a function call to a helper anyway.
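
The fast-path test the expansion starts with can be written out in C for
reference (hypothetical helper; the real code emits ANDcc plus an annulled
branch, as in the hunks below):

    #include <stdint.h>
    #include <stdbool.h>

    /*
     * An access of size 1 << s_bits with guest alignment requirement
     * 1 << a_bits takes the inline (aligned) path only when the address
     * is aligned to max(a_bits, s_bits); anything else falls through to
     * the unaligned/SIGBUS handling.
     */
    static bool needs_unaligned_path(uint64_t addr,
                                     unsigned a_bits, unsigned s_bits)
    {
        unsigned t_bits = a_bits > s_bits ? a_bits : s_bits;
        return (addr & ((1u << t_bits) - 1)) != 0;
    }
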
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
10
---
8
tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++----------------
11
tcg/sparc/tcg-target.c.inc | 219 +++++++++++++++++++++++++++++++++++--
9
1 file changed, 33 insertions(+), 16 deletions(-)
12
1 file changed, 211 insertions(+), 8 deletions(-)
10
13
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
16
--- a/tcg/sparc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
17
+++ b/tcg/sparc/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
18
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[] = {
16
TCGContext *tcg;
19
#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
17
TCGOp *prev_mb;
20
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
18
TCGTempSet temps_used;
21
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
19
+
22
+#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
20
+ /* In flight values from optimization. */
23
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
21
+ uint64_t z_mask;
24
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
22
} OptContext;
25
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
23
26
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
24
static inline TempOptInfo *ts_info(TCGTemp *ts)
27
tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
25
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
26
}
28
}
27
}
29
}
28
30
+#else
29
+static void finish_folding(OptContext *ctx, TCGOp *op)
31
+static const tcg_insn_unit *qemu_unalign_ld_trampoline;
32
+static const tcg_insn_unit *qemu_unalign_st_trampoline;
33
+
34
+static void build_trampolines(TCGContext *s)
30
+{
35
+{
31
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
36
+ for (int ld = 0; ld < 2; ++ld) {
32
+ int i, nb_oargs;
37
+ void *helper;
38
+
39
+ while ((uintptr_t)s->code_ptr & 15) {
40
+ tcg_out_nop(s);
41
+ }
42
+
43
+ if (ld) {
44
+ helper = helper_unaligned_ld;
45
+ qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
46
+ } else {
47
+ helper = helper_unaligned_st;
48
+ qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
49
+ }
50
+
51
+ if (!SPARC64 && TARGET_LONG_BITS == 64) {
52
+ /* Install the high part of the address. */
53
+ tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
54
+ }
55
+
56
+ /* Tail call. */
57
+ tcg_out_jmpl_const(s, helper, true, true);
58
+ /* delay slot -- set the env argument */
59
+ tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
60
+ }
61
+}
62
#endif
63
64
/* Generate global QEMU prologue and epilogue code */
65
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
66
/* delay slot */
67
tcg_out_movi_imm13(s, TCG_REG_O0, 0);
68
69
-#ifdef CONFIG_SOFTMMU
70
build_trampolines(s);
71
-#endif
72
}
73
74
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
75
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
76
static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
77
[MO_UB] = LDUB,
78
[MO_SB] = LDSB,
79
+ [MO_UB | MO_LE] = LDUB,
80
+ [MO_SB | MO_LE] = LDSB,
81
82
[MO_BEUW] = LDUH,
83
[MO_BESW] = LDSH,
84
[MO_BEUL] = LDUW,
85
[MO_BESL] = LDSW,
86
[MO_BEUQ] = LDX,
87
+ [MO_BESQ] = LDX,
88
89
[MO_LEUW] = LDUH_LE,
90
[MO_LESW] = LDSH_LE,
91
[MO_LEUL] = LDUW_LE,
92
[MO_LESL] = LDSW_LE,
93
[MO_LEUQ] = LDX_LE,
94
+ [MO_LESQ] = LDX_LE,
95
};
96
97
static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
98
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
99
MemOpIdx oi, bool is_64)
100
{
101
MemOp memop = get_memop(oi);
102
+ tcg_insn_unit *label_ptr;
103
+
104
#ifdef CONFIG_SOFTMMU
105
unsigned memi = get_mmuidx(oi);
106
TCGReg addrz, param;
107
const tcg_insn_unit *func;
108
- tcg_insn_unit *label_ptr;
109
110
addrz = tcg_out_tlb_load(s, addr, memi, memop,
111
offsetof(CPUTLBEntry, addr_read));
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
113
114
*label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
115
#else
116
+ TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
117
+ unsigned a_bits = get_alignment_bits(memop);
118
+ unsigned s_bits = memop & MO_SIZE;
119
+ unsigned t_bits;
120
+
121
if (SPARC64 && TARGET_LONG_BITS == 32) {
122
tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
123
addr = TCG_REG_T1;
124
}
125
- tcg_out_ldst_rr(s, data, addr,
126
- (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
33
+
127
+
34
+ /*
128
+ /*
35
+ * For an opcode that ends a BB, reset all temp data.
129
+ * Normal case: alignment equal to access size.
36
+ * We do no cross-BB optimization.
37
+ */
130
+ */
38
+ if (def->flags & TCG_OPF_BB_END) {
131
+ if (a_bits == s_bits) {
39
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
132
+ tcg_out_ldst_rr(s, data, addr, index,
40
+ ctx->prev_mb = NULL;
133
+ qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
41
+ return;
134
+ return;
42
+ }
135
+ }
43
+
136
+
44
+ nb_oargs = def->nb_oargs;
137
+ /*
45
+ for (i = 0; i < nb_oargs; i++) {
138
+ * Test for at least natural alignment, and assume most accesses
46
+ reset_temp(op->args[i]);
139
+ * will be aligned -- perform a straight load in the delay slot.
140
+ * This is required to preserve atomicity for aligned accesses.
141
+ */
142
+ t_bits = MAX(a_bits, s_bits);
143
+ tcg_debug_assert(t_bits < 13);
144
+ tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
145
+
146
+ /* beq,a,pt %icc, label */
147
+ label_ptr = s->code_ptr;
148
+ tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
149
+ /* delay slot */
150
+ tcg_out_ldst_rr(s, data, addr, index,
151
qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
152
+
153
+ if (a_bits >= s_bits) {
47
+ /*
154
+ /*
48
+ * Save the corresponding known-zero bits mask for the
155
+ * Overalignment: A successful alignment test will perform the memory
49
+ * first output argument (only one supported so far).
156
+ * operation in the delay slot, and failure need only invoke the
157
+ * handler for SIGBUS.
50
+ */
158
+ */
51
+ if (i == 0) {
159
+ TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
52
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
160
+ tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
53
+ }
161
+ /* delay slot -- move to low part of argument reg */
54
+ }
162
+ tcg_out_mov_delay(s, arg_low, addr);
55
+}
163
+ } else {
56
+
164
+ /* Underalignment: load by pieces of minimum alignment. */
57
static bool fold_call(OptContext *ctx, TCGOp *op)
165
+ int ld_opc, a_size, s_size, i;
166
+
167
+ /*
168
+ * Force full address into T1 early; avoids problems with
169
+ * overlap between @addr and @data.
170
+ */
171
+ tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
172
+
173
+ a_size = 1 << a_bits;
174
+ s_size = 1 << s_bits;
175
+ if ((memop & MO_BSWAP) == MO_BE) {
176
+ ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
177
+ tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
178
+ ld_opc = qemu_ld_opc[a_bits | MO_BE];
179
+ for (i = a_size; i < s_size; i += a_size) {
180
+ tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
181
+ tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
182
+ tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
183
+ }
184
+ } else if (a_bits == 0) {
185
+ ld_opc = LDUB;
186
+ tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
187
+ for (i = a_size; i < s_size; i += a_size) {
188
+ if ((memop & MO_SIGN) && i == s_size - a_size) {
189
+ ld_opc = LDSB;
190
+ }
191
+ tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
192
+ tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
193
+ tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
194
+ }
195
+ } else {
196
+ ld_opc = qemu_ld_opc[a_bits | MO_LE];
197
+ tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
198
+ for (i = a_size; i < s_size; i += a_size) {
199
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
200
+ if ((memop & MO_SIGN) && i == s_size - a_size) {
201
+ ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
202
+ }
203
+ tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
204
+ tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
205
+ tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
206
+ }
207
+ }
208
+ }
209
+
210
+ *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
211
#endif /* CONFIG_SOFTMMU */
212
}
213
214
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
215
MemOpIdx oi)
58
{
216
{
59
TCGContext *s = ctx->tcg;
217
MemOp memop = get_memop(oi);
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
218
+ tcg_insn_unit *label_ptr;
61
partmask &= 0xffffffffu;
219
+
62
affected &= 0xffffffffu;
220
#ifdef CONFIG_SOFTMMU
63
}
221
unsigned memi = get_mmuidx(oi);
64
+ ctx.z_mask = z_mask;
222
TCGReg addrz, param;
65
223
const tcg_insn_unit *func;
66
if (partmask == 0) {
224
- tcg_insn_unit *label_ptr;
67
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
225
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
226
addrz = tcg_out_tlb_load(s, addr, memi, memop,
69
break;
227
offsetof(CPUTLBEntry, addr_write));
70
}
228
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
71
229
72
- /* Some of the folding above can change opc. */
230
*label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
73
- opc = op->opc;
231
#else
74
- def = &tcg_op_defs[opc];
232
+ TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
75
- if (def->flags & TCG_OPF_BB_END) {
233
+ unsigned a_bits = get_alignment_bits(memop);
76
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
234
+ unsigned s_bits = memop & MO_SIZE;
77
- } else {
235
+ unsigned t_bits;
78
- int nb_oargs = def->nb_oargs;
236
+
79
- for (i = 0; i < nb_oargs; i++) {
237
if (SPARC64 && TARGET_LONG_BITS == 32) {
80
- reset_temp(op->args[i]);
238
tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
81
- /* Save the corresponding known-zero bits mask for the
239
addr = TCG_REG_T1;
82
- first output argument (only one supported so far). */
240
}
83
- if (i == 0) {
241
- tcg_out_ldst_rr(s, data, addr,
84
- arg_info(op->args[i])->z_mask = z_mask;
242
- (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
85
- }
243
+
86
- }
244
+ /*
87
- }
245
+ * Normal case: alignment equal to access size.
88
+ finish_folding(&ctx, op);
246
+ */
89
247
+ if (a_bits == s_bits) {
90
/* Eliminate duplicate and redundant fence instructions. */
248
+ tcg_out_ldst_rr(s, data, addr, index,
91
if (ctx.prev_mb) {
249
+ qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
250
+ return;
251
+ }
252
+
253
+ /*
254
+ * Test for at least natural alignment, and assume most accesses
255
+ * will be aligned -- perform a straight store in the delay slot.
256
+ * This is required to preserve atomicity for aligned accesses.
257
+ */
258
+ t_bits = MAX(a_bits, s_bits);
259
+ tcg_debug_assert(t_bits < 13);
260
+ tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
261
+
262
+ /* beq,a,pt %icc, label */
263
+ label_ptr = s->code_ptr;
264
+ tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
265
+ /* delay slot */
266
+ tcg_out_ldst_rr(s, data, addr, index,
267
qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
268
+
269
+ if (a_bits >= s_bits) {
270
+ /*
271
+ * Overalignment: A successful alignment test will perform the memory
272
+ * operation in the delay slot, and failure need only invoke the
273
+ * handler for SIGBUS.
274
+ */
275
+ TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
276
+ tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
277
+ /* delay slot -- move to low part of argument reg */
278
+ tcg_out_mov_delay(s, arg_low, addr);
279
+ } else {
280
+ /* Underalignment: store by pieces of minimum alignment. */
281
+ int st_opc, a_size, s_size, i;
282
+
283
+ /*
284
+ * Force full address into T1 early; avoids problems with
285
+ * overlap between @addr and @data.
286
+ */
287
+ tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
288
+
289
+ a_size = 1 << a_bits;
290
+ s_size = 1 << s_bits;
291
+ if ((memop & MO_BSWAP) == MO_BE) {
292
+ st_opc = qemu_st_opc[a_bits | MO_BE];
293
+ for (i = 0; i < s_size; i += a_size) {
294
+ TCGReg d = data;
295
+ int shift = (s_size - a_size - i) * 8;
296
+ if (shift) {
297
+ d = TCG_REG_T2;
298
+ tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
299
+ }
300
+ tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
301
+ }
302
+ } else if (a_bits == 0) {
303
+ tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
304
+ for (i = 1; i < s_size; i++) {
305
+ tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
306
+ tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
307
+ }
308
+ } else {
309
+ /* Note that ST*A with immediate asi must use indexed address. */
310
+ st_opc = qemu_st_opc[a_bits + MO_LE];
311
+ tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
312
+ for (i = a_size; i < s_size; i += a_size) {
313
+ tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
314
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
315
+ tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
316
+ }
317
+ }
318
+ }
319
+
320
+ *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
321
#endif /* CONFIG_SOFTMMU */
322
}
323
92
--
324
--
93
2.25.1
325
2.25.1
94
326
95
327
1
From: Luis Pires <luis.pires@eldorado.org.br>
1
A mostly generic test for unaligned access raising SIGBUS.
2
2
3
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20211025191154.350831-5-luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
tests/unit/test-div128.c | 197 +++++++++++++++++++++++++++++++++++++++
6
tests/tcg/multiarch/sigbus.c | 68 ++++++++++++++++++++++++++++++++++++
9
tests/unit/meson.build | 1 +
7
1 file changed, 68 insertions(+)
10
2 files changed, 198 insertions(+)
8
create mode 100644 tests/tcg/multiarch/sigbus.c
11
create mode 100644 tests/unit/test-div128.c
12
9
13
diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c
10
diff --git a/tests/tcg/multiarch/sigbus.c b/tests/tcg/multiarch/sigbus.c
14
new file mode 100644
11
new file mode 100644
15
index XXXXXXX..XXXXXXX
12
index XXXXXXX..XXXXXXX
16
--- /dev/null
13
--- /dev/null
17
+++ b/tests/unit/test-div128.c
14
+++ b/tests/tcg/multiarch/sigbus.c
18
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@
19
+/*
16
+#define _GNU_SOURCE 1
20
+ * Test 128-bit division functions
21
+ *
22
+ * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
23
+ *
24
+ * This library is free software; you can redistribute it and/or
25
+ * modify it under the terms of the GNU Lesser General Public
26
+ * License as published by the Free Software Foundation; either
27
+ * version 2.1 of the License, or (at your option) any later version.
28
+ *
29
+ * This library is distributed in the hope that it will be useful,
30
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
31
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
32
+ * Lesser General Public License for more details.
33
+ *
34
+ * You should have received a copy of the GNU Lesser General Public
35
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
36
+ */
37
+
17
+
38
+#include "qemu/osdep.h"
18
+#include <assert.h>
39
+#include "qemu/host-utils.h"
19
+#include <stdlib.h>
20
+#include <signal.h>
21
+#include <endian.h>
40
+
22
+
41
+typedef struct {
42
+ uint64_t high;
43
+ uint64_t low;
44
+ uint64_t rhigh;
45
+ uint64_t rlow;
46
+ uint64_t divisor;
47
+ uint64_t remainder;
48
+} test_data_unsigned;
49
+
23
+
50
+typedef struct {
24
+unsigned long long x = 0x8877665544332211ull;
51
+ int64_t high;
25
+void * volatile p = (void *)&x + 1;
52
+ uint64_t low;
53
+ int64_t rhigh;
54
+ uint64_t rlow;
55
+ int64_t divisor;
56
+ int64_t remainder;
57
+} test_data_signed;
58
+
26
+
59
+static const test_data_unsigned test_table_unsigned[] = {
27
+void sigbus(int sig, siginfo_t *info, void *uc)
60
+ /* Dividend fits in 64 bits */
61
+ { 0x0000000000000000ULL, 0x0000000000000000ULL,
62
+ 0x0000000000000000ULL, 0x0000000000000000ULL,
63
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
64
+ { 0x0000000000000000ULL, 0x0000000000000001ULL,
65
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
66
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
67
+ { 0x0000000000000000ULL, 0x0000000000000003ULL,
68
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
69
+ 0x0000000000000002ULL, 0x0000000000000001ULL},
70
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
71
+ 0x0000000000000000ULL, 0x8000000000000000ULL,
72
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
73
+ { 0x0000000000000000ULL, 0xa000000000000000ULL,
74
+ 0x0000000000000000ULL, 0x0000000000000002ULL,
75
+ 0x4000000000000000ULL, 0x2000000000000000ULL},
76
+ { 0x0000000000000000ULL, 0x8000000000000000ULL,
77
+ 0x0000000000000000ULL, 0x0000000000000001ULL,
78
+ 0x8000000000000000ULL, 0x0000000000000000ULL},
79
+
80
+ /* Dividend > 64 bits, with MSB 0 */
81
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
82
+ 0x123456789abcdefeULL, 0xefedcba987654321ULL,
83
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
84
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
85
+ 0x0000000000000001ULL, 0x000000000000000dULL,
86
+ 0x123456789abcdefeULL, 0x03456789abcdf03bULL},
87
+ { 0x123456789abcdefeULL, 0xefedcba987654321ULL,
88
+ 0x0123456789abcdefULL, 0xeefedcba98765432ULL,
89
+ 0x0000000000000010ULL, 0x0000000000000001ULL},
90
+
91
+ /* Dividend > 64 bits, with MSB 1 */
92
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
93
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
94
+ 0x0000000000000001ULL, 0x0000000000000000ULL},
95
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
96
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
97
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
98
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
99
+ 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL,
100
+ 0x0000000000000010ULL, 0x000000000000000fULL},
101
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
102
+ 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL,
103
+ 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL},
104
+
105
+ /**
106
+ * Divisor == 64 bits, with MSB 1
107
+ * and high 64 bits of dividend >= divisor
108
+ * (for testing normalization)
109
+ */
110
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
111
+ 0x0000000000000001ULL, 0x0000000000000000ULL,
112
+ 0xfeeddccbbaa99887ULL, 0x766554433221100fULL},
113
+ { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL,
114
+ 0x0000000000000001ULL, 0xfddbb9977553310aULL,
115
+ 0x8000000000000001ULL, 0x78899aabbccddf05ULL},
116
+
117
+ /* Dividend > 64 bits, divisor almost as big */
118
+ { 0x0000000000000001ULL, 0x23456789abcdef01ULL,
119
+ 0x0000000000000000ULL, 0x000000000000000fULL,
120
+ 0x123456789abcdefeULL, 0x123456789abcde1fULL},
121
+};
122
+
123
+static const test_data_signed test_table_signed[] = {
124
+ /* Positive dividend, positive/negative divisors */
125
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
126
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
127
+ 0x0000000000000001LL, 0x0000000000000000LL},
128
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
129
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
130
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
131
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
132
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
133
+ 0x0000000000000002LL, 0x0000000000000000LL},
134
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
135
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
136
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
137
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
138
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
139
+ 0x0000000000000008LL, 0x0000000000000006LL},
140
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
141
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
142
+ 0xfffffffffffffff8LL, 0x0000000000000006LL},
143
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
144
+ 0x0000000000000000LL, 0x000000000000550dULL,
145
+ 0x0000000000000237LL, 0x0000000000000183LL},
146
+ { 0x0000000000000000LL, 0x0000000000bc614eULL,
147
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
148
+ 0xfffffffffffffdc9LL, 0x0000000000000183LL},
149
+
150
+ /* Negative dividend, positive/negative divisors */
151
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
152
+ 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
153
+ 0x0000000000000001LL, 0x0000000000000000LL},
154
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
155
+ 0x0000000000000000LL, 0x0000000000bc614eULL,
156
+ 0xffffffffffffffffLL, 0x0000000000000000LL},
157
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
158
+ 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL,
159
+ 0x0000000000000002LL, 0x0000000000000000LL},
160
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
161
+ 0x0000000000000000LL, 0x00000000005e30a7ULL,
162
+ 0xfffffffffffffffeLL, 0x0000000000000000LL},
163
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
164
+ 0xffffffffffffffffLL, 0xffffffffffe873d7ULL,
165
+ 0x0000000000000008LL, 0xfffffffffffffffaLL},
166
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
167
+ 0x0000000000000000LL, 0x0000000000178c29ULL,
168
+ 0xfffffffffffffff8LL, 0xfffffffffffffffaLL},
169
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
170
+ 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL,
171
+ 0x0000000000000237LL, 0xfffffffffffffe7dLL},
172
+ { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL,
173
+ 0x0000000000000000LL, 0x000000000000550dULL,
174
+ 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL},
175
+};
176
+
177
+static void test_divu128(void)
178
+{
28
+{
179
+ int i;
29
+ assert(sig == SIGBUS);
180
+ uint64_t rem;
30
+ assert(info->si_signo == SIGBUS);
181
+ test_data_unsigned tmp;
31
+#ifdef BUS_ADRALN
182
+
32
+ assert(info->si_code == BUS_ADRALN);
183
+ for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) {
33
+#endif
184
+ tmp = test_table_unsigned[i];
34
+ assert(info->si_addr == p);
185
+
35
+ exit(EXIT_SUCCESS);
186
+ rem = divu128(&tmp.low, &tmp.high, tmp.divisor);
187
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
188
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
189
+ g_assert_cmpuint(rem, ==, tmp.remainder);
190
+ }
191
+}
36
+}
192
+
37
+
193
+static void test_divs128(void)
38
+int main()
194
+{
39
+{
195
+ int i;
40
+ struct sigaction sa = {
196
+ int64_t rem;
41
+ .sa_sigaction = sigbus,
197
+ test_data_signed tmp;
42
+ .sa_flags = SA_SIGINFO
43
+ };
44
+ int allow_fail = 0;
45
+ int tmp;
198
+
46
+
199
+ for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) {
47
+ tmp = sigaction(SIGBUS, &sa, NULL);
200
+ tmp = test_table_signed[i];
48
+ assert(tmp == 0);
201
+
49
+
202
+ rem = divs128(&tmp.low, &tmp.high, tmp.divisor);
50
+ /*
203
+ g_assert_cmpuint(tmp.low, ==, tmp.rlow);
51
+ * Select an operation that's likely to enforce alignment.
204
+ g_assert_cmpuint(tmp.high, ==, tmp.rhigh);
52
+ * On many guests that support unaligned accesses by default,
205
+ g_assert_cmpuint(rem, ==, tmp.remainder);
53
+ * this is often an atomic operation.
54
+ */
55
+#if defined(__aarch64__)
56
+ asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
57
+#elif defined(__alpha__)
58
+ asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
59
+#elif defined(__arm__)
60
+ asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
61
+#elif defined(__powerpc__)
62
+ asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
63
+#elif defined(__riscv_atomic)
64
+ asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
65
+#else
66
+ /* No insn known to fault unaligned -- try for a straight load. */
67
+ allow_fail = 1;
68
+ tmp = *(volatile int *)p;
69
+#endif
70
+
71
+ assert(allow_fail);
72
+
73
+ /*
74
+ * We didn't see a signal.
75
+ * We might as well validate the unaligned load worked.
76
+ */
77
+ if (BYTE_ORDER == LITTLE_ENDIAN) {
78
+ assert(tmp == 0x55443322);
79
+ } else {
80
+ assert(tmp == 0x77665544);
206
+ }
81
+ }
82
+ return EXIT_SUCCESS;
207
+}
83
+}
208
+
209
+int main(int argc, char **argv)
210
+{
211
+ g_test_init(&argc, &argv, NULL);
212
+ g_test_add_func("/host-utils/test_divu128", test_divu128);
213
+ g_test_add_func("/host-utils/test_divs128", test_divs128);
214
+ return g_test_run();
215
+}
216
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
217
index XXXXXXX..XXXXXXX 100644
218
--- a/tests/unit/meson.build
219
+++ b/tests/unit/meson.build
220
@@ -XXX,XX +XXX,XX @@ tests = {
221
# all code tested by test-x86-cpuid is inside topology.h
222
'test-x86-cpuid': [],
223
'test-cutils': [],
224
+ 'test-div128': [],
225
'test-shift128': [],
226
'test-mul64': [],
227
# all code tested by test-int128 is inside int128.h
--
2.25.1

Deleted patch
1
Prepare for tracking different masks by renaming this one.
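(Editor's aside, not part of the patch: the z_mask convention named here tracks which bits of a value may still be nonzero, so known-zero information propagates through an AND exactly as the rewritten case below computes it. A minimal sketch, assuming only the semantics stated in the new comment:)

    #include <stdint.h>

    /*
     * Sketch: combine known-zero-bit masks across an AND.
     * A clear bit in z_mask means "this bit is known to be zero",
     * so a result bit can be nonzero only if it may be nonzero
     * in both operands.
     */
    static uint64_t z_mask_of_and(uint64_t z1, uint64_t z2)
    {
        return z1 & z2;
    }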
2
1
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 142 +++++++++++++++++++++++++------------------------
9
1 file changed, 72 insertions(+), 70 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
16
TCGTemp *prev_copy;
17
TCGTemp *next_copy;
18
uint64_t val;
19
- uint64_t mask;
20
+ uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
21
} TempOptInfo;
22
23
static inline TempOptInfo *ts_info(TCGTemp *ts)
24
@@ -XXX,XX +XXX,XX @@ static void reset_ts(TCGTemp *ts)
25
ti->next_copy = ts;
26
ti->prev_copy = ts;
27
ti->is_const = false;
28
- ti->mask = -1;
29
+ ti->z_mask = -1;
30
}
31
32
static void reset_temp(TCGArg arg)
33
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts)
34
if (ts->kind == TEMP_CONST) {
35
ti->is_const = true;
36
ti->val = ts->val;
37
- ti->mask = ts->val;
38
+ ti->z_mask = ts->val;
39
if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
40
/* High bits of a 32-bit quantity are garbage. */
41
- ti->mask |= ~0xffffffffull;
42
+ ti->z_mask |= ~0xffffffffull;
43
}
44
} else {
45
ti->is_const = false;
46
- ti->mask = -1;
47
+ ti->z_mask = -1;
48
}
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
52
const TCGOpDef *def;
53
TempOptInfo *di;
54
TempOptInfo *si;
55
- uint64_t mask;
56
+ uint64_t z_mask;
57
TCGOpcode new_op;
58
59
if (ts_are_copies(dst_ts, src_ts)) {
60
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
61
op->args[0] = dst;
62
op->args[1] = src;
63
64
- mask = si->mask;
65
+ z_mask = si->z_mask;
66
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
67
/* High bits of the destination are now garbage. */
68
- mask |= ~0xffffffffull;
69
+ z_mask |= ~0xffffffffull;
70
}
71
- di->mask = mask;
72
+ di->z_mask = z_mask;
73
74
if (src_ts->type == dst_ts->type) {
75
TempOptInfo *ni = ts_info(si->next_copy);
76
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
77
}
78
79
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
80
- uint64_t mask, partmask, affected, tmp;
81
+ uint64_t z_mask, partmask, affected, tmp;
82
int nb_oargs, nb_iargs;
83
TCGOpcode opc = op->opc;
84
const TCGOpDef *def = &tcg_op_defs[opc];
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
87
/* Simplify using known-zero bits. Currently only ops with a single
88
output argument is supported. */
89
- mask = -1;
90
+ z_mask = -1;
91
affected = -1;
92
switch (opc) {
93
CASE_OP_32_64(ext8s):
94
- if ((arg_info(op->args[1])->mask & 0x80) != 0) {
95
+ if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
96
break;
97
}
98
QEMU_FALLTHROUGH;
99
CASE_OP_32_64(ext8u):
100
- mask = 0xff;
101
+ z_mask = 0xff;
102
goto and_const;
103
CASE_OP_32_64(ext16s):
104
- if ((arg_info(op->args[1])->mask & 0x8000) != 0) {
105
+ if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
106
break;
107
}
108
QEMU_FALLTHROUGH;
109
CASE_OP_32_64(ext16u):
110
- mask = 0xffff;
111
+ z_mask = 0xffff;
112
goto and_const;
113
case INDEX_op_ext32s_i64:
114
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
115
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
116
break;
117
}
118
QEMU_FALLTHROUGH;
119
case INDEX_op_ext32u_i64:
120
- mask = 0xffffffffU;
121
+ z_mask = 0xffffffffU;
122
goto and_const;
123
124
CASE_OP_32_64(and):
125
- mask = arg_info(op->args[2])->mask;
126
+ z_mask = arg_info(op->args[2])->z_mask;
127
if (arg_is_const(op->args[2])) {
128
and_const:
129
- affected = arg_info(op->args[1])->mask & ~mask;
130
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
131
}
132
- mask = arg_info(op->args[1])->mask & mask;
133
+ z_mask = arg_info(op->args[1])->z_mask & z_mask;
134
break;
135
136
case INDEX_op_ext_i32_i64:
137
- if ((arg_info(op->args[1])->mask & 0x80000000) != 0) {
138
+ if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
139
break;
140
}
141
QEMU_FALLTHROUGH;
142
case INDEX_op_extu_i32_i64:
143
/* We do not compute affected as it is a size changing op. */
144
- mask = (uint32_t)arg_info(op->args[1])->mask;
145
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
146
break;
147
148
CASE_OP_32_64(andc):
149
/* Known-zeros does not imply known-ones. Therefore unless
150
op->args[2] is constant, we can't infer anything from it. */
151
if (arg_is_const(op->args[2])) {
152
- mask = ~arg_info(op->args[2])->mask;
153
+ z_mask = ~arg_info(op->args[2])->z_mask;
154
goto and_const;
155
}
156
/* But we certainly know nothing outside args[1] may be set. */
157
- mask = arg_info(op->args[1])->mask;
158
+ z_mask = arg_info(op->args[1])->z_mask;
159
break;
160
161
case INDEX_op_sar_i32:
162
if (arg_is_const(op->args[2])) {
163
tmp = arg_info(op->args[2])->val & 31;
164
- mask = (int32_t)arg_info(op->args[1])->mask >> tmp;
165
+ z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
166
}
167
break;
168
case INDEX_op_sar_i64:
169
if (arg_is_const(op->args[2])) {
170
tmp = arg_info(op->args[2])->val & 63;
171
- mask = (int64_t)arg_info(op->args[1])->mask >> tmp;
172
+ z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
173
}
174
break;
175
176
case INDEX_op_shr_i32:
177
if (arg_is_const(op->args[2])) {
178
tmp = arg_info(op->args[2])->val & 31;
179
- mask = (uint32_t)arg_info(op->args[1])->mask >> tmp;
180
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
181
}
182
break;
183
case INDEX_op_shr_i64:
184
if (arg_is_const(op->args[2])) {
185
tmp = arg_info(op->args[2])->val & 63;
186
- mask = (uint64_t)arg_info(op->args[1])->mask >> tmp;
187
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
188
}
189
break;
190
191
case INDEX_op_extrl_i64_i32:
192
- mask = (uint32_t)arg_info(op->args[1])->mask;
193
+ z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
194
break;
195
case INDEX_op_extrh_i64_i32:
196
- mask = (uint64_t)arg_info(op->args[1])->mask >> 32;
197
+ z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
198
break;
199
200
CASE_OP_32_64(shl):
201
if (arg_is_const(op->args[2])) {
202
tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
203
- mask = arg_info(op->args[1])->mask << tmp;
204
+ z_mask = arg_info(op->args[1])->z_mask << tmp;
205
}
206
break;
207
208
CASE_OP_32_64(neg):
209
/* Set to 1 all bits to the left of the rightmost. */
210
- mask = -(arg_info(op->args[1])->mask
211
- & -arg_info(op->args[1])->mask);
212
+ z_mask = -(arg_info(op->args[1])->z_mask
213
+ & -arg_info(op->args[1])->z_mask);
214
break;
215
216
CASE_OP_32_64(deposit):
217
- mask = deposit64(arg_info(op->args[1])->mask,
218
- op->args[3], op->args[4],
219
- arg_info(op->args[2])->mask);
220
+ z_mask = deposit64(arg_info(op->args[1])->z_mask,
221
+ op->args[3], op->args[4],
222
+ arg_info(op->args[2])->z_mask);
223
break;
224
225
CASE_OP_32_64(extract):
226
- mask = extract64(arg_info(op->args[1])->mask,
227
- op->args[2], op->args[3]);
228
+ z_mask = extract64(arg_info(op->args[1])->z_mask,
229
+ op->args[2], op->args[3]);
230
if (op->args[2] == 0) {
231
- affected = arg_info(op->args[1])->mask & ~mask;
232
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
233
}
234
break;
235
CASE_OP_32_64(sextract):
236
- mask = sextract64(arg_info(op->args[1])->mask,
237
- op->args[2], op->args[3]);
238
- if (op->args[2] == 0 && (tcg_target_long)mask >= 0) {
239
- affected = arg_info(op->args[1])->mask & ~mask;
240
+ z_mask = sextract64(arg_info(op->args[1])->z_mask,
241
+ op->args[2], op->args[3]);
242
+ if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
243
+ affected = arg_info(op->args[1])->z_mask & ~z_mask;
244
}
245
break;
246
247
CASE_OP_32_64(or):
248
CASE_OP_32_64(xor):
249
- mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask;
250
+ z_mask = arg_info(op->args[1])->z_mask
251
+ | arg_info(op->args[2])->z_mask;
252
break;
253
254
case INDEX_op_clz_i32:
255
case INDEX_op_ctz_i32:
256
- mask = arg_info(op->args[2])->mask | 31;
257
+ z_mask = arg_info(op->args[2])->z_mask | 31;
258
break;
259
260
case INDEX_op_clz_i64:
261
case INDEX_op_ctz_i64:
262
- mask = arg_info(op->args[2])->mask | 63;
263
+ z_mask = arg_info(op->args[2])->z_mask | 63;
264
break;
265
266
case INDEX_op_ctpop_i32:
267
- mask = 32 | 31;
268
+ z_mask = 32 | 31;
269
break;
270
case INDEX_op_ctpop_i64:
271
- mask = 64 | 63;
272
+ z_mask = 64 | 63;
273
break;
274
275
CASE_OP_32_64(setcond):
276
case INDEX_op_setcond2_i32:
277
- mask = 1;
278
+ z_mask = 1;
279
break;
280
281
CASE_OP_32_64(movcond):
282
- mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask;
283
+ z_mask = arg_info(op->args[3])->z_mask
284
+ | arg_info(op->args[4])->z_mask;
285
break;
286
287
CASE_OP_32_64(ld8u):
288
- mask = 0xff;
289
+ z_mask = 0xff;
290
break;
291
CASE_OP_32_64(ld16u):
292
- mask = 0xffff;
293
+ z_mask = 0xffff;
294
break;
295
case INDEX_op_ld32u_i64:
296
- mask = 0xffffffffu;
297
+ z_mask = 0xffffffffu;
298
break;
299
300
CASE_OP_32_64(qemu_ld):
301
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
302
MemOpIdx oi = op->args[nb_oargs + nb_iargs];
303
MemOp mop = get_memop(oi);
304
if (!(mop & MO_SIGN)) {
305
- mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
306
+ z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
307
}
308
}
309
break;
310
311
CASE_OP_32_64(bswap16):
312
- mask = arg_info(op->args[1])->mask;
313
- if (mask <= 0xffff) {
314
+ z_mask = arg_info(op->args[1])->z_mask;
315
+ if (z_mask <= 0xffff) {
316
op->args[2] |= TCG_BSWAP_IZ;
317
}
318
- mask = bswap16(mask);
319
+ z_mask = bswap16(z_mask);
320
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
321
case TCG_BSWAP_OZ:
322
break;
323
case TCG_BSWAP_OS:
324
- mask = (int16_t)mask;
325
+ z_mask = (int16_t)z_mask;
326
break;
327
default: /* undefined high bits */
328
- mask |= MAKE_64BIT_MASK(16, 48);
329
+ z_mask |= MAKE_64BIT_MASK(16, 48);
330
break;
331
}
332
break;
333
334
case INDEX_op_bswap32_i64:
335
- mask = arg_info(op->args[1])->mask;
336
- if (mask <= 0xffffffffu) {
337
+ z_mask = arg_info(op->args[1])->z_mask;
338
+ if (z_mask <= 0xffffffffu) {
339
op->args[2] |= TCG_BSWAP_IZ;
340
}
341
- mask = bswap32(mask);
342
+ z_mask = bswap32(z_mask);
343
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
344
case TCG_BSWAP_OZ:
345
break;
346
case TCG_BSWAP_OS:
347
- mask = (int32_t)mask;
348
+ z_mask = (int32_t)z_mask;
349
break;
350
default: /* undefined high bits */
351
- mask |= MAKE_64BIT_MASK(32, 32);
352
+ z_mask |= MAKE_64BIT_MASK(32, 32);
353
break;
354
}
355
break;
356
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
357
/* 32-bit ops generate 32-bit results. For the result is zero test
358
below, we can ignore high bits, but for further optimizations we
359
need to record that the high bits contain garbage. */
360
- partmask = mask;
361
+ partmask = z_mask;
362
if (!(def->flags & TCG_OPF_64BIT)) {
363
- mask |= ~(tcg_target_ulong)0xffffffffu;
364
+ z_mask |= ~(tcg_target_ulong)0xffffffffu;
365
partmask &= 0xffffffffu;
366
affected &= 0xffffffffu;
367
}
368
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
369
vs the high word of the input. */
370
do_setcond_high:
371
reset_temp(op->args[0]);
372
- arg_info(op->args[0])->mask = 1;
373
+ arg_info(op->args[0])->z_mask = 1;
374
op->opc = INDEX_op_setcond_i32;
375
op->args[1] = op->args[2];
376
op->args[2] = op->args[4];
377
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
378
}
379
do_setcond_low:
380
reset_temp(op->args[0]);
381
- arg_info(op->args[0])->mask = 1;
382
+ arg_info(op->args[0])->z_mask = 1;
383
op->opc = INDEX_op_setcond_i32;
384
op->args[2] = op->args[3];
385
op->args[3] = op->args[5];
386
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
387
/* Default case: we know nothing about operation (or were unable
388
to compute the operation result) so no propagation is done.
389
We trash everything if the operation is the end of a basic
390
- block, otherwise we only trash the output args. "mask" is
391
+ block, otherwise we only trash the output args. "z_mask" is
392
the non-zero bits mask for the first output arg. */
393
if (def->flags & TCG_OPF_BB_END) {
394
memset(&temps_used, 0, sizeof(temps_used));
395
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
396
/* Save the corresponding known-zero bits mask for the
397
first output argument (only one supported so far). */
398
if (i == 0) {
399
- arg_info(op->args[i])->mask = mask;
400
+ arg_info(op->args[i])->z_mask = z_mask;
401
}
402
}
403
}
404
--
405
2.25.1
406
407
Deleted patch
1
This will expose the variable to subroutines that
2
will be broken out of tcg_optimize.
3
1
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 11 ++++++-----
10
1 file changed, 6 insertions(+), 5 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
18
typedef struct OptContext {
19
TCGContext *tcg;
20
+ TCGOp *prev_mb;
21
TCGTempSet temps_used;
22
} OptContext;
23
24
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
25
void tcg_optimize(TCGContext *s)
26
{
27
int nb_temps, nb_globals, i;
28
- TCGOp *op, *op_next, *prev_mb = NULL;
29
+ TCGOp *op, *op_next;
30
OptContext ctx = { .tcg = s };
31
32
/* Array VALS has an element for each temp.
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
36
/* Eliminate duplicate and redundant fence instructions. */
37
- if (prev_mb) {
38
+ if (ctx.prev_mb) {
39
switch (opc) {
40
case INDEX_op_mb:
41
/* Merge two barriers of the same type into one,
42
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
43
* barrier. This is stricter than specified but for
44
* the purposes of TCG is better than not optimizing.
45
*/
46
- prev_mb->args[0] |= op->args[0];
47
+ ctx.prev_mb->args[0] |= op->args[0];
48
tcg_op_remove(s, op);
49
break;
50
51
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
52
case INDEX_op_qemu_st_i64:
53
case INDEX_op_call:
54
/* Opcodes that touch guest memory stop the optimization. */
55
- prev_mb = NULL;
56
+ ctx.prev_mb = NULL;
57
break;
58
}
59
} else if (opc == INDEX_op_mb) {
60
- prev_mb = op;
61
+ ctx.prev_mb = op;
62
}
63
}
64
}
65
--
66
2.25.1
67
68
Deleted patch
1
Rather than try to keep these up-to-date across folding,
2
re-read nb_oargs at the end, after re-reading the opcode.
3
1
4
A couple of asserts need dropping, but that will take care
5
of itself as we split the function further.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 14 ++++----------
12
1 file changed, 4 insertions(+), 10 deletions(-)
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
19
20
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
21
uint64_t z_mask, partmask, affected, tmp;
22
- int nb_oargs, nb_iargs;
23
TCGOpcode opc = op->opc;
24
const TCGOpDef *def;
25
26
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
27
}
28
29
def = &tcg_op_defs[opc];
30
- nb_oargs = def->nb_oargs;
31
- nb_iargs = def->nb_iargs;
32
- init_arguments(&ctx, op, nb_oargs + nb_iargs);
33
- copy_propagate(&ctx, op, nb_oargs, nb_iargs);
34
+ init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
35
+ copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
36
37
/* For commutative operations make constant second argument */
38
switch (opc) {
39
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
40
41
CASE_OP_32_64(qemu_ld):
42
{
43
- MemOpIdx oi = op->args[nb_oargs + nb_iargs];
44
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
45
MemOp mop = get_memop(oi);
46
if (!(mop & MO_SIGN)) {
47
z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
}
50
51
if (partmask == 0) {
52
- tcg_debug_assert(nb_oargs == 1);
53
tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
54
continue;
55
}
56
if (affected == 0) {
57
- tcg_debug_assert(nb_oargs == 1);
58
tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
59
continue;
60
}
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
} else if (args_are_copies(op->args[1], op->args[2])) {
63
op->opc = INDEX_op_dup_vec;
64
TCGOP_VECE(op) = MO_32;
65
- nb_iargs = 1;
66
}
67
break;
68
69
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
70
op->opc = opc = (opc == INDEX_op_movcond_i32
71
? INDEX_op_setcond_i32
72
: INDEX_op_setcond_i64);
73
- nb_iargs = 2;
74
}
75
break;
76
77
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
78
if (def->flags & TCG_OPF_BB_END) {
79
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
80
} else {
81
+ int nb_oargs = def->nb_oargs;
82
for (i = 0; i < nb_oargs; i++) {
83
reset_temp(op->args[i]);
84
/* Save the corresponding known-zero bits mask for the
85
--
86
2.25.1
87
88
Deleted patch
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++++++---
7
1 file changed, 6 insertions(+), 3 deletions(-)
8
1
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
14
uint64_t z_mask, partmask, affected, tmp;
15
TCGOpcode opc = op->opc;
16
const TCGOpDef *def;
17
+ bool done = false;
18
19
/* Calls are special. */
20
if (opc == INDEX_op_call) {
21
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
22
allocator where needed and possible. Also detect copies. */
23
switch (opc) {
24
CASE_OP_32_64_VEC(mov):
25
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
26
- continue;
27
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
28
+ break;
29
30
case INDEX_op_dup_vec:
31
if (arg_is_const(op->args[1])) {
32
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
33
break;
34
}
35
36
- finish_folding(&ctx, op);
37
+ if (!done) {
38
+ finish_folding(&ctx, op);
39
+ }
40
41
/* Eliminate duplicate and redundant fence instructions. */
42
if (ctx.prev_mb) {
43
--
44
2.25.1
45
46
Deleted patch
1
Reduce some code duplication by folding the NE and EQ cases.
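As a reading aid (an editor's sketch, not code from the patch): setting inv = 1 for TCG_COND_NE and XOR-ing it into the result of comparing one half of the double-word operands is what lets one switch serve both conditions: i ^ inv == 0 always means the whole setcond is already decided by that half, while i ^ inv == 1 means the result hinges on the other half. A hypothetical helper that spells this out:

    /* 'partial' is the constant-folded comparison of one half:
     * 1 or 0 if known, -1 if unknown, mirroring the values the
     * patch gets back from do_constant_folding_cond(). */
    static const char *classify_half(int partial, int inv)
    {
        if (partial < 0) {
            return "unknown";
        }
        return (partial ^ inv) ? "defer to the other half"
                               : "whole result is constant";
    }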
2
1
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 145 ++++++++++++++++++++++++-------------------------
8
1 file changed, 72 insertions(+), 73 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return fold_const2(ctx, op);
16
}
17
18
+static bool fold_setcond2(OptContext *ctx, TCGOp *op)
19
+{
20
+ TCGCond cond = op->args[5];
21
+ int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
22
+ int inv = 0;
23
+
24
+ if (i >= 0) {
25
+ goto do_setcond_const;
26
+ }
27
+
28
+ switch (cond) {
29
+ case TCG_COND_LT:
30
+ case TCG_COND_GE:
31
+ /*
32
+ * Simplify LT/GE comparisons vs zero to a single compare
33
+ * vs the high word of the input.
34
+ */
35
+ if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
36
+ arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
37
+ goto do_setcond_high;
38
+ }
39
+ break;
40
+
41
+ case TCG_COND_NE:
42
+ inv = 1;
43
+ QEMU_FALLTHROUGH;
44
+ case TCG_COND_EQ:
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
50
+ op->args[3], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_setcond_const;
54
+ case 1:
55
+ goto do_setcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
59
+ op->args[4], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_setcond_const;
63
+ case 1:
64
+ op->args[2] = op->args[3];
65
+ op->args[3] = cond;
66
+ op->opc = INDEX_op_setcond_i32;
67
+ break;
68
+ }
69
+ break;
70
+
71
+ default:
72
+ break;
73
+
74
+ do_setcond_high:
75
+ op->args[1] = op->args[2];
76
+ op->args[2] = op->args[4];
77
+ op->args[3] = cond;
78
+ op->opc = INDEX_op_setcond_i32;
79
+ break;
80
+ }
81
+ return false;
82
+
83
+ do_setcond_const:
84
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
85
+}
86
+
87
static bool fold_shift(OptContext *ctx, TCGOp *op)
88
{
89
return fold_const2(ctx, op);
90
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
91
}
92
break;
93
94
- case INDEX_op_setcond2_i32:
95
- i = do_constant_folding_cond2(&op->args[1], &op->args[3],
96
- op->args[5]);
97
- if (i >= 0) {
98
- do_setcond_const:
99
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
100
- continue;
101
- }
102
- if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
103
- && arg_is_const(op->args[3])
104
- && arg_info(op->args[3])->val == 0
105
- && arg_is_const(op->args[4])
106
- && arg_info(op->args[4])->val == 0) {
107
- /* Simplify LT/GE comparisons vs zero to a single compare
108
- vs the high word of the input. */
109
- do_setcond_high:
110
- reset_temp(op->args[0]);
111
- arg_info(op->args[0])->z_mask = 1;
112
- op->opc = INDEX_op_setcond_i32;
113
- op->args[1] = op->args[2];
114
- op->args[2] = op->args[4];
115
- op->args[3] = op->args[5];
116
- break;
117
- }
118
- if (op->args[5] == TCG_COND_EQ) {
119
- /* Simplify EQ comparisons where one of the pairs
120
- can be simplified. */
121
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
122
- op->args[1], op->args[3],
123
- TCG_COND_EQ);
124
- if (i == 0) {
125
- goto do_setcond_const;
126
- } else if (i > 0) {
127
- goto do_setcond_high;
128
- }
129
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
130
- op->args[2], op->args[4],
131
- TCG_COND_EQ);
132
- if (i == 0) {
133
- goto do_setcond_high;
134
- } else if (i < 0) {
135
- break;
136
- }
137
- do_setcond_low:
138
- reset_temp(op->args[0]);
139
- arg_info(op->args[0])->z_mask = 1;
140
- op->opc = INDEX_op_setcond_i32;
141
- op->args[2] = op->args[3];
142
- op->args[3] = op->args[5];
143
- break;
144
- }
145
- if (op->args[5] == TCG_COND_NE) {
146
- /* Simplify NE comparisons where one of the pairs
147
- can be simplified. */
148
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
149
- op->args[1], op->args[3],
150
- TCG_COND_NE);
151
- if (i == 0) {
152
- goto do_setcond_high;
153
- } else if (i > 0) {
154
- goto do_setcond_const;
155
- }
156
- i = do_constant_folding_cond(INDEX_op_setcond_i32,
157
- op->args[2], op->args[4],
158
- TCG_COND_NE);
159
- if (i == 0) {
160
- goto do_setcond_low;
161
- } else if (i > 0) {
162
- goto do_setcond_const;
163
- }
164
- }
165
- break;
166
-
167
default:
168
break;
169
170
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
171
CASE_OP_32_64(shr):
172
done = fold_shift(&ctx, op);
173
break;
174
+ case INDEX_op_setcond2_i32:
175
+ done = fold_setcond2(&ctx, op);
176
+ break;
177
CASE_OP_32_64_VEC(sub):
178
done = fold_sub(&ctx, op);
179
break;
180
--
181
2.25.1
182
183
Deleted patch
1
Reduce some code duplication by folding the NE and EQ cases.
2
1
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 159 +++++++++++++++++++++++++------------------------
7
1 file changed, 81 insertions(+), 78 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
14
return fold_const2(ctx, op);
15
}
16
17
+static bool fold_brcond2(OptContext *ctx, TCGOp *op)
18
+{
19
+ TCGCond cond = op->args[4];
20
+ int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
21
+ TCGArg label = op->args[5];
22
+ int inv = 0;
23
+
24
+ if (i >= 0) {
25
+ goto do_brcond_const;
26
+ }
27
+
28
+ switch (cond) {
29
+ case TCG_COND_LT:
30
+ case TCG_COND_GE:
31
+ /*
32
+ * Simplify LT/GE comparisons vs zero to a single compare
33
+ * vs the high word of the input.
34
+ */
35
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
36
+ arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
37
+ goto do_brcond_high;
38
+ }
39
+ break;
40
+
41
+ case TCG_COND_NE:
42
+ inv = 1;
43
+ QEMU_FALLTHROUGH;
44
+ case TCG_COND_EQ:
45
+ /*
46
+ * Simplify EQ/NE comparisons where one of the pairs
47
+ * can be simplified.
48
+ */
49
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
50
+ op->args[2], cond);
51
+ switch (i ^ inv) {
52
+ case 0:
53
+ goto do_brcond_const;
54
+ case 1:
55
+ goto do_brcond_high;
56
+ }
57
+
58
+ i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
59
+ op->args[3], cond);
60
+ switch (i ^ inv) {
61
+ case 0:
62
+ goto do_brcond_const;
63
+ case 1:
64
+ op->opc = INDEX_op_brcond_i32;
65
+ op->args[1] = op->args[2];
66
+ op->args[2] = cond;
67
+ op->args[3] = label;
68
+ break;
69
+ }
70
+ break;
71
+
72
+ default:
73
+ break;
74
+
75
+ do_brcond_high:
76
+ op->opc = INDEX_op_brcond_i32;
77
+ op->args[0] = op->args[1];
78
+ op->args[1] = op->args[3];
79
+ op->args[2] = cond;
80
+ op->args[3] = label;
81
+ break;
82
+
83
+ do_brcond_const:
84
+ if (i == 0) {
85
+ tcg_op_remove(ctx->tcg, op);
86
+ return true;
87
+ }
88
+ op->opc = INDEX_op_br;
89
+ op->args[0] = label;
90
+ break;
91
+ }
92
+ return false;
93
+}
94
+
95
static bool fold_call(OptContext *ctx, TCGOp *op)
96
{
97
TCGContext *s = ctx->tcg;
98
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
99
}
100
break;
101
102
- case INDEX_op_brcond2_i32:
103
- i = do_constant_folding_cond2(&op->args[0], &op->args[2],
104
- op->args[4]);
105
- if (i == 0) {
106
- do_brcond_false:
107
- tcg_op_remove(s, op);
108
- continue;
109
- }
110
- if (i > 0) {
111
- do_brcond_true:
112
- op->opc = opc = INDEX_op_br;
113
- op->args[0] = op->args[5];
114
- break;
115
- }
116
- if ((op->args[4] == TCG_COND_LT || op->args[4] == TCG_COND_GE)
117
- && arg_is_const(op->args[2])
118
- && arg_info(op->args[2])->val == 0
119
- && arg_is_const(op->args[3])
120
- && arg_info(op->args[3])->val == 0) {
121
- /* Simplify LT/GE comparisons vs zero to a single compare
122
- vs the high word of the input. */
123
- do_brcond_high:
124
- op->opc = opc = INDEX_op_brcond_i32;
125
- op->args[0] = op->args[1];
126
- op->args[1] = op->args[3];
127
- op->args[2] = op->args[4];
128
- op->args[3] = op->args[5];
129
- break;
130
- }
131
- if (op->args[4] == TCG_COND_EQ) {
132
- /* Simplify EQ comparisons where one of the pairs
133
- can be simplified. */
134
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
135
- op->args[0], op->args[2],
136
- TCG_COND_EQ);
137
- if (i == 0) {
138
- goto do_brcond_false;
139
- } else if (i > 0) {
140
- goto do_brcond_high;
141
- }
142
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
143
- op->args[1], op->args[3],
144
- TCG_COND_EQ);
145
- if (i == 0) {
146
- goto do_brcond_false;
147
- } else if (i < 0) {
148
- break;
149
- }
150
- do_brcond_low:
151
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
152
- op->opc = INDEX_op_brcond_i32;
153
- op->args[1] = op->args[2];
154
- op->args[2] = op->args[4];
155
- op->args[3] = op->args[5];
156
- break;
157
- }
158
- if (op->args[4] == TCG_COND_NE) {
159
- /* Simplify NE comparisons where one of the pairs
160
- can be simplified. */
161
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
162
- op->args[0], op->args[2],
163
- TCG_COND_NE);
164
- if (i == 0) {
165
- goto do_brcond_high;
166
- } else if (i > 0) {
167
- goto do_brcond_true;
168
- }
169
- i = do_constant_folding_cond(INDEX_op_brcond_i32,
170
- op->args[1], op->args[3],
171
- TCG_COND_NE);
172
- if (i == 0) {
173
- goto do_brcond_low;
174
- } else if (i > 0) {
175
- goto do_brcond_true;
176
- }
177
- }
178
- break;
179
-
180
default:
181
break;
182
183
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
184
CASE_OP_32_64_VEC(andc):
185
done = fold_andc(&ctx, op);
186
break;
187
+ case INDEX_op_brcond2_i32:
188
+ done = fold_brcond2(&ctx, op);
189
+ break;
190
CASE_OP_32_64(ctpop):
191
done = fold_ctpop(&ctx, op);
192
break;
193
--
194
2.25.1
195
196
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 33 +++++++++++++++++++--------------
6
1 file changed, 19 insertions(+), 14 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_brcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[2];
19
+ int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
20
+
21
+ if (i == 0) {
22
+ tcg_op_remove(ctx->tcg, op);
23
+ return true;
24
+ }
25
+ if (i > 0) {
26
+ op->opc = INDEX_op_br;
27
+ op->args[0] = op->args[3];
28
+ }
29
+ return false;
30
+}
31
+
32
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
33
{
34
TCGCond cond = op->args[4];
35
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
36
}
37
break;
38
39
- CASE_OP_32_64(brcond):
40
- i = do_constant_folding_cond(opc, op->args[0],
41
- op->args[1], op->args[2]);
42
- if (i == 0) {
43
- tcg_op_remove(s, op);
44
- continue;
45
- } else if (i > 0) {
46
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
47
- op->opc = opc = INDEX_op_br;
48
- op->args[0] = op->args[3];
49
- break;
50
- }
51
- break;
52
-
53
CASE_OP_32_64(movcond):
54
i = do_constant_folding_cond(opc, op->args[1],
55
op->args[2], op->args[5]);
56
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
57
CASE_OP_32_64_VEC(andc):
58
done = fold_andc(&ctx, op);
59
break;
60
+ CASE_OP_32_64(brcond):
61
+ done = fold_brcond(&ctx, op);
62
+ break;
63
case INDEX_op_brcond2_i32:
64
done = fold_brcond2(&ctx, op);
65
break;
66
--
67
2.25.1
68
69
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 23 ++++++++++++++---------
6
1 file changed, 14 insertions(+), 9 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
17
+{
18
+ TCGCond cond = op->args[3];
19
+ int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
20
+
21
+ if (i >= 0) {
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
28
{
29
TCGCond cond = op->args[5];
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(setcond):
35
- i = do_constant_folding_cond(opc, op->args[1],
36
- op->args[2], op->args[3]);
37
- if (i >= 0) {
38
- tcg_opt_gen_movi(&ctx, op, op->args[0], i);
39
- continue;
40
- }
41
- break;
42
-
43
CASE_OP_32_64(movcond):
44
i = do_constant_folding_cond(opc, op->args[1],
45
op->args[2], op->args[5]);
46
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
47
CASE_OP_32_64(shr):
48
done = fold_shift(&ctx, op);
49
break;
50
+ CASE_OP_32_64(setcond):
51
+ done = fold_setcond(&ctx, op);
52
+ break;
53
case INDEX_op_setcond2_i32:
54
done = fold_setcond2(&ctx, op);
55
break;
56
--
57
2.25.1
58
59
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 37 +++++++++++++++++++++----------------
6
1 file changed, 21 insertions(+), 16 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
19
+ uint32_t a = arg_info(op->args[2])->val;
20
+ uint32_t b = arg_info(op->args[3])->val;
21
+ uint64_t r = (uint64_t)a * b;
22
+ TCGArg rl, rh;
23
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
24
+
25
+ rl = op->args[0];
26
+ rh = op->args[1];
27
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
28
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
29
+ return true;
30
+ }
31
+ return false;
32
+}
33
+
34
static bool fold_nand(OptContext *ctx, TCGOp *op)
35
{
36
return fold_const2(ctx, op);
37
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
38
}
39
break;
40
41
- case INDEX_op_mulu2_i32:
42
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
43
- uint32_t a = arg_info(op->args[2])->val;
44
- uint32_t b = arg_info(op->args[3])->val;
45
- uint64_t r = (uint64_t)a * b;
46
- TCGArg rl, rh;
47
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
48
-
49
- rl = op->args[0];
50
- rh = op->args[1];
51
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
52
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
53
- continue;
54
- }
55
- break;
56
-
57
default:
58
break;
59
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
CASE_OP_32_64(muluh):
62
done = fold_mul_highpart(&ctx, op);
63
break;
64
+ case INDEX_op_mulu2_i32:
65
+ done = fold_mulu2_i32(&ctx, op);
66
+ break;
67
CASE_OP_32_64(nand):
68
done = fold_nand(&ctx, op);
69
break;
70
--
71
2.25.1
72
73
Deleted patch
1
Add two additional helpers, fold_add2_i32 and fold_sub2_i32,
2
which will not be simple wrappers forever.
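For orientation (an editor's sketch, not part of the patch): the shared fold_addsub2_i32 body simply reassembles 64-bit constants from their 32-bit halves, performs the add or subtract, and splits the result back into low and high words, as in:

    #include <stdint.h>

    /* Hypothetical standalone version of the constant math. */
    static void addsub2_i32_const(uint32_t al, uint32_t ah,
                                  uint32_t bl, uint32_t bh,
                                  int add, uint32_t *rl, uint32_t *rh)
    {
        uint64_t a = ((uint64_t)ah << 32) | al;
        uint64_t b = ((uint64_t)bh << 32) | bl;
        uint64_t r = add ? a + b : a - b;

        *rl = (uint32_t)r;
        *rh = (uint32_t)(r >> 32);
    }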
3
1
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 70 +++++++++++++++++++++++++++++++-------------------
9
1 file changed, 44 insertions(+), 26 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
16
return fold_const2(ctx, op);
17
}
18
19
+static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
20
+{
21
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
22
+ arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
23
+ uint32_t al = arg_info(op->args[2])->val;
24
+ uint32_t ah = arg_info(op->args[3])->val;
25
+ uint32_t bl = arg_info(op->args[4])->val;
26
+ uint32_t bh = arg_info(op->args[5])->val;
27
+ uint64_t a = ((uint64_t)ah << 32) | al;
28
+ uint64_t b = ((uint64_t)bh << 32) | bl;
29
+ TCGArg rl, rh;
30
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
31
+
32
+ if (add) {
33
+ a += b;
34
+ } else {
35
+ a -= b;
36
+ }
37
+
38
+ rl = op->args[0];
39
+ rh = op->args[1];
40
+ tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
41
+ tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
42
+ return true;
43
+ }
44
+ return false;
45
+}
46
+
47
+static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
48
+{
49
+ return fold_addsub2_i32(ctx, op, true);
50
+}
51
+
52
static bool fold_and(OptContext *ctx, TCGOp *op)
53
{
54
return fold_const2(ctx, op);
55
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
56
return fold_const2(ctx, op);
57
}
58
59
+static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
60
+{
61
+ return fold_addsub2_i32(ctx, op, false);
62
+}
63
+
64
static bool fold_xor(OptContext *ctx, TCGOp *op)
65
{
66
return fold_const2(ctx, op);
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
}
69
break;
70
71
- case INDEX_op_add2_i32:
72
- case INDEX_op_sub2_i32:
73
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])
74
- && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
75
- uint32_t al = arg_info(op->args[2])->val;
76
- uint32_t ah = arg_info(op->args[3])->val;
77
- uint32_t bl = arg_info(op->args[4])->val;
78
- uint32_t bh = arg_info(op->args[5])->val;
79
- uint64_t a = ((uint64_t)ah << 32) | al;
80
- uint64_t b = ((uint64_t)bh << 32) | bl;
81
- TCGArg rl, rh;
82
- TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
83
-
84
- if (opc == INDEX_op_add2_i32) {
85
- a += b;
86
- } else {
87
- a -= b;
88
- }
89
-
90
- rl = op->args[0];
91
- rh = op->args[1];
92
- tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
93
- tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
94
- continue;
95
- }
96
- break;
97
98
default:
99
break;
100
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
101
CASE_OP_32_64_VEC(add):
102
done = fold_add(&ctx, op);
103
break;
104
+ case INDEX_op_add2_i32:
105
+ done = fold_add2_i32(&ctx, op);
106
+ break;
107
CASE_OP_32_64_VEC(and):
108
done = fold_and(&ctx, op);
109
break;
110
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
111
CASE_OP_32_64_VEC(sub):
112
done = fold_sub(&ctx, op);
113
break;
114
+ case INDEX_op_sub2_i32:
115
+ done = fold_sub2_i32(&ctx, op);
116
+ break;
117
CASE_OP_32_64_VEC(xor):
118
done = fold_xor(&ctx, op);
119
break;
120
--
121
2.25.1
122
123
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
6
1 file changed, 22 insertions(+), 17 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
+ uint64_t v1 = arg_info(op->args[1])->val;
20
+ uint64_t v2 = arg_info(op->args[2])->val;
21
+ int shr = op->args[3];
22
+
23
+ if (op->opc == INDEX_op_extract2_i64) {
24
+ v1 >>= shr;
25
+ v2 <<= 64 - shr;
26
+ } else {
27
+ v1 = (uint32_t)v1 >> shr;
28
+ v2 = (int32_t)v2 << (32 - shr);
29
+ }
30
+ return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
31
+ }
32
+ return false;
33
+}
34
+
35
static bool fold_exts(OptContext *ctx, TCGOp *op)
36
{
37
return fold_const1(ctx, op);
38
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
}
40
break;
41
42
- CASE_OP_32_64(extract2):
43
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
44
- uint64_t v1 = arg_info(op->args[1])->val;
45
- uint64_t v2 = arg_info(op->args[2])->val;
46
- int shr = op->args[3];
47
-
48
- if (opc == INDEX_op_extract2_i64) {
49
- tmp = (v1 >> shr) | (v2 << (64 - shr));
50
- } else {
51
- tmp = (int32_t)(((uint32_t)v1 >> shr) |
52
- ((uint32_t)v2 << (32 - shr)));
53
- }
54
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
55
- continue;
56
- }
57
- break;
58
-
59
default:
60
break;
61
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
63
CASE_OP_32_64(eqv):
64
done = fold_eqv(&ctx, op);
65
break;
66
+ CASE_OP_32_64(extract2):
67
+ done = fold_extract2(&ctx, op);
68
+ break;
69
CASE_OP_32_64(ext8s):
70
CASE_OP_32_64(ext16s):
71
case INDEX_op_ext32s_i64:
72
--
73
2.25.1
74
75
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 48 ++++++++++++++++++++++++++++++------------------
6
1 file changed, 30 insertions(+), 18 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_extract(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t;
20
+
21
+ t = arg_info(op->args[1])->val;
22
+ t = extract64(t, op->args[2], op->args[3]);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_extract2(OptContext *ctx, TCGOp *op)
29
{
30
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
32
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
33
}
34
35
+static bool fold_sextract(OptContext *ctx, TCGOp *op)
36
+{
37
+ if (arg_is_const(op->args[1])) {
38
+ uint64_t t;
39
+
40
+ t = arg_info(op->args[1])->val;
41
+ t = sextract64(t, op->args[2], op->args[3]);
42
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
43
+ }
44
+ return false;
45
+}
46
+
47
static bool fold_shift(OptContext *ctx, TCGOp *op)
48
{
49
return fold_const2(ctx, op);
50
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
51
}
52
break;
53
54
- CASE_OP_32_64(extract):
55
- if (arg_is_const(op->args[1])) {
56
- tmp = extract64(arg_info(op->args[1])->val,
57
- op->args[2], op->args[3]);
58
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
59
- continue;
60
- }
61
- break;
62
-
63
- CASE_OP_32_64(sextract):
64
- if (arg_is_const(op->args[1])) {
65
- tmp = sextract64(arg_info(op->args[1])->val,
66
- op->args[2], op->args[3]);
67
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
68
- continue;
69
- }
70
- break;
71
-
72
default:
73
break;
74
75
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
76
CASE_OP_32_64(eqv):
77
done = fold_eqv(&ctx, op);
78
break;
79
+ CASE_OP_32_64(extract):
80
+ done = fold_extract(&ctx, op);
81
+ break;
82
CASE_OP_32_64(extract2):
83
done = fold_extract2(&ctx, op);
84
break;
85
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
86
case INDEX_op_setcond2_i32:
87
done = fold_setcond2(&ctx, op);
88
break;
89
+ CASE_OP_32_64(sextract):
90
+ done = fold_sextract(&ctx, op);
91
+ break;
92
CASE_OP_32_64_VEC(sub):
93
done = fold_sub(&ctx, op);
94
break;
95
--
96
2.25.1
97
98
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 25 +++++++++++++++----------
6
1 file changed, 15 insertions(+), 10 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
13
return fold_const1(ctx, op);
14
}
15
16
+static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
+ uint64_t t1 = arg_info(op->args[1])->val;
20
+ uint64_t t2 = arg_info(op->args[2])->val;
21
+
22
+ t1 = deposit64(t1, op->args[3], op->args[4], t2);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
24
+ }
25
+ return false;
26
+}
27
+
28
static bool fold_divide(OptContext *ctx, TCGOp *op)
29
{
30
return fold_const2(ctx, op);
31
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
32
}
33
break;
34
35
- CASE_OP_32_64(deposit):
36
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
37
- tmp = deposit64(arg_info(op->args[1])->val,
38
- op->args[3], op->args[4],
39
- arg_info(op->args[2])->val);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
CASE_OP_32_64(ctpop):
50
done = fold_ctpop(&ctx, op);
51
break;
52
+ CASE_OP_32_64(deposit):
53
+ done = fold_deposit(&ctx, op);
54
+ break;
55
CASE_OP_32_64(div):
56
CASE_OP_32_64(divu):
57
done = fold_divide(&ctx, op);
58
--
59
2.25.1
60
61
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 32 ++++++++++++++++++--------------
6
1 file changed, 18 insertions(+), 14 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
13
return true;
14
}
15
16
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ if (t != 0) {
22
+ t = do_constant_folding(op->opc, t, 0);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
26
+ }
27
+ return false;
28
+}
29
+
30
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
31
{
32
return fold_const1(ctx, op);
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
}
35
break;
36
37
- CASE_OP_32_64(clz):
38
- CASE_OP_32_64(ctz):
39
- if (arg_is_const(op->args[1])) {
40
- TCGArg v = arg_info(op->args[1])->val;
41
- if (v != 0) {
42
- tmp = do_constant_folding(opc, v, 0);
43
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
44
- } else {
45
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
46
- }
47
- continue;
48
- }
49
- break;
50
-
51
default:
52
break;
53
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
case INDEX_op_brcond2_i32:
56
done = fold_brcond2(&ctx, op);
57
break;
58
+ CASE_OP_32_64(clz):
59
+ CASE_OP_32_64(ctz):
60
+ done = fold_count_zeros(&ctx, op);
61
+ break;
62
CASE_OP_32_64(ctpop):
63
done = fold_ctpop(&ctx, op);
64
break;
65
--
66
2.25.1
67
68
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 27 ++++++++++++++++-----------
6
1 file changed, 16 insertions(+), 11 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
13
return false;
14
}
15
16
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ t = do_constant_folding(op->opc, t, op->args[2]);
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_call(OptContext *ctx, TCGOp *op)
28
{
29
TCGContext *s = ctx->tcg;
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
31
}
32
break;
33
34
- CASE_OP_32_64(bswap16):
35
- CASE_OP_32_64(bswap32):
36
- case INDEX_op_bswap64_i64:
37
- if (arg_is_const(op->args[1])) {
38
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
39
- op->args[2]);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
44
-
45
default:
46
break;
47
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
49
case INDEX_op_brcond2_i32:
50
done = fold_brcond2(&ctx, op);
51
break;
52
+ CASE_OP_32_64(bswap16):
53
+ CASE_OP_32_64(bswap32):
54
+ case INDEX_op_bswap64_i64:
55
+ done = fold_bswap(&ctx, op);
56
+ break;
57
CASE_OP_32_64(clz):
58
CASE_OP_32_64(ctz):
59
done = fold_count_zeros(&ctx, op);
60
--
61
2.25.1
62
63
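The bswap fold simply evaluates the byte swap at translation time when the input is a known constant. A minimal standalone illustration of what is computed per width, ignoring the flags operand that bswap16/32 also carry (compiler builtins, not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* What fold_bswap() produces for constant inputs, per width. */
    assert(__builtin_bswap16(0x1122) == 0x2211);
    assert(__builtin_bswap32(0x11223344u) == 0x44332211u);
    assert(__builtin_bswap64(0x1122334455667788ull)
           == 0x8877665544332211ull);
    return 0;
}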
Deleted patch
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
6
1 file changed, 31 insertions(+), 22 deletions(-)
7
1
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
13
return fold_const2(ctx, op);
14
}
15
16
+static bool fold_dup(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+ t = dup_const(TCGOP_VECE(op), t);
21
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
22
+ }
23
+ return false;
24
+}
25
+
26
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
+{
28
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
29
+ uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
30
+ arg_info(op->args[2])->val);
31
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
32
+ }
33
+
34
+ if (args_are_copies(op->args[1], op->args[2])) {
35
+ op->opc = INDEX_op_dup_vec;
36
+ TCGOP_VECE(op) = MO_32;
37
+ }
38
+ return false;
39
+}
40
+
41
static bool fold_eqv(OptContext *ctx, TCGOp *op)
42
{
43
return fold_const2(ctx, op);
44
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
45
done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
46
break;
47
48
- case INDEX_op_dup_vec:
49
- if (arg_is_const(op->args[1])) {
50
- tmp = arg_info(op->args[1])->val;
51
- tmp = dup_const(TCGOP_VECE(op), tmp);
52
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
53
- continue;
54
- }
55
- break;
56
-
57
- case INDEX_op_dup2_vec:
58
- assert(TCG_TARGET_REG_BITS == 32);
59
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
60
- tcg_opt_gen_movi(&ctx, op, op->args[0],
61
- deposit64(arg_info(op->args[1])->val, 32, 32,
62
- arg_info(op->args[2])->val));
63
- continue;
64
- } else if (args_are_copies(op->args[1], op->args[2])) {
65
- op->opc = INDEX_op_dup_vec;
66
- TCGOP_VECE(op) = MO_32;
67
- }
68
- break;
69
-
70
default:
71
break;
72
73
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
CASE_OP_32_64(divu):
75
done = fold_divide(&ctx, op);
76
break;
77
+ case INDEX_op_dup_vec:
78
+ done = fold_dup(&ctx, op);
79
+ break;
80
+ case INDEX_op_dup2_vec:
81
+ done = fold_dup2(&ctx, op);
82
+ break;
83
CASE_OP_32_64(eqv):
84
done = fold_eqv(&ctx, op);
85
break;
86
--
87
2.25.1
88
89
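fold_dup() relies on dup_const(), which broadcasts a small constant element across a 64-bit value according to the vector element size, while fold_dup2() glues two 32-bit constants into one 64-bit value with deposit64(). A standalone sketch of the broadcast behaviour (dup_const_demo is a made-up name; vece 0/1/2/3 selects 8/16/32/64-bit elements):

#include <assert.h>
#include <stdint.h>

static uint64_t dup_const_demo(unsigned vece, uint64_t val)
{
    switch (vece) {
    case 0: return 0x0101010101010101ull * (uint8_t)val;   /* 8 x byte */
    case 1: return 0x0001000100010001ull * (uint16_t)val;  /* 4 x half */
    case 2: return 0x0000000100000001ull * (uint32_t)val;  /* 2 x word */
    default: return val;                                    /* as-is    */
    }
}

int main(void)
{
    assert(dup_const_demo(0, 0xab) == 0xababababababababull);
    assert(dup_const_demo(2, 0x12345678u) == 0x1234567812345678ull);
    return 0;
}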
Deleted patch
1
This is the final entry in the main switch that was in a
2
different form. After this, we have the option to convert
3
the switch into a function dispatch table.
4
1
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 27 ++++++++++++++-------------
10
1 file changed, 14 insertions(+), 13 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
17
return true;
18
}
19
20
+static bool fold_mov(OptContext *ctx, TCGOp *op)
21
+{
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
23
+}
24
+
25
static bool fold_movcond(OptContext *ctx, TCGOp *op)
26
{
27
TCGOpcode opc = op->opc;
28
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
29
break;
30
}
31
32
- /* Propagate constants through copy operations and do constant
33
- folding. Constants will be substituted to arguments by register
34
- allocator where needed and possible. Also detect copies. */
35
+ /*
36
+ * Process each opcode.
37
+ * Sorted alphabetically by opcode as much as possible.
38
+ */
39
switch (opc) {
40
- CASE_OP_32_64_VEC(mov):
41
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
42
- break;
43
-
44
- default:
45
- break;
46
-
47
- /* ---------------------------------------------------------- */
48
- /* Sorted alphabetically by opcode as much as possible. */
49
-
50
CASE_OP_32_64_VEC(add):
51
done = fold_add(&ctx, op);
52
break;
53
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
54
case INDEX_op_mb:
55
done = fold_mb(&ctx, op);
56
break;
57
+ CASE_OP_32_64_VEC(mov):
58
+ done = fold_mov(&ctx, op);
59
+ break;
60
CASE_OP_32_64(movcond):
61
done = fold_movcond(&ctx, op);
62
break;
63
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
64
CASE_OP_32_64_VEC(xor):
65
done = fold_xor(&ctx, op);
66
break;
67
+ default:
68
+ break;
69
}
70
71
if (!done) {
72
--
73
2.25.1
74
75
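The commit message above points out that, with every case now a fold_* helper of the same shape, the remaining switch could later become a function dispatch table. The toy model below sketches that idea with a made-up mini-IR; none of these names are QEMU's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { OP_ADD, OP_SUB, OP_XOR, OP_COUNT } Opc;
typedef struct { Opc opc; uint64_t a, b, result; } Op;
typedef bool (*FoldFn)(Op *op);

static bool toy_fold_add(Op *op) { op->result = op->a + op->b; return true; }
static bool toy_fold_sub(Op *op) { op->result = op->a - op->b; return true; }
static bool toy_fold_xor(Op *op) { op->result = op->a ^ op->b; return true; }

/* One handler per opcode replaces the big switch. */
static const FoldFn fold_table[OP_COUNT] = {
    [OP_ADD] = toy_fold_add,
    [OP_SUB] = toy_fold_sub,
    [OP_XOR] = toy_fold_xor,
};

int main(void)
{
    Op op = { .opc = OP_XOR, .a = 0xff, .b = 0x0f };
    FoldFn fn = fold_table[op.opc];
    bool done = fn ? fn(&op) : false;
    printf("done=%d result=0x%llx\n", done, (unsigned long long)op.result);
    return 0;
}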
Deleted patch
1
Pull the "op r, a, a => movi r, 0" optimization into a function,
2
and use it in the outer opcode fold functions.
3
1
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
9
1 file changed, 24 insertions(+), 17 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
16
return false;
17
}
18
19
+/* If the binary operation has both arguments equal, fold to @i. */
20
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
+{
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
*/
31
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
32
33
static bool fold_andc(OptContext *ctx, TCGOp *op)
34
{
35
- return fold_const2(ctx, op);
36
+ if (fold_const2(ctx, op) ||
37
+ fold_xx_to_i(ctx, op, 0)) {
38
+ return true;
39
+ }
40
+ return false;
41
}
42
43
static bool fold_brcond(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
45
46
static bool fold_sub(OptContext *ctx, TCGOp *op)
47
{
48
- return fold_const2(ctx, op);
49
+ if (fold_const2(ctx, op) ||
50
+ fold_xx_to_i(ctx, op, 0)) {
51
+ return true;
52
+ }
53
+ return false;
54
}
55
56
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
57
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
58
59
static bool fold_xor(OptContext *ctx, TCGOp *op)
60
{
61
- return fold_const2(ctx, op);
62
+ if (fold_const2(ctx, op) ||
63
+ fold_xx_to_i(ctx, op, 0)) {
64
+ return true;
65
+ }
66
+ return false;
67
}
68
69
/* Propagate constants and copies, fold constant expressions. */
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
break;
72
}
73
74
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
75
- switch (opc) {
76
- CASE_OP_32_64_VEC(andc):
77
- CASE_OP_32_64_VEC(sub):
78
- CASE_OP_32_64_VEC(xor):
79
- if (args_are_copies(op->args[1], op->args[2])) {
80
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
81
- continue;
82
- }
83
- break;
84
- default:
85
- break;
86
- }
87
-
88
/*
89
* Process each opcode.
90
* Sorted alphabetically by opcode as much as possible.
91
--
92
2.25.1
93
94
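The algebra behind fold_xx_to_i(ctx, op, 0) is that sub, xor and andc of a value with itself are always zero. The checks below are a plain-C restatement of those identities, not QEMU code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x123456789abcdef0ull;

    assert((a - a)  == 0);   /* sub  r, a, a => movi r, 0 */
    assert((a ^ a)  == 0);   /* xor  r, a, a => movi r, 0 */
    assert((a & ~a) == 0);   /* andc r, a, a => movi r, 0 */
    return 0;
}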
Deleted patch
1
Pull the "op r, a, a => mov r, a" optimization into a function,
2
and use it in the outer opcode fold functions.
3
1
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
9
1 file changed, 24 insertions(+), 15 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
16
return false;
17
}
18
19
+/* If the binary operation has both arguments equal, fold to identity. */
20
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
+{
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
+ *
31
+ * The ordering of the transformations should be:
32
+ * 1) those that produce a constant
33
+ * 2) those that produce a copy
34
+ * 3) those that produce information about the result value.
35
*/
36
37
static bool fold_add(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
39
40
static bool fold_and(OptContext *ctx, TCGOp *op)
41
{
42
- return fold_const2(ctx, op);
43
+ if (fold_const2(ctx, op) ||
44
+ fold_xx_to_x(ctx, op)) {
45
+ return true;
46
+ }
47
+ return false;
48
}
49
50
static bool fold_andc(OptContext *ctx, TCGOp *op)
51
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
52
53
static bool fold_or(OptContext *ctx, TCGOp *op)
54
{
55
- return fold_const2(ctx, op);
56
+ if (fold_const2(ctx, op) ||
57
+ fold_xx_to_x(ctx, op)) {
58
+ return true;
59
+ }
60
+ return false;
61
}
62
63
static bool fold_orc(OptContext *ctx, TCGOp *op)
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
65
break;
66
}
67
68
- /* Simplify expression for "op r, a, a => mov r, a" cases */
69
- switch (opc) {
70
- CASE_OP_32_64_VEC(or):
71
- CASE_OP_32_64_VEC(and):
72
- if (args_are_copies(op->args[1], op->args[2])) {
73
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
74
- continue;
75
- }
76
- break;
77
- default:
78
- break;
79
- }
80
-
81
/*
82
* Process each opcode.
83
* Sorted alphabetically by opcode as much as possible.
84
--
85
2.25.1
86
87
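Likewise, the identities behind fold_xx_to_x() are that and/or of a value with itself reproduce the value, so the op degenerates to a mov. A plain-C restatement, for illustration only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x123456789abcdef0ull;

    assert((a & a) == a);   /* and r, a, a => mov r, a */
    assert((a | a) == a);   /* or  r, a, a => mov r, a */
    return 0;
}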
Deleted patch
1
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
2
and use it in the outer opcode fold functions.
3
1
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 38 ++++++++++++++++++++------------------
9
1 file changed, 20 insertions(+), 18 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
16
return false;
17
}
18
19
+/* If the binary operation has second argument @i, fold to @i. */
20
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
21
+{
22
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
27
+
28
/* If the binary operation has both arguments equal, fold to @i. */
29
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
30
{
31
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
32
static bool fold_and(OptContext *ctx, TCGOp *op)
33
{
34
if (fold_const2(ctx, op) ||
35
+ fold_xi_to_i(ctx, op, 0) ||
36
fold_xx_to_x(ctx, op)) {
37
return true;
38
}
39
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
40
41
static bool fold_mul(OptContext *ctx, TCGOp *op)
42
{
43
- return fold_const2(ctx, op);
44
+ if (fold_const2(ctx, op) ||
45
+ fold_xi_to_i(ctx, op, 0)) {
46
+ return true;
47
+ }
48
+ return false;
49
}
50
51
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
52
{
53
- return fold_const2(ctx, op);
54
+ if (fold_const2(ctx, op) ||
55
+ fold_xi_to_i(ctx, op, 0)) {
56
+ return true;
57
+ }
58
+ return false;
59
}
60
61
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
63
continue;
64
}
65
66
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
67
- switch (opc) {
68
- CASE_OP_32_64_VEC(and):
69
- CASE_OP_32_64_VEC(mul):
70
- CASE_OP_32_64(muluh):
71
- CASE_OP_32_64(mulsh):
72
- if (arg_is_const(op->args[2])
73
- && arg_info(op->args[2])->val == 0) {
74
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
75
- continue;
76
- }
77
- break;
78
- default:
79
- break;
80
- }
81
-
82
/*
83
* Process each opcode.
84
* Sorted alphabetically by opcode as much as possible.
85
--
86
2.25.1
87
88
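fold_xi_to_i(ctx, op, 0) covers the cases where a constant zero second operand forces the result to zero: and, mul and the mul-high opcodes. A plain-C restatement, for illustration only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x123456789abcdef0ull;

    assert((a & 0) == 0);   /* and r, a, 0 => movi r, 0 */
    assert((a * 0) == 0);   /* mul r, a, 0 => movi r, 0 */
    /* muluh/mulsh of anything with 0 is likewise 0. */
    return 0;
}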
Deleted patch
1
Compute the type of the operation early.
2
1
3
There are at least 4 places that use a def->flags ladder
4
to determine the type of the operation being optimized.
5
6
There were two places that assumed !TCG_OPF_64BIT means
7
TCG_TYPE_I32, and so could potentially compute incorrect
8
results for vector operations.
9
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
14
1 file changed, 89 insertions(+), 60 deletions(-)
15
16
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/optimize.c
19
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
21
22
/* In flight values from optimization. */
23
uint64_t z_mask;
24
+ TCGType type;
25
} OptContext;
26
27
static inline TempOptInfo *ts_info(TCGTemp *ts)
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
- const TCGOpDef *def;
33
TempOptInfo *di;
34
TempOptInfo *si;
35
uint64_t z_mask;
36
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
37
reset_ts(dst_ts);
38
di = ts_info(dst_ts);
39
si = ts_info(src_ts);
40
- def = &tcg_op_defs[op->opc];
41
- if (def->flags & TCG_OPF_VECTOR) {
42
- new_op = INDEX_op_mov_vec;
43
- } else if (def->flags & TCG_OPF_64BIT) {
44
- new_op = INDEX_op_mov_i64;
45
- } else {
46
+
47
+ switch (ctx->type) {
48
+ case TCG_TYPE_I32:
49
new_op = INDEX_op_mov_i32;
50
+ break;
51
+ case TCG_TYPE_I64:
52
+ new_op = INDEX_op_mov_i64;
53
+ break;
54
+ case TCG_TYPE_V64:
55
+ case TCG_TYPE_V128:
56
+ case TCG_TYPE_V256:
57
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
58
+ new_op = INDEX_op_mov_vec;
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
}
63
op->opc = new_op;
64
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
65
op->args[0] = dst;
66
op->args[1] = src;
67
68
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
69
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
70
TCGArg dst, uint64_t val)
71
{
72
- const TCGOpDef *def = &tcg_op_defs[op->opc];
73
- TCGType type;
74
- TCGTemp *tv;
75
-
76
- if (def->flags & TCG_OPF_VECTOR) {
77
- type = TCGOP_VECL(op) + TCG_TYPE_V64;
78
- } else if (def->flags & TCG_OPF_64BIT) {
79
- type = TCG_TYPE_I64;
80
- } else {
81
- type = TCG_TYPE_I32;
82
- }
83
-
84
/* Convert movi to mov with constant temp. */
85
- tv = tcg_constant_internal(type, val);
86
+ TCGTemp *tv = tcg_constant_internal(ctx->type, val);
87
+
88
init_ts_info(ctx, tv);
89
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
90
}
91
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
92
}
93
}
94
95
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
96
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
97
+ uint64_t x, uint64_t y)
98
{
99
- const TCGOpDef *def = &tcg_op_defs[op];
100
uint64_t res = do_constant_folding_2(op, x, y);
101
- if (!(def->flags & TCG_OPF_64BIT)) {
102
+ if (type == TCG_TYPE_I32) {
103
res = (int32_t)res;
104
}
105
return res;
106
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
107
* Return -1 if the condition can't be simplified,
108
* and the result of the condition (0 or 1) if it can.
109
*/
110
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
111
+static int do_constant_folding_cond(TCGType type, TCGArg x,
112
TCGArg y, TCGCond c)
113
{
114
uint64_t xv = arg_info(x)->val;
115
uint64_t yv = arg_info(y)->val;
116
117
if (arg_is_const(x) && arg_is_const(y)) {
118
- const TCGOpDef *def = &tcg_op_defs[op];
119
- tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
120
- if (def->flags & TCG_OPF_64BIT) {
121
- return do_constant_folding_cond_64(xv, yv, c);
122
- } else {
123
+ switch (type) {
124
+ case TCG_TYPE_I32:
125
return do_constant_folding_cond_32(xv, yv, c);
126
+ case TCG_TYPE_I64:
127
+ return do_constant_folding_cond_64(xv, yv, c);
128
+ default:
129
+ /* Only scalar comparisons are optimizable */
130
+ return -1;
131
}
132
} else if (args_are_copies(x, y)) {
133
return do_constant_folding_cond_eq(c);
134
@@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
135
uint64_t t;
136
137
t = arg_info(op->args[1])->val;
138
- t = do_constant_folding(op->opc, t, 0);
139
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
140
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
141
}
142
return false;
143
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
144
uint64_t t1 = arg_info(op->args[1])->val;
145
uint64_t t2 = arg_info(op->args[2])->val;
146
147
- t1 = do_constant_folding(op->opc, t1, t2);
148
+ t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
149
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
150
}
151
return false;
152
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
153
static bool fold_brcond(OptContext *ctx, TCGOp *op)
154
{
155
TCGCond cond = op->args[2];
156
- int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
157
+ int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
158
159
if (i == 0) {
160
tcg_op_remove(ctx->tcg, op);
161
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
162
* Simplify EQ/NE comparisons where one of the pairs
163
* can be simplified.
164
*/
165
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
166
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
167
op->args[2], cond);
168
switch (i ^ inv) {
169
case 0:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
171
goto do_brcond_high;
172
}
173
174
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
175
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
176
op->args[3], cond);
177
switch (i ^ inv) {
178
case 0:
179
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
180
if (arg_is_const(op->args[1])) {
181
uint64_t t = arg_info(op->args[1])->val;
182
183
- t = do_constant_folding(op->opc, t, op->args[2]);
184
+ t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
185
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
186
}
187
return false;
188
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
189
uint64_t t = arg_info(op->args[1])->val;
190
191
if (t != 0) {
192
- t = do_constant_folding(op->opc, t, 0);
193
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
194
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
195
}
196
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
197
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
198
199
static bool fold_movcond(OptContext *ctx, TCGOp *op)
200
{
201
- TCGOpcode opc = op->opc;
202
TCGCond cond = op->args[5];
203
- int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
204
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
205
206
if (i >= 0) {
207
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
208
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
209
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
210
uint64_t tv = arg_info(op->args[3])->val;
211
uint64_t fv = arg_info(op->args[4])->val;
212
+ TCGOpcode opc;
213
214
- opc = (opc == INDEX_op_movcond_i32
215
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
216
+ switch (ctx->type) {
217
+ case TCG_TYPE_I32:
218
+ opc = INDEX_op_setcond_i32;
219
+ break;
220
+ case TCG_TYPE_I64:
221
+ opc = INDEX_op_setcond_i64;
222
+ break;
223
+ default:
224
+ g_assert_not_reached();
225
+ }
226
227
if (tv == 1 && fv == 0) {
228
op->opc = opc;
229
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
230
static bool fold_setcond(OptContext *ctx, TCGOp *op)
231
{
232
TCGCond cond = op->args[3];
233
- int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
234
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
235
236
if (i >= 0) {
237
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
238
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
239
* Simplify EQ/NE comparisons where one of the pairs
240
* can be simplified.
241
*/
242
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
243
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
244
op->args[3], cond);
245
switch (i ^ inv) {
246
case 0:
247
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
248
goto do_setcond_high;
249
}
250
251
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
252
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
253
op->args[4], cond);
254
switch (i ^ inv) {
255
case 0:
256
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
257
init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
258
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
259
260
+ /* Pre-compute the type of the operation. */
261
+ if (def->flags & TCG_OPF_VECTOR) {
262
+ ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
263
+ } else if (def->flags & TCG_OPF_64BIT) {
264
+ ctx.type = TCG_TYPE_I64;
265
+ } else {
266
+ ctx.type = TCG_TYPE_I32;
267
+ }
268
+
269
/* For commutative operations make constant second argument */
270
switch (opc) {
271
CASE_OP_32_64_VEC(add):
272
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
273
/* Proceed with possible constant folding. */
274
break;
275
}
276
- if (opc == INDEX_op_sub_i32) {
277
+ switch (ctx.type) {
278
+ case TCG_TYPE_I32:
279
neg_op = INDEX_op_neg_i32;
280
have_neg = TCG_TARGET_HAS_neg_i32;
281
- } else if (opc == INDEX_op_sub_i64) {
282
+ break;
283
+ case TCG_TYPE_I64:
284
neg_op = INDEX_op_neg_i64;
285
have_neg = TCG_TARGET_HAS_neg_i64;
286
- } else if (TCG_TARGET_HAS_neg_vec) {
287
- TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
288
- unsigned vece = TCGOP_VECE(op);
289
- neg_op = INDEX_op_neg_vec;
290
- have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
291
- } else {
292
break;
293
+ case TCG_TYPE_V64:
294
+ case TCG_TYPE_V128:
295
+ case TCG_TYPE_V256:
296
+ neg_op = INDEX_op_neg_vec;
297
+ have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
298
+ TCGOP_VECE(op)) > 0;
299
+ break;
300
+ default:
301
+ g_assert_not_reached();
302
}
303
if (!have_neg) {
304
break;
305
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
306
TCGOpcode not_op;
307
bool have_not;
308
309
- if (def->flags & TCG_OPF_VECTOR) {
310
- not_op = INDEX_op_not_vec;
311
- have_not = TCG_TARGET_HAS_not_vec;
312
- } else if (def->flags & TCG_OPF_64BIT) {
313
- not_op = INDEX_op_not_i64;
314
- have_not = TCG_TARGET_HAS_not_i64;
315
- } else {
316
+ switch (ctx.type) {
317
+ case TCG_TYPE_I32:
318
not_op = INDEX_op_not_i32;
319
have_not = TCG_TARGET_HAS_not_i32;
320
+ break;
321
+ case TCG_TYPE_I64:
322
+ not_op = INDEX_op_not_i64;
323
+ have_not = TCG_TARGET_HAS_not_i64;
324
+ break;
325
+ case TCG_TYPE_V64:
326
+ case TCG_TYPE_V128:
327
+ case TCG_TYPE_V256:
328
+ not_op = INDEX_op_not_vec;
329
+ have_not = TCG_TARGET_HAS_not_vec;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
}
334
if (!have_not) {
335
break;
336
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
337
below, we can ignore high bits, but for further optimizations we
338
need to record that the high bits contain garbage. */
339
partmask = z_mask;
340
- if (!(def->flags & TCG_OPF_64BIT)) {
341
+ if (ctx.type == TCG_TYPE_I32) {
342
z_mask |= ~(tcg_target_ulong)0xffffffffu;
343
partmask &= 0xffffffffu;
344
affected &= 0xffffffffu;
345
--
346
2.25.1
347
348
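One visible effect of carrying ctx->type instead of re-deriving it from def->flags is in do_constant_folding(): results of TCG_TYPE_I32 operations are truncated and sign-extended into the 64-bit container, while vector types are no longer mistaken for 32-bit ones. A standalone sketch of that truncation step (fold_result_demo is a made-up name, not the QEMU function):

#include <assert.h>
#include <stdint.h>

/* Model of the final step of do_constant_folding(): 32-bit ops keep
 * only the low 32 bits, sign-extended into the 64-bit container. */
static uint64_t fold_result_demo(int is_i32, uint64_t res)
{
    return is_i32 ? (uint64_t)(int32_t)res : res;
}

int main(void)
{
    /* The 64-bit intermediate 0x1ffffffff truncates to 32-bit -1. */
    assert(fold_result_demo(1, 0x1ffffffffull) == 0xffffffffffffffffull);
    assert(fold_result_demo(0, 0x1ffffffffull) == 0x1ffffffffull);
    return 0;
}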
Deleted patch
1
Pull the "op r, a, i => mov r, a" optimization into a function,
2
and use it in the outermost logical operations.
3
1
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
8
1 file changed, 26 insertions(+), 35 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
15
return false;
16
}
17
18
+/* If the binary operation has second argument @i, fold to identity. */
19
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
20
+{
21
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
23
+ }
24
+ return false;
25
+}
26
+
27
/* If the binary operation has second argument @i, fold to NOT. */
28
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
29
{
30
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
31
32
static bool fold_add(OptContext *ctx, TCGOp *op)
33
{
34
- return fold_const2(ctx, op);
35
+ if (fold_const2(ctx, op) ||
36
+ fold_xi_to_x(ctx, op, 0)) {
37
+ return true;
38
+ }
39
+ return false;
40
}
41
42
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
43
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
44
{
45
if (fold_const2(ctx, op) ||
46
fold_xi_to_i(ctx, op, 0) ||
47
+ fold_xi_to_x(ctx, op, -1) ||
48
fold_xx_to_x(ctx, op)) {
49
return true;
50
}
51
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
52
{
53
if (fold_const2(ctx, op) ||
54
fold_xx_to_i(ctx, op, 0) ||
55
+ fold_xi_to_x(ctx, op, 0) ||
56
fold_ix_to_not(ctx, op, -1)) {
57
return true;
58
}
59
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
60
static bool fold_eqv(OptContext *ctx, TCGOp *op)
61
{
62
if (fold_const2(ctx, op) ||
63
+ fold_xi_to_x(ctx, op, -1) ||
64
fold_xi_to_not(ctx, op, 0)) {
65
return true;
66
}
67
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
68
static bool fold_or(OptContext *ctx, TCGOp *op)
69
{
70
if (fold_const2(ctx, op) ||
71
+ fold_xi_to_x(ctx, op, 0) ||
72
fold_xx_to_x(ctx, op)) {
73
return true;
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
76
static bool fold_orc(OptContext *ctx, TCGOp *op)
77
{
78
if (fold_const2(ctx, op) ||
79
+ fold_xi_to_x(ctx, op, -1) ||
80
fold_ix_to_not(ctx, op, 0)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
84
85
static bool fold_shift(OptContext *ctx, TCGOp *op)
86
{
87
- return fold_const2(ctx, op);
88
+ if (fold_const2(ctx, op) ||
89
+ fold_xi_to_x(ctx, op, 0)) {
90
+ return true;
91
+ }
92
+ return false;
93
}
94
95
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
96
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
97
{
98
if (fold_const2(ctx, op) ||
99
fold_xx_to_i(ctx, op, 0) ||
100
+ fold_xi_to_x(ctx, op, 0) ||
101
fold_sub_to_neg(ctx, op)) {
102
return true;
103
}
104
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
105
{
106
if (fold_const2(ctx, op) ||
107
fold_xx_to_i(ctx, op, 0) ||
108
+ fold_xi_to_x(ctx, op, 0) ||
109
fold_xi_to_not(ctx, op, -1)) {
110
return true;
111
}
112
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
113
break;
114
}
115
116
- /* Simplify expression for "op r, a, const => mov r, a" cases */
117
- switch (opc) {
118
- CASE_OP_32_64_VEC(add):
119
- CASE_OP_32_64_VEC(sub):
120
- CASE_OP_32_64_VEC(or):
121
- CASE_OP_32_64_VEC(xor):
122
- CASE_OP_32_64_VEC(andc):
123
- CASE_OP_32_64(shl):
124
- CASE_OP_32_64(shr):
125
- CASE_OP_32_64(sar):
126
- CASE_OP_32_64(rotl):
127
- CASE_OP_32_64(rotr):
128
- if (!arg_is_const(op->args[1])
129
- && arg_is_const(op->args[2])
130
- && arg_info(op->args[2])->val == 0) {
131
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
132
- continue;
133
- }
134
- break;
135
- CASE_OP_32_64_VEC(and):
136
- CASE_OP_32_64_VEC(orc):
137
- CASE_OP_32_64(eqv):
138
- if (!arg_is_const(op->args[1])
139
- && arg_is_const(op->args[2])
140
- && arg_info(op->args[2])->val == -1) {
141
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
142
- continue;
143
- }
144
- break;
145
- default:
146
- break;
147
- }
148
-
149
/* Simplify using known-zero bits. Currently only ops with a single
150
output argument is supported. */
151
z_mask = -1;
152
--
153
2.25.1
154
155
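fold_xi_to_x() collects the "second operand is a neutral element" cases: adding 0, or/xor/shift with 0, and/eqv with -1, and so on all just pass the first operand through. A plain-C restatement of a few of them, for illustration only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x123456789abcdef0ull;

    assert((a + 0)  == a);      /* add r, a, 0  => mov r, a */
    assert((a | 0)  == a);      /* or  r, a, 0  => mov r, a */
    assert((a ^ 0)  == a);      /* xor r, a, 0  => mov r, a */
    assert((a & ~0ull) == a);   /* and r, a, -1 => mov r, a */
    assert((a << 0) == a);      /* shl r, a, 0  => mov r, a */
    return 0;
}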