The following changes since commit 019fbfa4bcd2d3a835c241295e22ab2b5b56129b:

  Merge tag 'pull-misc-2025-04-24' of https://repo.or.cz/qemu/armbru into staging (2025-04-24 13:44:57 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20250425

for you to fetch changes up to 8038bbe0339fac90fa88970bf635cc9036cf6be9:

  tcg: Remove tcg_out_op (2025-04-25 13:18:24 -0700)

----------------------------------------------------------------
Convert TCG backend code generators to TCGOutOp structures,
decomposing the monolithic tcg_out_op functions.

----------------------------------------------------------------
Richard Henderson (159):
      tcg/loongarch64: Fix vec_val computation in tcg_target_const_match
      tcg/loongarch64: Improve constraints for TCG_CT_CONST_VCMP
      tcg/optimize: Introduce opt_insert_{before,after}
      tcg: Add TCGType to tcg_op_insert_{after,before}
      tcg: Add all_outop[]
      tcg: Use extract2 for cross-word 64-bit extract on 32-bit host
      tcg: Remove INDEX_op_ext{8,16,32}*
      tcg: Merge INDEX_op_mov_{i32,i64}
      tcg: Convert add to TCGOutOpBinary
      tcg: Merge INDEX_op_add_{i32,i64}
      tcg: Convert and to TCGOutOpBinary
      tcg: Merge INDEX_op_and_{i32,i64}
      tcg/optimize: Fold andc with immediate to and
      tcg/optimize: Emit add r,r,-1 in fold_setcond_tst_pow2
      tcg: Convert andc to TCGOutOpBinary
      tcg: Merge INDEX_op_andc_{i32,i64}
      tcg: Convert or to TCGOutOpBinary
      tcg: Merge INDEX_op_or_{i32,i64}
      tcg/optimize: Fold orc with immediate to or
      tcg: Convert orc to TCGOutOpBinary
      tcg: Merge INDEX_op_orc_{i32,i64}
      tcg: Convert xor to TCGOutOpBinary
      tcg: Merge INDEX_op_xor_{i32,i64}
      tcg/optimize: Fold eqv with immediate to xor
      tcg: Convert eqv to TCGOutOpBinary
      tcg: Merge INDEX_op_eqv_{i32,i64}
      tcg: Convert nand to TCGOutOpBinary
      tcg: Merge INDEX_op_nand_{i32,i64}
      tcg/loongarch64: Do not accept constant argument to nor
      tcg: Convert nor to TCGOutOpBinary
      tcg: Merge INDEX_op_nor_{i32,i64}
      tcg/arm: Fix constraints for sub
      tcg: Convert sub to TCGOutOpSubtract
      tcg: Merge INDEX_op_sub_{i32,i64}
      tcg: Convert neg to TCGOutOpUnary
      tcg: Merge INDEX_op_neg_{i32,i64}
      tcg: Convert not to TCGOutOpUnary
      tcg: Merge INDEX_op_not_{i32,i64}
      tcg: Convert mul to TCGOutOpBinary
      tcg: Merge INDEX_op_mul_{i32,i64}
      tcg: Convert muluh to TCGOutOpBinary
      tcg: Merge INDEX_op_muluh_{i32,i64}
      tcg: Convert mulsh to TCGOutOpBinary
      tcg: Merge INDEX_op_mulsh_{i32,i64}
      tcg: Convert div to TCGOutOpBinary
      tcg: Merge INDEX_op_div_{i32,i64}
      tcg: Convert divu to TCGOutOpBinary
      tcg: Merge INDEX_op_divu_{i32,i64}
      tcg: Convert div2 to TCGOutOpDivRem
      tcg: Merge INDEX_op_div2_{i32,i64}
      tcg: Convert divu2 to TCGOutOpDivRem
      tcg: Merge INDEX_op_divu2_{i32,i64}
      tcg: Convert rem to TCGOutOpBinary
      tcg: Merge INDEX_op_rem_{i32,i64}
      tcg: Convert remu to TCGOutOpBinary
      tcg: Merge INDEX_op_remu_{i32,i64}
      tcg: Convert shl to TCGOutOpBinary
      tcg: Merge INDEX_op_shl_{i32,i64}
      tcg: Convert shr to TCGOutOpBinary
      tcg: Merge INDEX_op_shr_{i32,i64}
      tcg: Convert sar to TCGOutOpBinary
      tcg: Merge INDEX_op_sar_{i32,i64}
      tcg: Do not require both rotr and rotl from the backend
      tcg: Convert rotl, rotr to TCGOutOpBinary
      tcg: Merge INDEX_op_rot{l,r}_{i32,i64}
      tcg: Convert clz to TCGOutOpBinary
      tcg: Merge INDEX_op_clz_{i32,i64}
      tcg: Convert ctz to TCGOutOpBinary
      tcg: Merge INDEX_op_ctz_{i32,i64}
      tcg: Convert ctpop to TCGOutOpUnary
      tcg: Merge INDEX_op_ctpop_{i32,i64}
      tcg: Convert muls2 to TCGOutOpMul2
      tcg: Merge INDEX_op_muls2_{i32,i64}
      tcg: Convert mulu2 to TCGOutOpMul2
      tcg: Merge INDEX_op_mulu2_{i32,i64}
      tcg/loongarch64: Support negsetcond
      tcg/mips: Support negsetcond
      tcg/tci: Support negsetcond
      tcg: Remove TCG_TARGET_HAS_negsetcond_{i32,i64}
      tcg: Convert setcond, negsetcond to TCGOutOpSetcond
      tcg: Merge INDEX_op_{neg}setcond_{i32,i64}
      tcg: Convert brcond to TCGOutOpBrcond
      tcg: Merge INDEX_op_brcond_{i32,i64}
      tcg: Convert movcond to TCGOutOpMovcond
      tcg: Merge INDEX_op_movcond_{i32,i64}
      tcg/ppc: Drop fallback constant loading in tcg_out_cmp
      tcg/arm: Expand arguments to tcg_out_cmp2
      tcg/ppc: Expand arguments to tcg_out_cmp2
      tcg: Convert brcond2_i32 to TCGOutOpBrcond2
      tcg: Convert setcond2_i32 to TCGOutOpSetcond2
      tcg: Convert bswap16 to TCGOutOpBswap
      tcg: Merge INDEX_op_bswap16_{i32,i64}
      tcg: Convert bswap32 to TCGOutOpBswap
      tcg: Merge INDEX_op_bswap32_{i32,i64}
      tcg: Convert bswap64 to TCGOutOpUnary
      tcg: Rename INDEX_op_bswap64_i64 to INDEX_op_bswap64
      tcg: Convert extract to TCGOutOpExtract
      tcg: Merge INDEX_op_extract_{i32,i64}
      tcg: Convert sextract to TCGOutOpExtract
      tcg: Merge INDEX_op_sextract_{i32,i64}
      tcg: Convert ext_i32_i64 to TCGOutOpUnary
      tcg: Convert extu_i32_i64 to TCGOutOpUnary
      tcg: Convert extrl_i64_i32 to TCGOutOpUnary
      tcg: Convert extrh_i64_i32 to TCGOutOpUnary
      tcg: Convert deposit to TCGOutOpDeposit
      tcg/aarch64: Improve deposit
      tcg: Merge INDEX_op_deposit_{i32,i64}
      tcg: Convert extract2 to TCGOutOpExtract2
      tcg: Merge INDEX_op_extract2_{i32,i64}
      tcg: Expand fallback add2 with 32-bit operations
      tcg: Expand fallback sub2 with 32-bit operations
      tcg: Do not default add2/sub2_i32 for 32-bit hosts
      tcg/mips: Drop support for add2/sub2
      tcg/riscv: Drop support for add2/sub2
      tcg: Move i into each for loop in liveness_pass_1
      tcg: Sink def, nb_iargs, nb_oargs loads in liveness_pass_1
      tcg: Add add/sub with carry opcodes and infrastructure
      tcg: Add TCGOutOp structures for add/sub carry opcodes
      tcg/optimize: Handle add/sub with carry opcodes
      tcg/optimize: With two const operands, prefer 0 in arg1
      tcg: Use add carry opcodes to expand add2
      tcg: Use sub carry opcodes to expand sub2
      tcg/i386: Honor carry_live in tcg_out_movi
      tcg/i386: Implement add/sub carry opcodes
      tcg/i386: Special case addci r, 0, 0
      tcg: Add tcg_gen_addcio_{i32,i64,tl}
      target/arm: Use tcg_gen_addcio_* for ADCS
      target/hppa: Use tcg_gen_addcio_i64
      target/microblaze: Use tcg_gen_addcio_i32
      target/openrisc: Use tcg_gen_addcio_* for ADDC
      target/ppc: Use tcg_gen_addcio_tl for ADD and SUBF
      target/s390x: Use tcg_gen_addcio_i64 for op_addc64
      target/sh4: Use tcg_gen_addcio_i32 for addc
      target/sparc: Use tcg_gen_addcio_tl for gen_op_addcc_int
      target/tricore: Use tcg_gen_addcio_i32 for gen_addc_CC
      tcg/aarch64: Implement add/sub carry opcodes
      tcg/arm: Implement add/sub carry opcodes
      tcg/ppc: Implement add/sub carry opcodes
      tcg/s390x: Honor carry_live in tcg_out_movi
      tcg/s390x: Add TCG_CT_CONST_N32
      tcg/s390x: Implement add/sub carry opcodes
      tcg/s390x: Use ADD LOGICAL WITH SIGNED IMMEDIATE
      tcg/sparc64: Hoist tcg_cond_to_bcond lookup out of tcg_out_movcc
      tcg/sparc64: Implement add/sub carry opcodes
      tcg/tci: Implement add/sub carry opcodes
      tcg: Remove add2/sub2 opcodes
      tcg: Formalize tcg_out_mb
      tcg: Formalize tcg_out_br
      tcg: Formalize tcg_out_goto_ptr
      tcg: Convert ld to TCGOutOpLoad
      tcg: Merge INDEX_op_ld*_{i32,i64}
      tcg: Convert st to TCGOutOpStore
      tcg: Merge INDEX_op_st*_{i32,i64}
      tcg: Stash MemOp size in TCGOP_FLAGS
      tcg: Remove INDEX_op_qemu_st8_*
      tcg: Merge INDEX_op_{ld,st}_{i32,i64,i128}
      tcg: Convert qemu_ld{2} to TCGOutOpLoad{2}
      tcg: Convert qemu_st{2} to TCGOutOpLdSt{2}
      tcg: Remove tcg_out_op

 include/tcg/tcg-op-common.h          |    4 +
 include/tcg/tcg-op.h                 |    2 +
 include/tcg/tcg-opc.h                |  212 ++--
 include/tcg/tcg.h                    |   15 +-
 tcg/aarch64/tcg-target-con-set.h     |    5 +-
 tcg/aarch64/tcg-target-has.h         |   57 -
 tcg/arm/tcg-target-con-set.h         |    5 +-
 tcg/arm/tcg-target-has.h             |   27 -
 tcg/i386/tcg-target-con-set.h        |    4 +-
 tcg/i386/tcg-target-con-str.h        |    2 +-
 tcg/i386/tcg-target-has.h            |   57 -
 tcg/loongarch64/tcg-target-con-set.h |    9 +-
 tcg/loongarch64/tcg-target-con-str.h |    1 -
 tcg/loongarch64/tcg-target-has.h     |   60 --
 tcg/mips/tcg-target-con-set.h        |   15 +-
 tcg/mips/tcg-target-con-str.h        |    1 -
 tcg/mips/tcg-target-has.h            |   64 --
 tcg/ppc/tcg-target-con-set.h         |   12 +-
 tcg/ppc/tcg-target-con-str.h         |    1 +
 tcg/ppc/tcg-target-has.h             |   59 -
 tcg/riscv/tcg-target-con-set.h       |    7 +-
 tcg/riscv/tcg-target-con-str.h       |    2 -
 tcg/riscv/tcg-target-has.h           |   61 --
 tcg/s390x/tcg-target-con-set.h       |    7 +-
 tcg/s390x/tcg-target-con-str.h       |    1 +
 tcg/s390x/tcg-target-has.h           |   57 -
 tcg/sparc64/tcg-target-con-set.h     |    9 +-
 tcg/sparc64/tcg-target-has.h         |   59 -
 tcg/tcg-has.h                        |   47 -
 tcg/tcg-internal.h                   |    4 +-
 tcg/tci/tcg-target-has.h             |   59 -
 target/arm/tcg/translate-a64.c       |   10 +-
 target/arm/tcg/translate-sve.c       |    2 +-
 target/arm/tcg/translate.c           |   17 +-
 target/hppa/translate.c              |   17 +-
 target/microblaze/translate.c        |   10 +-
 target/openrisc/translate.c          |    3 +-
 target/ppc/translate.c               |   11 +-
 target/s390x/tcg/translate.c         |    6 +-
 target/sh4/translate.c               |   36 +-
 target/sparc/translate.c             |    3 +-
 target/tricore/translate.c           |   12 +-
 tcg/optimize.c                       | 1080 +++++++++++--------
 tcg/tcg-op-ldst.c                    |   74 +-
 tcg/tcg-op.c                         | 1242 ++++++++++-----------
 tcg/tcg.c                            | 1313 +++++++++++++++-------
 tcg/tci.c                            |  766 +++++--------
 docs/devel/tcg-ops.rst               |  228 ++--
 target/i386/tcg/emit.c.inc           |   12 +-
 tcg/aarch64/tcg-target.c.inc         | 1626 ++++++++++++++++------------
 tcg/arm/tcg-target.c.inc             | 1556 ++++++++++++++++----------
 tcg/i386/tcg-target.c.inc            | 1850 ++++++++++++++++++-------------
 tcg/loongarch64/tcg-target.c.inc     | 1473 ++++++++++++++-----------
 tcg/mips/tcg-target.c.inc            | 1703 ++++++++++++++++-------------
 tcg/ppc/tcg-target.c.inc             | 1978 +++++++++++++++++++---------
 tcg/riscv/tcg-target.c.inc           | 1375 ++++++++++++-----------
 tcg/s390x/tcg-target.c.inc           | 1945 ++++++++++++++++++---------
 tcg/sparc64/tcg-target.c.inc         | 1306 ++++++++++++++--------
 tcg/tci/tcg-target-opc.h.inc         |   11 +
 tcg/tci/tcg-target.c.inc             | 1175 +++++++++++++-------
 60 files changed, 12156 insertions(+), 9609 deletions(-)
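
As a rough illustration of the refactoring pattern the series description names -- one
TCGOutOp-style emitter structure per opcode, dispatched through a shared table, instead
of one monolithic tcg_out_op switch per backend -- here is a toy sketch in plain C. All
names below (OutOpBinary, all_outop, out_add, out_sub) are simplified stand-ins, not the
actual QEMU definitions:

    /* Toy model of the per-opcode emitter pattern -- not QEMU code. */
    #include <assert.h>
    #include <stdio.h>

    typedef enum { OP_ADD, OP_SUB, OP_NB } Opcode;

    /* One emitter structure per opcode, replacing one giant switch. */
    typedef struct {
        void (*out_rrr)(int r0, int r1, int r2);  /* register-register form */
    } OutOpBinary;

    static void out_add(int r0, int r1, int r2)
    {
        printf("add r%d, r%d, r%d\n", r0, r1, r2);
    }

    static void out_sub(int r0, int r1, int r2)
    {
        printf("sub r%d, r%d, r%d\n", r0, r1, r2);
    }

    static const OutOpBinary outop_add = { .out_rrr = out_add };
    static const OutOpBinary outop_sub = { .out_rrr = out_sub };

    /* A shared table indexed by opcode, analogous to the series' all_outop[]. */
    static const OutOpBinary * const all_outop[OP_NB] = {
        [OP_ADD] = &outop_add,
        [OP_SUB] = &outop_sub,
    };

    int main(void)
    {
        /* Dispatch without a per-backend switch statement. */
        for (int op = OP_ADD; op < OP_NB; op++) {
            const OutOpBinary *out = all_outop[op];
            assert(out != NULL);  /* every integer opcode must be converted */
            out->out_rrr(0, 1, 2);
        }
        return 0;
    }
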
Only use vece for a vector constant.  This avoids an assertion
failure in sextract64 when vece contains garbage.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
     if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
         return true;
     }
-    int64_t vec_val = sextract64(val, 0, 8 << vece);
-    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
-        return true;
-    }
-    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
-        return true;
+    if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) {
+        int64_t vec_val = sextract64(val, 0, 8 << vece);
+        if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
+            return true;
+        }
+        if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
+            return true;
+        }
     }
     return false;
 }
--
2.43.0

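For context on the assertion mentioned above: QEMU's sextract64(value, start, length)
asserts that the extracted field lies within 64 bits, so a garbage vece makes
8 << vece exceed that bound. A self-contained toy reproduction (the helper below is
modeled on the real semantics but is not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    /* Toy version of sextract64: same precondition as the real one. */
    static int64_t sextract64_toy(uint64_t value, int start, int length)
    {
        assert(start >= 0 && length > 0 && length <= 64 - start);
        return ((int64_t)(value << (64 - length - start))) >> (64 - length);
    }

    int main(void)
    {
        unsigned vece = 3;                       /* MO_64: 8 << 3 == 64, fine */
        (void)sextract64_toy(-1, 0, 8 << vece);
        /* A garbage vece such as 5 would give length 256 and abort,
         * hence the guard added by the patch above. */
        return 0;
    }
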
Use the TCGCond given to tcg_target_const_match to exactly match
the supported constant.  Adjust the code generation to assume this
has been done -- recall that encode_*_insn contain assertions that
the constants are valid.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 38 ++++++++++++++++++--------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
     }
     if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) {
         int64_t vec_val = sextract64(val, 0, 8 << vece);
-        if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
-            return true;
+        if (ct & TCG_CT_CONST_VCMP) {
+            switch (cond) {
+            case TCG_COND_EQ:
+            case TCG_COND_LE:
+            case TCG_COND_LT:
+                return -0x10 <= vec_val && vec_val <= 0x0f;
+            case TCG_COND_LEU:
+            case TCG_COND_LTU:
+                return 0x00 <= vec_val && vec_val <= 0x1f;
+            default:
+                return false;
+            }
         }
         if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
             return true;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
              * Try vseqi/vslei/vslti
              */
             int64_t value = sextract64(a2, 0, 8 << vece);
-            if ((cond == TCG_COND_EQ ||
-                 cond == TCG_COND_LE ||
-                 cond == TCG_COND_LT) &&
-                (-0x10 <= value && value <= 0x0f)) {
+            switch (cond) {
+            case TCG_COND_EQ:
+            case TCG_COND_LE:
+            case TCG_COND_LT:
                 insn = cmp_vec_imm_insn[cond][lasx][vece];
                 tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
                 break;
-            } else if ((cond == TCG_COND_LEU ||
-                        cond == TCG_COND_LTU) &&
-                       (0x00 <= value && value <= 0x1f)) {
+            case TCG_COND_LEU:
+            case TCG_COND_LTU:
                 insn = cmp_vec_imm_insn[cond][lasx][vece];
                 tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
                 break;
+            default:
+                g_assert_not_reached();
             }
-
-            /*
-             * Fallback to:
-             *  dupi_vec temp, a2
-             *  cmp_vec a0, a1, temp, cond
-             */
-            tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2);
-            a2 = TCG_VEC_TMP0;
+            break;
         }
 
         insn = cmp_vec_insn[cond][lasx][vece];
--
2.43.0

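The ranges in the patch above correspond to the signed (si5) and unsigned (ui5) 5-bit
immediate fields of the LoongArch vector compare instructions. A small standalone check
of that logic (toy types and names, not QEMU code):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { COND_EQ, COND_LT, COND_LE, COND_LTU, COND_LEU } Cond;

    /* Which constants are encodable depends on the condition. */
    static bool const_ok_for_cmp(Cond cond, long long val)
    {
        switch (cond) {
        case COND_EQ:
        case COND_LE:
        case COND_LT:
            return -0x10 <= val && val <= 0x0f;   /* si5 */
        case COND_LEU:
        case COND_LTU:
            return 0x00 <= val && val <= 0x1f;    /* ui5 */
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d\n", const_ok_for_cmp(COND_LT, -16));  /* 1: si5 low bound */
        printf("%d\n", const_ok_for_cmp(COND_LT, 31));   /* 0: too big for si5 */
        printf("%d\n", const_ok_for_cmp(COND_LTU, 31));  /* 1: ui5 high bound */
        return 0;
    }
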
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  15 ++++----------
 tcg/optimize.c           |  28 +++++++------------------
 tcg/tcg-op.c             |  14 ++++++-------
 tcg/tcg.c                |  45 +++++++++++++---------------------
 tcg/tci.c                |  36 +++++++++-----------------------
 tcg/tci/tcg-target.c.inc |  20 +++++++-----------
 6 files changed, 50 insertions(+), 108 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(setcond, 1, 2, 1, TCG_OPF_INT)
 DEF(sextract, 1, 1, 2, TCG_OPF_INT)
 DEF(shl, 1, 2, 0, TCG_OPF_INT)
 DEF(shr, 1, 2, 0, TCG_OPF_INT)
+DEF(st8, 0, 2, 1, TCG_OPF_INT)
+DEF(st16, 0, 2, 1, TCG_OPF_INT)
+DEF(st32, 0, 2, 1, TCG_OPF_INT)
+DEF(st, 0, 2, 1, TCG_OPF_INT)
 DEF(sub, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
 
@@ -XXX,XX +XXX,XX @@ DEF(subb1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
 DEF(subbi, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
 DEF(subbio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
 
-/* load/store */
-DEF(st8_i32, 0, 2, 1, 0)
-DEF(st16_i32, 0, 2, 1, 0)
-DEF(st_i32, 0, 2, 1, 0)
-
 DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
 DEF(setcond2_i32, 1, 4, 1, 0)
 
-/* load/store */
-DEF(st8_i64, 0, 2, 1, 0)
-DEF(st16_i64, 0, 2, 1, 0)
-DEF(st32_i64, 0, 2, 1, 0)
-DEF(st_i64, 0, 2, 1, 0)
-
 /* size changing ops */
 DEF(ext_i32_i64, 1, 1, 0, 0)
 DEF(extu_i32_i64, 1, 1, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg-internal.h"
 #include "tcg-has.h"
 
-#define CASE_OP_32_64(x)                        \
-        glue(glue(case INDEX_op_, x), _i32):    \
-        glue(glue(case INDEX_op_, x), _i64)
-
-#define CASE_OP_32_64_VEC(x)                    \
-        glue(glue(case INDEX_op_, x), _i32):    \
-        glue(glue(case INDEX_op_, x), _i64):    \
-        glue(glue(case INDEX_op_, x), _vec)
 
 typedef struct MemCopyInfo {
     IntervalTreeNode itree;
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
     }
 
     switch (op->opc) {
-    CASE_OP_32_64(st8):
+    case INDEX_op_st8:
         lm1 = 0;
         break;
-    CASE_OP_32_64(st16):
+    case INDEX_op_st16:
         lm1 = 1;
         break;
-    case INDEX_op_st32_i64:
-    case INDEX_op_st_i32:
+    case INDEX_op_st32:
         lm1 = 3;
         break;
-    case INDEX_op_st_i64:
-        lm1 = 7;
-        break;
+    case INDEX_op_st:
     case INDEX_op_st_vec:
         lm1 = tcg_type_size(ctx->type) - 1;
         break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_ld_vec:
             done = fold_tcg_ld_memcopy(&ctx, op);
             break;
-        CASE_OP_32_64(st8):
-        CASE_OP_32_64(st16):
-        case INDEX_op_st32_i64:
+        case INDEX_op_st8:
+        case INDEX_op_st16:
+        case INDEX_op_st32:
             done = fold_tcg_st(&ctx, op);
             break;
-        case INDEX_op_st_i32:
-        case INDEX_op_st_i64:
+        case INDEX_op_st:
         case INDEX_op_st_vec:
             done = fold_tcg_st_memcopy(&ctx, op);
             break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 
 void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_st8, arg1, arg2, offset);
 }
 
 void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_st16, arg1, arg2, offset);
 }
 
 void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_st, arg1, arg2, offset);
 }
 
 
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_st8, arg1, arg2, offset);
     } else {
         tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset);
     }
@@ -XXX,XX +XXX,XX @@ void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_st16, arg1, arg2, offset);
     } else {
         tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset);
     }
@@ -XXX,XX +XXX,XX @@ void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_st32, arg1, arg2, offset);
     } else {
         tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset);
     }
@@ -XXX,XX +XXX,XX @@ void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_st, arg1, arg2, offset);
     } else if (HOST_BIG_ENDIAN) {
         tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
         tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_sextract, TCGOutOpExtract, outop_sextract),
     OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
     OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
-    OUTOP(INDEX_op_st_i32, TCGOutOpStore, outop_st),
-    OUTOP(INDEX_op_st_i64, TCGOutOpStore, outop_st),
-    OUTOP(INDEX_op_st8_i32, TCGOutOpStore, outop_st8),
-    OUTOP(INDEX_op_st8_i64, TCGOutOpStore, outop_st8),
-    OUTOP(INDEX_op_st16_i32, TCGOutOpStore, outop_st16),
-    OUTOP(INDEX_op_st16_i64, TCGOutOpStore, outop_st16),
+    OUTOP(INDEX_op_st, TCGOutOpStore, outop_st),
+    OUTOP(INDEX_op_st8, TCGOutOpStore, outop_st8),
+    OUTOP(INDEX_op_st16, TCGOutOpStore, outop_st16),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_subbi, TCGOutOpAddSubCarry, outop_subbi),
     OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio),
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
     OUTOP(INDEX_op_ld32u, TCGOutOpLoad, outop_ld32u),
     OUTOP(INDEX_op_ld32s, TCGOutOpLoad, outop_ld32s),
-    OUTOP(INDEX_op_st32_i64, TCGOutOpStore, outop_st),
+    OUTOP(INDEX_op_st32, TCGOutOpStore, outop_st),
 #endif
 };
 
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_or:
     case INDEX_op_setcond:
     case INDEX_op_sextract:
+    case INDEX_op_st8:
+    case INDEX_op_st16:
+    case INDEX_op_st:
     case INDEX_op_xor:
         return has_type;
 
-    case INDEX_op_st8_i32:
-    case INDEX_op_st16_i32:
-    case INDEX_op_st_i32:
-        return true;
-
     case INDEX_op_brcond2_i32:
     case INDEX_op_setcond2_i32:
         return TCG_TARGET_REG_BITS == 32;
 
     case INDEX_op_ld32u:
     case INDEX_op_ld32s:
-    case INDEX_op_st8_i64:
-    case INDEX_op_st16_i64:
-    case INDEX_op_st32_i64:
-    case INDEX_op_st_i64:
+    case INDEX_op_st32:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
     case INDEX_op_extrl_i64_i32:
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
             arg_ts->state = 0;
 
             if (NEED_SYNC_ARG(0)) {
-                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
-                                  ? INDEX_op_st_i32
-                                  : INDEX_op_st_i64);
-                TCGOp *sop = tcg_op_insert_after(s, op, sopc,
+                TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st,
                                                  arg_ts->type, 3);
                 TCGTemp *out_ts = dir_ts;
 
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
 
                 /* Sync outputs upon their last write. */
                 if (NEED_SYNC_ARG(i)) {
-                    TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
-                                      ? INDEX_op_st_i32
-                                      : INDEX_op_st_i64);
-                    TCGOp *sop = tcg_op_insert_after(s, op, sopc,
+                    TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st,
                                                      arg_ts->type, 3);
 
                     sop->args[0] = temp_arg(dir_ts);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;
 
-    case INDEX_op_st32_i64:
+    case INDEX_op_st32:
         /* Use tcg_op_st w/ I32. */
         type = TCG_TYPE_I32;
         /* fall through */
-    case INDEX_op_st_i32:
-    case INDEX_op_st_i64:
-    case INDEX_op_st8_i32:
-    case INDEX_op_st8_i64:
-    case INDEX_op_st16_i32:
-    case INDEX_op_st16_i64:
+    case INDEX_op_st:
+    case INDEX_op_st8:
+    case INDEX_op_st16:
         {
             const TCGOutOpStore *out =
                 container_of(all_outop[op->opc], TCGOutOpStore, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val,
     }
 }
 
-#if TCG_TARGET_REG_BITS == 64
-# define CASE_32_64(x) \
-        case glue(glue(INDEX_op_, x), _i64): \
-        case glue(glue(INDEX_op_, x), _i32):
-# define CASE_64(x) \
-        case glue(glue(INDEX_op_, x), _i64):
-#else
-# define CASE_32_64(x) \
-        case glue(glue(INDEX_op_, x), _i32):
-# define CASE_64(x)
-#endif
-
 /* Interpret pseudo code in tb. */
 /*
  * Disable CFI checks.
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(tcg_target_ulong *)ptr;
             break;
-        CASE_32_64(st8)
+        case INDEX_op_st8:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             *(uint8_t *)ptr = regs[r0];
             break;
-        CASE_32_64(st16)
+        case INDEX_op_st16:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             *(uint16_t *)ptr = regs[r0];
             break;
-        case INDEX_op_st_i32:
-        CASE_64(st32)
+        case INDEX_op_st:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
-            *(uint32_t *)ptr = regs[r0];
+            *(tcg_target_ulong *)ptr = regs[r0];
             break;
 
         /* Arithmetic operations (mixed 32/64 bit). */
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int32_t *)ptr;
             break;
-        case INDEX_op_st_i64:
+        case INDEX_op_st32:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
-            *(uint64_t *)ptr = regs[r0];
+            *(uint32_t *)ptr = regs[r0];
             break;
 
         /* Arithmetic operations (64 bit). */
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_ld16s:
     case INDEX_op_ld32u:
     case INDEX_op_ld:
-    case INDEX_op_st8_i32:
-    case INDEX_op_st8_i64:
-    case INDEX_op_st16_i32:
-    case INDEX_op_st16_i64:
-    case INDEX_op_st32_i64:
-    case INDEX_op_st_i32:
-    case INDEX_op_st_i64:
+    case INDEX_op_st8:
+    case INDEX_op_st16:
+    case INDEX_op_st32:
+    case INDEX_op_st:
         tci_args_rrs(insn, &r0, &r1, &s2);
         info->fprintf_func(info->stream, "%-12s  %s, %s, %d",
                            op_name, str_r(r0), str_r(r1), s2);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
 static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
                      TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_st8_i32, data, base, offset);
+    tcg_out_ldst(s, INDEX_op_st8, data, base, offset);
 }
 
 static const TCGOutOpStore outop_st8 = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st8 = {
 static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_st16_i32, data, base, offset);
+    tcg_out_ldst(s, INDEX_op_st16, data, base, offset);
 }
 
 static const TCGOutOpStore outop_st16 = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                        intptr_t offset)
 {
-    switch (type) {
-    case TCG_TYPE_I32:
-        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
-        break;
-#if TCG_TARGET_REG_BITS == 64
-    case TCG_TYPE_I64:
-        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
-        break;
-#endif
-    default:
-        g_assert_not_reached();
+    TCGOpcode op = INDEX_op_st;
+
+    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+        op = INDEX_op_st32;
     }
+    tcg_out_ldst(s, op, val, base, offset);
 }
 
 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
--
2.43.0

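The merge pattern in the patch above collapses per-width opcodes into one opcode whose
width comes from the operation's type. A standalone sketch of the idea (simplified
names, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef enum { TYPE_I32, TYPE_I64 } Type;

    /* One store entry point; the width is a property of the op's type,
     * not of the opcode name. */
    static void emit_st(Type type, uint64_t val, void *base, intptr_t ofs)
    {
        if (type == TYPE_I32) {
            uint32_t v32 = val;
            memcpy((char *)base + ofs, &v32, sizeof(v32));
        } else {
            memcpy((char *)base + ofs, &val, sizeof(val));
        }
    }

    int main(void)
    {
        uint64_t buf[2] = { 0, 0 };
        emit_st(TYPE_I32, 0x11223344u, buf, 0);            /* old st_i32 */
        emit_st(TYPE_I64, 0x5566778899aabbccull, buf, 8);  /* old st_i64 */
        printf("%016llx %016llx\n",
               (unsigned long long)buf[0], (unsigned long long)buf[1]);
        return 0;
    }
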
The i386 backend can now check TCGOP_FLAGS to select
the correct set of constraints.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h            | 4 ----
 tcg/aarch64/tcg-target-has.h     | 1 -
 tcg/arm/tcg-target-has.h         | 1 -
 tcg/i386/tcg-target-con-str.h    | 2 +-
 tcg/i386/tcg-target-has.h        | 3 ---
 tcg/loongarch64/tcg-target-has.h | 3 ---
 tcg/mips/tcg-target-has.h        | 1 -
 tcg/ppc/tcg-target-has.h         | 2 --
 tcg/riscv/tcg-target-has.h       | 1 -
 tcg/s390x/tcg-target-has.h       | 1 -
 tcg/sparc64/tcg-target-has.h     | 1 -
 tcg/tci/tcg-target-has.h         | 2 --
 tcg/optimize.c                   | 1 -
 tcg/tcg-op-ldst.c                | 9 ++-------
 tcg/tcg.c                        | 4 ----
 docs/devel/tcg-ops.rst           | 6 ------
 tcg/i386/tcg-target.c.inc        | 9 ++++-----
 17 files changed, 7 insertions(+), 44 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(qemu_ld_i64, DATA64_ARGS, 1, 1,
 DEF(qemu_st_i64, 0, DATA64_ARGS + 1, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
 
-/* Only used by i386 to cope with stupid register constraints. */
-DEF(qemu_st8_i32, 0, 1 + 1, 1,
-    TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-
 /* Only for 64-bit hosts at the moment. */
 DEF(qemu_ld_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
 DEF(qemu_st_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 
 /* optional instructions */
 #define TCG_TARGET_HAS_extr_i64_i32     0
-#define TCG_TARGET_HAS_qemu_st8_i32     0
 
 /*
  * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #endif
 
 /* optional instructions */
-#define TCG_TARGET_HAS_qemu_st8_i32     0
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 #define TCG_TARGET_HAS_tst              1
 
diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-con-str.h
+++ b/tcg/i386/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('r', ALL_GENERAL_REGS)
 REGS('x', ALL_VECTOR_REGS)
 REGS('q', ALL_BYTEL_REGS)         /* regs that can be used as a byte operand */
 REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_ld/st */
-REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS)   /* qemu_st8_i32 data */
+REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS)   /* qemu_st MO_8 data */
 
 /*
  * Define constraint letters for constants:
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #if TCG_TARGET_REG_BITS == 64
 /* Keep 32-bit values zero-extended in a register. */
 #define TCG_TARGET_HAS_extr_i64_i32     1
-#define TCG_TARGET_HAS_qemu_st8_i32     0
-#else
-#define TCG_TARGET_HAS_qemu_st8_i32     1
 #endif
 
 #define TCG_TARGET_HAS_qemu_ldst_i128 \
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 
 #include "host/cpuinfo.h"
 
-/* optional instructions */
-#define TCG_TARGET_HAS_qemu_st8_i32     0
-
 /* 64-bit operations */
 #define TCG_TARGET_HAS_extr_i64_i32     1
 
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #endif
 
 /* optional instructions detected at runtime */
-#define TCG_TARGET_HAS_qemu_st8_i32     0
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 #define TCG_TARGET_HAS_tst              0
 
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define have_vsx (cpuinfo & CPUINFO_VSX)
 
 /* optional instructions */
-#define TCG_TARGET_HAS_qemu_st8_i32     0
-
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extr_i64_i32     0
 #endif
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #include "host/cpuinfo.h"
 
 /* optional instructions */
-#define TCG_TARGET_HAS_qemu_st8_i32     0
 #define TCG_TARGET_HAS_extr_i64_i32     1
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 #define TCG_TARGET_HAS_tst              0
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 
 /* optional instructions */
 #define TCG_TARGET_HAS_extr_i64_i32     0
-#define TCG_TARGET_HAS_qemu_st8_i32     0
 #define TCG_TARGET_HAS_qemu_ldst_i128   1
 #define TCG_TARGET_HAS_tst              1
 
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #endif
 
 /* optional instructions */
-#define TCG_TARGET_HAS_qemu_st8_i32     0
 #define TCG_TARGET_HAS_extr_i64_i32     0
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 #define TCG_TARGET_HAS_tst              1
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #ifndef TCG_TARGET_HAS_H
 #define TCG_TARGET_HAS_H
 
-#define TCG_TARGET_HAS_qemu_st8_i32     0
-
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extr_i64_i32     0
 #endif /* TCG_TARGET_REG_BITS == 64 */
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_qemu_ld_i128:
             done = fold_qemu_ld_2reg(&ctx, op);
             break;
-        case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_st_i32:
         case INDEX_op_qemu_st_i64:
         case INDEX_op_qemu_st_i128:
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
 {
     TCGv_i32 swap = NULL;
     MemOpIdx orig_oi, oi;
-    TCGOpcode opc;
 
     tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 0, 1);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
 
-    if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
-        opc = INDEX_op_qemu_st8_i32;
-    } else {
-        opc = INDEX_op_qemu_st_i32;
-    }
-    gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
+    gen_ldst(INDEX_op_qemu_st_i32, TCG_TYPE_I32,
+             tcgv_i32_temp(val), NULL, addr, oi);
     plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
 
     if (swap) {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_qemu_st_i64:
         return true;
 
-    case INDEX_op_qemu_st8_i32:
-        return TCG_TARGET_HAS_qemu_st8_i32;
-
     case INDEX_op_qemu_ld_i128:
     case INDEX_op_qemu_st_i128:
         return TCG_TARGET_HAS_qemu_ldst_i128;
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
             break;
         case INDEX_op_qemu_ld_i32:
         case INDEX_op_qemu_st_i32:
-        case INDEX_op_qemu_st8_i32:
         case INDEX_op_qemu_ld_i64:
         case INDEX_op_qemu_st_i64:
         case INDEX_op_qemu_ld_i128:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ QEMU specific operations
 
    qemu_st_i32/i64/i128 *t0*, *t1*, *flags*, *memidx*
 
-   qemu_st8_i32 *t0*, *t1*, *flags*, *memidx*
-
    | Load data at the guest address *t1* into *t0*, or store data in *t0* at guest
      address *t1*.  The _i32/_i64/_i128 size applies to the size of the input/output
      register *t0* only.  The address *t1* is always sized according to the guest,
@@ -XXX,XX +XXX,XX @@ QEMU specific operations
      64-bit memory access specified in *flags*.
    |
    | For qemu_ld/st_i128, these are only supported for a 64-bit host.
-   |
-   | For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of
-     the memory operation is known to be 8-bit.  This allows the backend to
-     provide a different set of register constraints.
 
 
 Host vector operations
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
 
     switch (memop & MO_SIZE) {
     case MO_8:
-        /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
+        /* This is handled with constraints on INDEX_op_qemu_st_i32. */
         tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
         tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
                                  datalo, h.base, h.index, 0, h.ofs);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         break;
 
     case INDEX_op_qemu_st_i32:
-    case INDEX_op_qemu_st8_i32:
         tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_st_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
         return C_O1_I1(r, L);
 
     case INDEX_op_qemu_st_i32:
-        return C_O0_I2(L, L);
-    case INDEX_op_qemu_st8_i32:
-        return C_O0_I2(s, L);
+        return (TCG_TARGET_REG_BITS == 32 && flags == MO_8
+                ? C_O0_I2(s, L)
+                : C_O0_I2(L, L));
 
     case INDEX_op_qemu_ld_i64:
         return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L);
--
2.43.0

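The commit message above relies on the operand size now being visible to constraint
selection. A toy model of size-dependent constraint choice (hypothetical helper and
names, not QEMU's actual API):

    #include <stdio.h>

    enum { MO_8, MO_16, MO_32, MO_64 };

    /* On 32-bit x86 only %eax..%ebx have byte subregisters, so an 8-bit
     * store needs the tighter 's' constraint instead of the general 'L'. */
    static const char *st_constraints(int host_reg_bits, unsigned size_flag)
    {
        if (host_reg_bits == 32 && size_flag == MO_8) {
            return "s,L";
        }
        return "L,L";
    }

    int main(void)
    {
        printf("32-bit MO_8 : %s\n", st_constraints(32, MO_8));
        printf("32-bit MO_32: %s\n", st_constraints(32, MO_32));
        printf("64-bit MO_8 : %s\n", st_constraints(64, MO_8));
        return 0;
    }
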
All integer opcodes are now converted to TCGOutOp.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 12 +++---------
 tcg/aarch64/tcg-target.c.inc     |  7 -------
 tcg/arm/tcg-target.c.inc         |  7 -------
 tcg/i386/tcg-target.c.inc        |  7 -------
 tcg/loongarch64/tcg-target.c.inc |  7 -------
 tcg/mips/tcg-target.c.inc        |  7 -------
 tcg/ppc/tcg-target.c.inc         |  7 -------
 tcg/riscv/tcg-target.c.inc       |  7 -------
 tcg/s390x/tcg-target.c.inc       |  7 -------
 tcg/sparc64/tcg-target.c.inc     |  7 -------
 tcg/tci/tcg-target.c.inc         |  7 -------
 11 files changed, 3 insertions(+), 79 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, unsigned bar);
 static void tcg_out_br(TCGContext *s, TCGLabel *l);
 static void tcg_out_set_carry(TCGContext *s);
 static void tcg_out_set_borrow(TCGContext *s);
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS]);
 #if TCG_TARGET_MAYBE_vec
 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg src);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         break;
 
     default:
-        if (def->flags & TCG_OPF_VECTOR) {
-            tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
-                           TCGOP_VECE(op), new_args, const_args);
-        } else {
-            tcg_out_op(s, op->opc, type, new_args, const_args);
-        }
+        tcg_debug_assert(def->flags & TCG_OPF_VECTOR);
+        tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
+                       TCGOP_VECE(op), new_args, const_args);
         break;
     }
 
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
     .out_r = tcg_out_st,
 };
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            unsigned vecl, unsigned vece,
                            const TCGArg args[TCG_MAX_OP_ARGS],
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
     .out_r = tcg_out_st,
 };
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
     .out_i = tgen_st_i,
 };
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static int const umin_insn[4] = {
     OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
 };
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
     .out_r = tcg_out_st,
 };
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, TCGReg rs)
 {
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
 };
 
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
 };
 
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
 {
     switch (opc) {
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
 };
 
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            unsigned vecl, unsigned vece,
                            const TCGArg args[TCG_MAX_OP_ARGS],
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
 };
 
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg src)
 {
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
 };
 
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static TCGConstraintSetIndex
 tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
     TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_st2,
 };
 
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
-                       const TCGArg args[TCG_MAX_OP_ARGS],
-                       const int const_args[TCG_MAX_OP_ARGS])
-{
-    g_assert_not_reached();
-}
-
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                        intptr_t offset)
 {
--
2.43.0

diff view generated by jsdifflib
1
This will enable removing INDEX_op_qemu_st8_*_i32,
1
From: "Emilio G. Cota" <cota@braap.org>
2
by exposing the operand size to constraint selection.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
This plugs two 4-byte holes in 64-bit.
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
Signed-off-by: Emilio G. Cota <cota@braap.org>
6
Message-Id: <20181010144853.13005-4-cota@braap.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
tcg/tcg-op-ldst.c | 8 ++++++--
9
tcg/tcg.h | 2 +-
9
1 file changed, 6 insertions(+), 2 deletions(-)
10
1 file changed, 1 insertion(+), 1 deletion(-)
10
11
11
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
12
diff --git a/tcg/tcg.h b/tcg/tcg.h
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/tcg-op-ldst.c
14
--- a/tcg/tcg.h
14
+++ b/tcg/tcg-op-ldst.c
15
+++ b/tcg/tcg.h
15
@@ -XXX,XX +XXX,XX @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
16
@@ -XXX,XX +XXX,XX @@ typedef struct TCGProfile {
16
static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
17
int64_t tb_count;
17
TCGTemp *addr, MemOpIdx oi)
18
int64_t op_count; /* total insn count */
18
{
19
int op_count_max; /* max insn per TB */
19
+ TCGOp *op;
20
- int64_t temp_count;
20
+
21
int temp_count_max;
21
if (vh) {
22
+ int64_t temp_count;
22
- tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
23
int64_t del_op_count;
23
+ op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
24
int64_t code_in_len;
24
+ temp_arg(addr), oi);
25
int64_t code_out_len;
25
} else {
26
- tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
27
+ op = tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
28
}
29
+ TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
30
}
31
32
static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
33
--
26
--
34
2.43.0
27
2.17.2
35
28
36
29
diff view generated by jsdifflib
Most tcg backends already have a function for emitting a memory
barrier; the rest can split one out from tcg_out_op.
Call it directly from tcg_gen_code.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
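
For illustration (not part of the patch): the single argument of
INDEX_op_mb is a mask of TCG_MO_* ordering bits, so a backend's
tcg_out_mb() inspects which orderings must be enforced.  A minimal
sketch, with tcg_out_fence standing in for whatever barrier instruction
a hypothetical host provides:

    static void tcg_out_mb(TCGContext *s, unsigned a0)
    {
        /* Hypothetical host needing a fence only for store-after-load. */
        if (a0 & TCG_MO_ST_LD) {
            tcg_out_fence(s);
        }
    }
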
 tcg/tcg.c                        |  4 ++++
 tcg/aarch64/tcg-target.c.inc     |  6 +-----
 tcg/arm/tcg-target.c.inc         |  6 +-----
 tcg/i386/tcg-target.c.inc        |  5 +----
 tcg/loongarch64/tcg-target.c.inc |  6 +-----
 tcg/mips/tcg-target.c.inc        |  5 +----
 tcg/ppc/tcg-target.c.inc         |  6 +-----
 tcg/riscv/tcg-target.c.inc       |  6 +-----
 tcg/s390x/tcg-target.c.inc       | 20 +++++++++++---------
 tcg/sparc64/tcg-target.c.inc     |  6 +-----
 tcg/tci/tcg-target.c.inc         |  9 +++++----
 11 files changed, 28 insertions(+), 51 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
+static void tcg_out_mb(TCGContext *s, unsigned bar);
 static void tcg_out_set_carry(TCGContext *s);
 static void tcg_out_set_borrow(TCGContext *s);
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
         case INDEX_op_goto_tb:
             tcg_out_goto_tb(s, op->args[0]);
             break;
+        case INDEX_op_mb:
+            tcg_out_mb(s, op->args[0]);
+            break;
         case INDEX_op_dup2_vec:
             if (tcg_reg_alloc_dup2(s, op)) {
                 break;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
     tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
 }
 
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     static const uint32_t sync[] = {
         [0 ... TCG_MO_ALL] = DMB_ISH | DMB_LD | DMB_ST,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
         break;
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, a0);
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
     }
 }
 
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     if (use_armv7_instructions) {
         tcg_out32(s, INSN_DMB_ISH);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
         break;
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, args[0]);
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
     }
 }
 
-static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     /* Given the strength of x86 memory ordering, we only need care for
        store-load ordering. Experimentally, "lock orl $0,0(%esp)" is
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         break;
 #endif
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, a0);
-        break;
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
  * TCG intrinsics
  */
 
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     /* Baseline LoongArch only has the full barrier, unfortunately. */
     tcg_out_opc_dbar(s, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a3 = args[3];
 
     switch (opc) {
-    case INDEX_op_mb:
-        tcg_out_mb(s, a0);
-        break;
-
     case INDEX_op_goto_ptr:
         tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
         break;
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
     }
 }
 
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     static const MIPSInsn sync[] = {
         /* Note that SYNC_MB is a slightly weaker than SYNC 0,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, a0);
-        break;
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond2 outop_brcond2 = {
     .out = tgen_brcond2,
 };
 
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     uint32_t insn;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
         break;
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, args[0]);
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
     tcg_out_call_int(s, arg, false);
 }
 
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     tcg_insn_unit insn = OPC_FENCE;
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
         break;
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, a0);
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
    .out_rr = tgen_not,
 };
 
+static void tcg_out_mb(TCGContext *s, unsigned a0)
+{
+    /*
+     * The host memory model is quite strong, we simply need to
+     * serialize the instruction stream.
+     */
+    if (a0 & TCG_MO_ST_LD) {
+        /* fast-bcr-serialization facility (45) is present */
+        tcg_out_insn(s, RR, BCR, 14, 0);
+    }
+}
 
 # define OP_32_64(x) \
         case glue(glue(INDEX_op_,x),_i32): \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
         break;
 
-    case INDEX_op_mb:
-        /* The host memory model is quite strong, we simply need to
-           serialize the instruction stream. */
-        if (args[0] & TCG_MO_ST_LD) {
-            /* fast-bcr-serialization facility (45) is present */
-            tcg_out_insn(s, RR, BCR, 14, 0);
-        }
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
     tcg_out_nop(s);
 }
 
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
     tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_ldst(s, a0, a1, a2, STX);
         break;
 
-    case INDEX_op_mb:
-        tcg_out_mb(s, a0);
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond2 outop_setcond2 = {
     .out = tgen_setcond2,
 };
 
+static void tcg_out_mb(TCGContext *s, unsigned a0)
+{
+    tcg_out_op_v(s, INDEX_op_mb);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    case INDEX_op_mb:
-        tcg_out_op_v(s, opc);
-        break;
-
     case INDEX_op_call: /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
--
2.43.0
Add infrastructure for more consolidated output of opcodes.
The base structure allows for constraints to be either static
or dynamic, and for the existence of those constraints to
replace TCG_TARGET_HAS_* and the bulk of tcg_op_supported.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
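
For illustration (a sketch in the style of later patches in this series,
which add the real entries): a backend subclasses TCGOutOp per opcode
shape, and tcg/tcg.c points all_outop[] at the instance.  The out_rri
hook for immediates is elided here for brevity:

    /* Two-input integer operation. */
    typedef struct TCGOutOpBinary {
        TCGOutOp base;
        void (*out_rrr)(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2);
    } TCGOutOpBinary;

    /* Static constraint: one output, two register inputs. */
    static const TCGOutOpBinary outop_add = {
        .base.static_constraint = C_O1_I2(r, r, r),
        .out_rrr = tgen_add,
    };
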
 tcg/tcg.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 68 insertions(+), 8 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static int tcg_out_pool_finalize(TCGContext *s)
 #define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
 
 typedef enum {
+    C_Dynamic = -2,
     C_NotImplemented = -1,
 #include "tcg-target-con-set.h"
 } TCGConstraintSetIndex;
@@ -XXX,XX +XXX,XX @@ static const TCGConstraintSet constraint_sets[] = {
 #define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
 #define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
 
+/*
+ * TCGOutOp is the base class for a set of structures that describe how
+ * to generate code for a given TCGOpcode.
+ *
+ * @static_constraint:
+ *   C_NotImplemented: The TCGOpcode is not supported by the backend.
+ *   C_Dynamic: Use @dynamic_constraint to select a constraint set
+ *       based on any of @type, @flags, or host isa.
+ *   Otherwise: The register allocation constrains for the TCGOpcode.
+ *
+ * Subclasses of TCGOutOp will define a set of output routines that may
+ * be used.  Such routines will often be selected by the set of registers
+ * and constants that come out of register allocation.  The set of
+ * routines that are provided will guide the set of constraints that are
+ * legal.  In particular, assume that tcg_optimize() has done its job in
+ * swapping commutative operands and folding operations for which all
+ * operands are constant.
+ */
+typedef struct TCGOutOp {
+    TCGConstraintSetIndex static_constraint;
+    TCGConstraintSetIndex (*dynamic_constraint)(TCGType type, unsigned flags);
+} TCGOutOp;
+
 #include "tcg-target.c.inc"
 
 #ifndef CONFIG_TCG_INTERPRETER
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                   < MIN_TLB_MASK_TABLE_OFS);
 #endif
 
+/* Register allocation descriptions for every TCGOpcode. */
+static const TCGOutOp * const all_outop[NB_OPS] = {
+};
+
 /*
  * All TCG threads except the parent (i.e. the one that called tcg_context_init
  * and registered the target's TCG globals) must register with this function
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return has_type && TCG_TARGET_HAS_cmpsel_vec;
 
     default:
-        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
+        if (op < INDEX_op_last_generic) {
+            const TCGOutOp *outop;
+            TCGConstraintSetIndex con_set;
+
+            if (!has_type) {
+                return false;
+            }
+
+            outop = all_outop[op];
+            tcg_debug_assert(outop != NULL);
+
+            con_set = outop->static_constraint;
+            if (con_set == C_Dynamic) {
+                con_set = outop->dynamic_constraint(type, flags);
+            }
+            if (con_set >= 0) {
+                return true;
+            }
+            tcg_debug_assert(con_set == C_NotImplemented);
+            return false;
+        }
+        tcg_debug_assert(op < NB_OPS);
         return true;
+
+    case INDEX_op_last_generic:
+        g_assert_not_reached();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void process_constraint_sets(void)
 
 static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
 {
-    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGOpcode opc = op->opc;
+    TCGType type = TCGOP_TYPE(op);
+    unsigned flags = TCGOP_FLAGS(op);
+    const TCGOpDef *def = &tcg_op_defs[opc];
+    const TCGOutOp *outop = all_outop[opc];
     TCGConstraintSetIndex con_set;
 
-#ifdef CONFIG_DEBUG_TCG
-    assert(tcg_op_supported(op->opc, TCGOP_TYPE(op), TCGOP_FLAGS(op)));
-#endif
-
     if (def->flags & TCG_OPF_NOT_PRESENT) {
         return empty_cts;
     }
 
-    con_set = tcg_target_op_def(op->opc, TCGOP_TYPE(op), TCGOP_FLAGS(op));
-    tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
+    if (outop) {
+        con_set = outop->static_constraint;
+        if (con_set == C_Dynamic) {
+            con_set = outop->dynamic_constraint(type, flags);
+        }
+    } else {
+        con_set = tcg_target_op_def(opc, type, flags);
+    }
+    tcg_debug_assert(con_set >= 0);
+    tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
 
     /* The constraint arguments must match TCGOpcode arguments. */
     tcg_debug_assert(constraint_sets[con_set].nb_oargs == def->nb_oargs);
--
2.43.0
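
And a sketch of the C_Dynamic path of the patch above (hypothetical hook;
real users appear later in the series): when static_constraint is
C_Dynamic, the constraint set is computed from the op's type and flags at
constraint-selection time.

    static TCGConstraintSetIndex cset_shift(TCGType type, unsigned flags)
    {
        /* e.g. allow an immediate shift count only for 32-bit ops. */
        return type == TCG_TYPE_I32 ? C_O1_I2(r, r, ri) : C_O1_I2(r, r, r);
    }
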
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
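
For illustration (not part of the patch): with the _i32/_i64 suffixes
gone, the load width follows from the opcode plus TCGOP_TYPE(op).  A
sketch of how a fold helper could compute the known-zero mask under that
scheme (hypothetical function; compare fold_tcg_ld in tcg/optimize.c):

    static uint64_t ld_z_mask(const TCGOp *op)
    {
        switch (op->opc) {
        case INDEX_op_ld8u:
            return MAKE_64BIT_MASK(0, 8);
        case INDEX_op_ld16u:
            return MAKE_64BIT_MASK(0, 16);
        case INDEX_op_ld32u:
            return MAKE_64BIT_MASK(0, 32);
        case INDEX_op_ld:
            /* Width now comes from the op's type, not the opcode name. */
            return TCGOP_TYPE(op) == TCG_TYPE_I32
                 ? MAKE_64BIT_MASK(0, 32) : UINT64_MAX;
        default:
            return UINT64_MAX;
        }
    }
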
 include/tcg/tcg-opc.h    | 19 +++++-------
 tcg/optimize.c           | 27 ++++++++---------
 tcg/tcg-op.c             | 24 +++++++--------
 tcg/tcg.c                | 64 ++++++++++++++--------------------------
 tcg/tci.c                | 43 +++++++++++----------------
 tcg/tci/tcg-target.c.inc | 28 +++++++-----------
 6 files changed, 83 insertions(+), 122 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(divu2, 2, 3, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(extract, 1, 1, 2, TCG_OPF_INT)
 DEF(extract2, 1, 2, 1, TCG_OPF_INT)
+DEF(ld8u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld8s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld16u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld16s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld32u, 1, 1, 1, TCG_OPF_INT)
+DEF(ld32s, 1, 1, 1, TCG_OPF_INT)
+DEF(ld, 1, 1, 1, TCG_OPF_INT)
 DEF(movcond, 1, 4, 1, TCG_OPF_INT)
 DEF(mul, 1, 2, 0, TCG_OPF_INT)
 DEF(muls2, 2, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(subbi, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
 DEF(subbio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
 
 /* load/store */
-DEF(ld8u_i32, 1, 1, 1, 0)
-DEF(ld8s_i32, 1, 1, 1, 0)
-DEF(ld16u_i32, 1, 1, 1, 0)
-DEF(ld16s_i32, 1, 1, 1, 0)
-DEF(ld_i32, 1, 1, 1, 0)
 DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
 DEF(setcond2_i32, 1, 4, 1, 0)
 
 /* load/store */
-DEF(ld8u_i64, 1, 1, 1, 0)
-DEF(ld8s_i64, 1, 1, 1, 0)
-DEF(ld16u_i64, 1, 1, 1, 0)
-DEF(ld16s_i64, 1, 1, 1, 0)
-DEF(ld32u_i64, 1, 1, 1, 0)
-DEF(ld32s_i64, 1, 1, 1, 0)
-DEF(ld_i64, 1, 1, 1, 0)
 DEF(st8_i64, 0, 2, 1, 0)
 DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
-    CASE_OP_32_64(ld8s):
+    case INDEX_op_ld8s:
        s_mask = INT8_MIN;
        break;
-    CASE_OP_32_64(ld8u):
+    case INDEX_op_ld8u:
        z_mask = MAKE_64BIT_MASK(0, 8);
        break;
-    CASE_OP_32_64(ld16s):
+    case INDEX_op_ld16s:
        s_mask = INT16_MIN;
        break;
-    CASE_OP_32_64(ld16u):
+    case INDEX_op_ld16u:
        z_mask = MAKE_64BIT_MASK(0, 16);
        break;
-    case INDEX_op_ld32s_i64:
+    case INDEX_op_ld32s:
        s_mask = INT32_MIN;
        break;
-    case INDEX_op_ld32u_i64:
+    case INDEX_op_ld32u:
        z_mask = MAKE_64BIT_MASK(0, 32);
        break;
     default:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             done = fold_extu(&ctx, op);
             break;
-        CASE_OP_32_64(ld8s):
-        CASE_OP_32_64(ld8u):
-        CASE_OP_32_64(ld16s):
-        CASE_OP_32_64(ld16u):
-        case INDEX_op_ld32s_i64:
-        case INDEX_op_ld32u_i64:
+        case INDEX_op_ld8s:
+        case INDEX_op_ld8u:
+        case INDEX_op_ld16s:
+        case INDEX_op_ld16u:
+        case INDEX_op_ld32s:
+        case INDEX_op_ld32u:
             done = fold_tcg_ld(&ctx, op);
             break;
-        case INDEX_op_ld_i32:
-        case INDEX_op_ld_i64:
+        case INDEX_op_ld:
         case INDEX_op_ld_vec:
             done = fold_tcg_ld_memcopy(&ctx, op);
             break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a)
 
 void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld8u, ret, arg2, offset);
 }
 
 void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld8s, ret, arg2, offset);
 }
 
 void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld16u, ret, arg2, offset);
 }
 
 void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld16s, ret, arg2, offset);
 }
 
 void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
-    tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
+    tcg_gen_ldst_op_i32(INDEX_op_ld, ret, arg2, offset);
 }
 
 void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg)
 void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld8u, ret, arg2, offset);
     } else {
         tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld8s, ret, arg2, offset);
     } else {
         tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld16u, ret, arg2, offset);
     } else {
         tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld16s, ret, arg2, offset);
     } else {
         tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld32u, ret, arg2, offset);
     } else {
         tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld32s, ret, arg2, offset);
     } else {
         tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset);
         tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset)
      * they cannot be the same temporary -- no chance of overlap.
      */
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
+        tcg_gen_ldst_op_i64(INDEX_op_ld, ret, arg2, offset);
     } else if (HOST_BIG_ENDIAN) {
         tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
         tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
     OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
-    OUTOP(INDEX_op_ld8u_i32, TCGOutOpLoad, outop_ld8u),
-    OUTOP(INDEX_op_ld8u_i64, TCGOutOpLoad, outop_ld8u),
-    OUTOP(INDEX_op_ld8s_i32, TCGOutOpLoad, outop_ld8s),
-    OUTOP(INDEX_op_ld8s_i64, TCGOutOpLoad, outop_ld8s),
-    OUTOP(INDEX_op_ld16u_i32, TCGOutOpLoad, outop_ld16u),
-    OUTOP(INDEX_op_ld16u_i64, TCGOutOpLoad, outop_ld16u),
-    OUTOP(INDEX_op_ld16s_i32, TCGOutOpLoad, outop_ld16s),
-    OUTOP(INDEX_op_ld16s_i64, TCGOutOpLoad, outop_ld16s),
-    OUTOP(INDEX_op_ld_i32, TCGOutOpLoad, outop_ld),
-    OUTOP(INDEX_op_ld_i64, TCGOutOpLoad, outop_ld),
+    OUTOP(INDEX_op_ld8u, TCGOutOpLoad, outop_ld8u),
+    OUTOP(INDEX_op_ld8s, TCGOutOpLoad, outop_ld8s),
+    OUTOP(INDEX_op_ld16u, TCGOutOpLoad, outop_ld16u),
+    OUTOP(INDEX_op_ld16s, TCGOutOpLoad, outop_ld16s),
+    OUTOP(INDEX_op_ld, TCGOutOpLoad, outop_ld),
     OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
     OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
     OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
     OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32),
     OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
-    OUTOP(INDEX_op_ld32u_i64, TCGOutOpLoad, outop_ld32u),
-    OUTOP(INDEX_op_ld32s_i64, TCGOutOpLoad, outop_ld32s),
+    OUTOP(INDEX_op_ld32u, TCGOutOpLoad, outop_ld32u),
+    OUTOP(INDEX_op_ld32s, TCGOutOpLoad, outop_ld32s),
 #endif
 };
 
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_brcond:
     case INDEX_op_deposit:
     case INDEX_op_extract:
+    case INDEX_op_ld8u:
+    case INDEX_op_ld8s:
+    case INDEX_op_ld16u:
+    case INDEX_op_ld16s:
+    case INDEX_op_ld:
     case INDEX_op_mov:
     case INDEX_op_movcond:
     case INDEX_op_negsetcond:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_xor:
         return has_type;
 
-    case INDEX_op_ld8u_i32:
-    case INDEX_op_ld8s_i32:
-    case INDEX_op_ld16u_i32:
-    case INDEX_op_ld16s_i32:
-    case INDEX_op_ld_i32:
     case INDEX_op_st8_i32:
     case INDEX_op_st16_i32:
     case INDEX_op_st_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_setcond2_i32:
         return TCG_TARGET_REG_BITS == 32;
 
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-    case INDEX_op_ld_i64:
+    case INDEX_op_ld32u:
+    case INDEX_op_ld32s:
     case INDEX_op_st8_i64:
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
             arg_ts = arg_temp(op->args[i]);
             dir_ts = arg_ts->state_ptr;
             if (dir_ts && arg_ts->state == TS_DEAD) {
-                TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
-                                  ? INDEX_op_ld_i32
-                                  : INDEX_op_ld_i64);
-                TCGOp *lop = tcg_op_insert_before(s, op, lopc,
+                TCGOp *lop = tcg_op_insert_before(s, op, INDEX_op_ld,
                                                   arg_ts->type, 3);
 
                 lop->args[0] = temp_arg(dir_ts);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;
 
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-        tcg_debug_assert(type == TCG_TYPE_I64);
-        /* fall through */
-    case INDEX_op_ld8u_i32:
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i32:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i32:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i32:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld_i32:
-    case INDEX_op_ld_i64:
+    case INDEX_op_ld8u:
+    case INDEX_op_ld8s:
+    case INDEX_op_ld16u:
+    case INDEX_op_ld16s:
+    case INDEX_op_ld32u:
+    case INDEX_op_ld32s:
+    case INDEX_op_ld:
         {
             const TCGOutOpLoad *out =
                 container_of(all_outop[op->opc], TCGOutOpLoad, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 
         /* Load/store operations (32 bit). */
 
-        CASE_32_64(ld8u)
+        case INDEX_op_ld8u:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(uint8_t *)ptr;
             break;
-        CASE_32_64(ld8s)
+        case INDEX_op_ld8s:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int8_t *)ptr;
             break;
-        CASE_32_64(ld16u)
+        case INDEX_op_ld16u:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(uint16_t *)ptr;
             break;
-        CASE_32_64(ld16s)
+        case INDEX_op_ld16s:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int16_t *)ptr;
             break;
-        case INDEX_op_ld_i32:
-        CASE_64(ld32u)
+        case INDEX_op_ld:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
-            regs[r0] = *(uint32_t *)ptr;
+            regs[r0] = *(tcg_target_ulong *)ptr;
             break;
         CASE_32_64(st8)
             tci_args_rrs(insn, &r0, &r1, &ofs);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 #if TCG_TARGET_REG_BITS == 64
         /* Load/store operations (64 bit). */
 
-        case INDEX_op_ld32s_i64:
+        case INDEX_op_ld32u:
+            tci_args_rrs(insn, &r0, &r1, &ofs);
+            ptr = (void *)(regs[r1] + ofs);
+            regs[r0] = *(uint32_t *)ptr;
+            break;
+        case INDEX_op_ld32s:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
             regs[r0] = *(int32_t *)ptr;
             break;
-        case INDEX_op_ld_i64:
-            tci_args_rrs(insn, &r0, &r1, &ofs);
-            ptr = (void *)(regs[r1] + ofs);
-            regs[r0] = *(uint64_t *)ptr;
-            break;
         case INDEX_op_st_i64:
             tci_args_rrs(insn, &r0, &r1, &ofs);
             ptr = (void *)(regs[r1] + ofs);
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
         info->fprintf_func(info->stream, "%-12s", op_name);
         break;
 
-    case INDEX_op_ld8u_i32:
-    case INDEX_op_ld8u_i64:
-    case INDEX_op_ld8s_i32:
-    case INDEX_op_ld8s_i64:
-    case INDEX_op_ld16u_i32:
-    case INDEX_op_ld16u_i64:
-    case INDEX_op_ld16s_i32:
-    case INDEX_op_ld16s_i64:
-    case INDEX_op_ld32u_i64:
-    case INDEX_op_ld32s_i64:
-    case INDEX_op_ld_i32:
-    case INDEX_op_ld_i64:
+    case INDEX_op_ld8u:
+    case INDEX_op_ld8s:
+    case INDEX_op_ld16u:
+    case INDEX_op_ld16s:
+    case INDEX_op_ld32u:
+    case INDEX_op_ld:
     case INDEX_op_st8_i32:
     case INDEX_op_st8_i64:
     case INDEX_op_st16_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                        intptr_t offset)
 {
-    switch (type) {
-    case TCG_TYPE_I32:
-        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
-        break;
-#if TCG_TARGET_REG_BITS == 64
-    case TCG_TYPE_I64:
-        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
-        break;
-#endif
-    default:
-        g_assert_not_reached();
+    TCGOpcode op = INDEX_op_ld;
+
+    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+        op = INDEX_op_ld32u;
     }
+    tcg_out_ldst(s, op, val, base, offset);
 }
 
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_br(TCGContext *s, TCGLabel *l)
 static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld8u_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld8u, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld8u = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld8u = {
 static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld8s_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld8s, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld8s = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld8s = {
 static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld16u_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld16u, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld16u = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld16u = {
 static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld16s_i32, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld16s, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld16s = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld16s = {
 static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld32u_i64, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld32u, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld32u = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32u = {
 static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
                        TCGReg base, ptrdiff_t offset)
 {
-    tcg_out_ldst(s, INDEX_op_ld32s_i64, dest, base, offset);
+    tcg_out_ldst(s, INDEX_op_ld32s, dest, base, offset);
 }
 
 static const TCGOutOpLoad outop_ld32s = {
--
2.43.0
Drop all backend support for an immediate as the first operand.
This should never happen in any case, as we swap commutative
operands to place immediates as the second operand.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
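
For illustration (not part of the patch): the invariant relied on here is
established during optimization, where commutative operands are swapped so
that a constant can only appear second.  A simplified sketch with a
hypothetical helper name (compare the canonicalization in tcg/optimize.c):

    /* Put any constant input of a commutative binary op into args[2],
       never args[1]. */
    static void canonicalize_commutative(TCGOp *op)
    {
        if (arg_is_const(op->args[1]) && !arg_is_const(op->args[2])) {
            TCGArg tmp = op->args[1];
            op->args[1] = op->args[2];
            op->args[2] = tmp;
        }
    }
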
 tcg/tcg.c                        |  4 +++
 tcg/aarch64/tcg-target.c.inc     | 31 ++++++++++--------
 tcg/arm/tcg-target.c.inc         | 41 +++++++++++++++++-------
 tcg/i386/tcg-target.c.inc        | 27 ++++++++++----
 tcg/loongarch64/tcg-target.c.inc | 29 ++++++++++-------
 tcg/mips/tcg-target.c.inc        | 55 +++++++++++++++++++-------------
 tcg/ppc/tcg-target.c.inc         | 40 ++++++++++++-----------
 tcg/riscv/tcg-target.c.inc       | 29 ++++++++++-------
 tcg/s390x/tcg-target.c.inc       | 48 +++++++++++++++-------------
 tcg/sparc64/tcg-target.c.inc     | 23 ++++++++++---
 tcg/tci/tcg-target.c.inc         | 14 ++++++--
 11 files changed, 216 insertions(+), 125 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
 /* Register allocation descriptions for every TCGOpcode. */
 static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
+    OUTOP(INDEX_op_and_i32, TCGOutOpBinary, outop_and),
+    OUTOP(INDEX_op_and_i64, TCGOutOpBinary, outop_and),
 };
 
 #undef OUTOP
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         break;
 
     case INDEX_op_add:
+    case INDEX_op_and_i32:
+    case INDEX_op_and_i64:
         {
             const TCGOutOpBinary *out =
                 container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3510, AND, type, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_logicali(s, I3404_ANDI, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, rL),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
         break;
 
-    case INDEX_op_and_i32:
-        a2 = (int32_t)a2;
-        /* FALLTHRU */
-    case INDEX_op_and_i64:
-        if (c2) {
-            tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
-        } else {
-            tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_andc_i32:
         a2 = (int32_t)a2;
         /* FALLTHRU */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_mulsh_i64:
         return C_O1_I2(r, r, r);
 
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
     case INDEX_op_or_i32:
     case INDEX_op_or_i64:
     case INDEX_op_xor_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
  * Emit either the reg,imm or reg,reg form of a data-processing insn.
  * rhs must satisfy the "rIK" constraint.
  */
+static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc,
+                           ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs)
+{
+    int imm12 = encode_imm(rhs);
+    if (imm12 < 0) {
+        imm12 = encode_imm_nofail(~rhs);
+        opc = opinv;
+    }
+    tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+}
+
 static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                             ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                             bool rhs_is_const)
 {
     if (rhs_is_const) {
-        int imm12 = encode_imm(rhs);
-        if (imm12 < 0) {
-            imm12 = encode_imm_nofail(~rhs);
-            opc = opinv;
-        }
-        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+        tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs);
     } else {
         tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
     }
 }
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, rIK),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                         args[0], args[1], args[2], const_args[2]);
         }
         break;
-    case INDEX_op_and_i32:
-        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
-                        args[0], args[1], args[2], const_args[2]);
-        break;
     case INDEX_op_andc_i32:
         tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                         args[0], args[1], args[2], const_args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_negsetcond_i32:
         return C_O1_I2(r, r, rIN);
 
-    case INDEX_op_and_i32:
     case INDEX_op_andc_i32:
     case INDEX_op_clz_i32:
     case INDEX_op_ctz_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tgen_arithr(s, ARITH_AND + rexw, a0, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tgen_arithi(s, ARITH_AND + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, 0, reZ),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     OP_32_64(sub):
         c = ARITH_SUB;
         goto gen_arith;
-    OP_32_64(and):
-        c = ARITH_AND;
-        goto gen_arith;
     OP_32_64(or):
         c = ARITH_OR;
         goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_xor_i64:
         return C_O1_I2(r, 0, re);
 
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
-        return C_O1_I2(r, 0, reZ);
-
     case INDEX_op_andc_i32:
     case INDEX_op_andc_i64:
         return C_O1_I2(r, r, rI);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tcg_out_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_and(s, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_opc_andi(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, rU),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
-        if (c2) {
-            tcg_out_opc_andi(s, a0, a1, a2);
-        } else {
-            tcg_out_opc_and(s, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_or_i32:
     case INDEX_op_or_i64:
         if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);
 
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
     case INDEX_op_nor_i32:
     case INDEX_op_nor_i64:
     case INDEX_op_or_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    int msb;
+
+    if (a2 == (uint16_t)a2) {
+        tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
+        return;
+    }
+
+    tcg_debug_assert(use_mips32r2_instructions);
+    tcg_debug_assert(is_p2m1(a2));
+    msb = ctz64(~a2) - 1;
+    if (type == TCG_TYPE_I32) {
+        tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
+    } else {
+        tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
+    }
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, rIK),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
             break;
         }
         goto do_binaryv;
-    case INDEX_op_and_i32:
-        if (c2 && a2 != (uint16_t)a2) {
-            int msb = ctz32(~a2) - 1;
-            tcg_debug_assert(use_mips32r2_instructions);
-            tcg_debug_assert(is_p2m1(a2));
-            tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0);
-            break;
-        }
-        i1 = OPC_AND, i2 = OPC_ANDI;
-        goto do_binary;
-    case INDEX_op_and_i64:
-        if (c2 && a2 != (uint16_t)a2) {
-            int msb = ctz64(~a2) - 1;
-            tcg_debug_assert(use_mips32r2_instructions);
-            tcg_debug_assert(is_p2m1(a2));
-            tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
-            break;
-        }
-        i1 = OPC_AND, i2 = OPC_ANDI;
-        goto do_binary;
     case INDEX_op_nor_i32:
     case INDEX_op_nor_i64:
         i1 = OPC_NOR;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_muls2_i64:
     case INDEX_op_mulu2_i64:
         return C_O2_I2(r, r, r, r);
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
-        return C_O1_I2(r, r, rIK);
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
     case INDEX_op_or_i64:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out32(s, AND | SAB(a1, a0, a2));
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_andi32(s, a0, a1, a2);
+    } else {
+        tcg_out_andi64(s, a0, a1, a2);
+    }
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    case INDEX_op_and_i32:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_andi32(s, a0, a1, a2);
-        } else {
-            tcg_out32(s, AND | SAB(a1, a0, a2));
-        }
-        break;
-    case INDEX_op_and_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_andi64(s, a0, a1, a2);
-        } else {
-            tcg_out32(s, AND | SAB(a1, a0, a2));
-        }
-        break;
     case INDEX_op_or_i64:
     case INDEX_op_or_i32:
         a0 = args[0], a1 = args[1], a2 = args[2];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(r, r);
 
-    case INDEX_op_and_i32:
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
     case INDEX_op_andc_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
-    case INDEX_op_and_i64:
     case INDEX_op_andc_i64:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, rI),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
-        if (c2) {
-            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
-        } else {
-            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_or_i32:
     case INDEX_op_or_i64:
         if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(rz, r);
 
-    case INDEX_op_and_i32:
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
-    case INDEX_op_and_i64:
     case INDEX_op_or_i64:
     case INDEX_op_xor_i64:
     case INDEX_op_setcond_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rri = tgen_addi,
 };
 
+static void tgen_and(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type != TCG_TYPE_I32) {
+        tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
+    } else if (a0 == a1) {
+        tcg_out_insn(s, RR, NR, a0, a2);
+    } else {
+        tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
+    }
+}
+
+static void tgen_andi_3(TCGContext *s, TCGType type,
+                        TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_mov(s, type, a0, a1);
+    tgen_andi(s, type, a0, a2);
+}
+
+static const TCGOutOpBinary outop_and = {
+    .base.static_constraint = C_O1_I2(r, r, rNKR),
+    .out_rrr = tgen_and,
+    .out_rri = tgen_andi_3,
522
+};
523
+
524
525
# define OP_32_64(x) \
526
case glue(glue(INDEX_op_,x),_i32): \
527
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
528
}
529
break;
530
531
- case INDEX_op_and_i32:
532
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
533
- if (const_args[2]) {
534
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
535
- tgen_andi(s, TCG_TYPE_I32, a0, a2);
536
- } else if (a0 == a1) {
537
- tcg_out_insn(s, RR, NR, a0, a2);
538
- } else {
539
- tcg_out_insn(s, RRFa, NRK, a0, a1, a2);
540
- }
541
- break;
542
case INDEX_op_or_i32:
543
a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
544
if (const_args[2]) {
545
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
546
}
547
break;
548
549
- case INDEX_op_and_i64:
550
- a0 = args[0], a1 = args[1], a2 = args[2];
551
- if (const_args[2]) {
552
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
553
- tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
554
- } else {
555
- tcg_out_insn(s, RRFa, NGRK, a0, a1, a2);
556
- }
557
- break;
558
case INDEX_op_or_i64:
559
a0 = args[0], a1 = args[1], a2 = args[2];
560
if (const_args[2]) {
561
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
562
563
case INDEX_op_sub_i32:
564
case INDEX_op_sub_i64:
565
- case INDEX_op_and_i32:
566
case INDEX_op_or_i32:
567
case INDEX_op_xor_i32:
568
return C_O1_I2(r, r, ri);
569
- case INDEX_op_and_i64:
570
- return C_O1_I2(r, r, rNKR);
571
case INDEX_op_or_i64:
572
case INDEX_op_xor_i64:
573
return C_O1_I2(r, r, rK);
574
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
575
index XXXXXXX..XXXXXXX 100644
576
--- a/tcg/sparc64/tcg-target.c.inc
577
+++ b/tcg/sparc64/tcg-target.c.inc
578
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
579
.out_rri = tgen_addi,
580
};
581
582
+static void tgen_and(TCGContext *s, TCGType type,
583
+ TCGReg a0, TCGReg a1, TCGReg a2)
584
+{
585
+ tcg_out_arith(s, a0, a1, a2, ARITH_AND);
586
+}
587
+
588
+static void tgen_andi(TCGContext *s, TCGType type,
589
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
590
+{
591
+ tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
592
+}
593
+
594
+static const TCGOutOpBinary outop_and = {
595
+ .base.static_constraint = C_O1_I2(r, r, rJ),
596
+ .out_rrr = tgen_and,
597
+ .out_rri = tgen_andi,
598
+};
599
+
600
601
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
602
const TCGArg args[TCG_MAX_OP_ARGS],
603
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
604
OP_32_64(sub):
605
c = ARITH_SUB;
606
goto gen_arith;
607
- OP_32_64(and):
608
- c = ARITH_AND;
609
- goto gen_arith;
610
OP_32_64(andc):
611
c = ARITH_ANDN;
612
goto gen_arith;
613
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
614
case INDEX_op_divu_i64:
615
case INDEX_op_sub_i32:
616
case INDEX_op_sub_i64:
617
- case INDEX_op_and_i32:
618
- case INDEX_op_and_i64:
619
case INDEX_op_andc_i32:
620
case INDEX_op_andc_i64:
621
case INDEX_op_or_i32:
622
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
623
index XXXXXXX..XXXXXXX 100644
624
--- a/tcg/tci/tcg-target.c.inc
625
+++ b/tcg/tci/tcg-target.c.inc
626
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
627
case INDEX_op_sub_i64:
628
case INDEX_op_mul_i32:
629
case INDEX_op_mul_i64:
630
- case INDEX_op_and_i32:
631
- case INDEX_op_and_i64:
632
case INDEX_op_andc_i32:
633
case INDEX_op_andc_i64:
634
case INDEX_op_eqv_i32:
635
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
636
.out_rrr = tgen_add,
637
};
638
639
+static void tgen_and(TCGContext *s, TCGType type,
640
+ TCGReg a0, TCGReg a1, TCGReg a2)
641
+{
642
+ tcg_out_op_rrr(s, glue(INDEX_op_and_i,TCG_TARGET_REG_BITS), a0, a1, a2);
643
+}
644
+
645
+static const TCGOutOpBinary outop_and = {
646
+ .base.static_constraint = C_O1_I2(r, r, r),
647
+ .out_rrr = tgen_and,
648
+};
649
+
650
651
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
652
const TCGArg args[TCG_MAX_OP_ARGS],
653
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
654
655
CASE_32_64(sub)
656
CASE_32_64(mul)
657
- CASE_32_64(and)
658
CASE_32_64(or)
659
CASE_32_64(xor)
660
CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
661
--
69
--
662
2.43.0
70
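
One detail of the MIPS tgen_andi() above is worth spelling out. When the
immediate does not fit ANDI's 16-bit zero-extended field, the rIK
constraint only admits masks of the form 2**n - 1, which MIPS32R2's
EXT/DEXT implement as an n-bit extract starting at bit 0. A minimal
sketch of why msb = ctz64(~a2) - 1 is the right field bound follows;
is_p2m1() already exists in this backend, and the version here is only
an illustrative assumption of its shape, not the real code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed shape: true for masks of the form 0b0...01...1 == 2**n - 1. */
    static bool is_p2m1(uint64_t mask)
    {
        return mask != 0 && (mask & (mask + 1)) == 0;
    }

    /*
     * For mask == 2**n - 1, the complement ~mask has exactly n trailing
     * zero bits, so ctz64(~mask) == n and the extracted field spans bits
     * n-1 down to 0. Hence the "msb" operand passed to EXT/DEXT is
     * ctz64(~mask) - 1, matching tgen_andi() above.
     */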
--
2.17.2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
     uint64_t s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, -1) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    s_mask = arg_info(op->args[1])->s_mask
-           & arg_info(op->args[2])->s_mask;
+    t2 = arg_info(op->args[2]);
+    if (ti_is_const(t2)) {
+        /* Fold orc r,x,i to or r,x,~i. */
+        switch (ctx->type) {
+        case TCG_TYPE_I32:
+        case TCG_TYPE_I64:
+            op->opc = INDEX_op_or;
+            break;
+        case TCG_TYPE_V64:
+        case TCG_TYPE_V128:
+        case TCG_TYPE_V256:
+            op->opc = INDEX_op_or_vec;
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
+        return fold_or(ctx, op);
+    }
+
+    t1 = arg_info(op->args[1]);
+    s_mask = t1->s_mask & t2->s_mask;
     return fold_masks_s(ctx, op, s_mask);
 }
 
--
2.43.0
2.17.2
52
30
53
31
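The next patch below extends the series' central refactoring to goto_ptr:
rather than each backend handling it inside a monolithic tcg_out_op()
switch, the opcode gets an entry in the generic all_outop[] table and
tcg_reg_alloc_op() dispatches to a dedicated tcg_out_goto_ptr() hook. As
a sketch of the shape, using only names that appear in the hunks below
(the surrounding glue is paraphrased):

    /*
     * Conceptually, inside tcg_reg_alloc_op():
     *
     *     case INDEX_op_goto_ptr:
     *         tcg_debug_assert(!const_args[0]);
     *         tcg_out_goto_ptr(s, new_args[0]);
     *         break;
     *
     * with the register constraint taken from the table entry,
     * outop_goto_ptr.static_constraint == C_O0_I1(r): no outputs, one
     * register input, which every backend previously had to return from
     * tcg_target_op_def() by hand.
     */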
Split these functions out from tcg_out_op.
Define outop_goto_ptr generically.
Call tcg_out_goto_ptr from tcg_reg_alloc_op.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 12 ++++++++++++
 tcg/aarch64/tcg-target.c.inc     | 12 +++++-------
 tcg/arm/tcg-target.c.inc         | 12 +++++-------
 tcg/i386/tcg-target.c.inc        | 13 ++++++-------
 tcg/loongarch64/tcg-target.c.inc | 12 +++++-------
 tcg/mips/tcg-target.c.inc        | 22 ++++++++++------------
 tcg/ppc/tcg-target.c.inc         | 15 +++++++--------
 tcg/riscv/tcg-target.c.inc       | 19 +++++++++++-------
 tcg/s390x/tcg-target.c.inc       | 15 +++++----------
 tcg/sparc64/tcg-target.c.inc     | 14 ++++++--------
 tcg/tci/tcg-target.c.inc         | 12 +++++-------
 11 files changed, 71 insertions(+), 80 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg dest);
 static void tcg_out_mb(TCGContext *s, unsigned bar);
 static void tcg_out_br(TCGContext *s, TCGLabel *l);
 static void tcg_out_set_carry(TCGContext *s);
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_extrl_i64_i32 = {
 };
 #endif
 
+static const TCGOutOp outop_goto_ptr = {
+    .static_constraint = C_O0_I1(r),
+};
+
 /*
  * Register V as the TCGOutOp for O.
  * This verifies that V is of type T, otherwise give a nice compiler error.
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 
+    [INDEX_op_goto_ptr] = &outop_goto_ptr,
+
 #if TCG_TARGET_REG_BITS == 32
     OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
     OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         g_assert_not_reached();
 #endif
 
+    case INDEX_op_goto_ptr:
+        tcg_debug_assert(!const_args[0]);
+        tcg_out_goto_ptr(s, new_args[0]);
+        break;
+
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     tcg_out_bti(s, BTI_J);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_insn(s, 3207, BR, a0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
     TCGArg a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out_insn(s, 3207, BR, a0);
-        break;
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_b_reg(s, COND_AL, a0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out_b_reg(s, COND_AL, args[0]);
-        break;
-
     case INDEX_op_ld8u_i32:
         tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
         break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    /* Jump to the given host address (could be epilogue) */
+    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
 
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        /* jmp to the given host address (could be epilogue) */
-        tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
-        break;
     OP_32_64(ld8u):
         /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
         tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
     case INDEX_op_ld8s_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a3 = args[3];
 
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
-        break;
-
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld8s_i64:
         tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_st8_i32:
     case INDEX_op_st8_i64:
     case INDEX_op_st16_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     }
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
+    if (TCG_TARGET_REG_BITS == 64) {
+        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
+    } else {
+        tcg_out_nop(s);
+    }
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        /* jmp to the given host address (could be epilogue) */
-        tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
-        } else {
-            tcg_out_nop(s);
-        }
-        break;
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         i1 = OPC_LBU;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out32(s, MTSPR | RS(a0) | CTR);
+    tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
+    tcg_out32(s, BCCTR | BO_ALWAYS);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
-        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
-        tcg_out32(s, BCCTR | BO_ALWAYS);
-        break;
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     TCGArg a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
-        break;
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
 {
-    TCGArg a0;
-
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        a0 = args[0];
-        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
-        break;
-
     OP_32_64(ld8u):
         /* ??? LLC (RXY format) is only present with the extended-immediate
            facility, whereas LLGC is always present.  */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
     case INDEX_op_ld8s_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     }
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
+    tcg_out_mov_delay(s, TCG_REG_TB, a0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     a2 = args[2];
 
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
-        tcg_out_mov_delay(s, TCG_REG_TB, a0);
-        break;
-
 #define OP_32_64(x)                     \
         glue(glue(case INDEX_op_, x), _i32): \
         glue(glue(case INDEX_op_, x), _i64)
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
     case INDEX_op_ld8s_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 {
     switch (op) {
-    case INDEX_op_goto_ptr:
-        return C_O0_I1(r);
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld16u_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+    tcg_out_op_r(s, INDEX_op_goto_ptr, a0);
+}
+
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     switch (opc) {
-    case INDEX_op_goto_ptr:
-        tcg_out_op_r(s, opc, args[0]);
-        break;
-
     CASE_32_64(ld8u)
     CASE_32_64(ld8s)
     CASE_32_64(ld16u)
--
2.43.0

From: "Emilio G. Cota" <cota@braap.org>

Currently we rely on atomic operations for cross-CPU invalidations.
There are two cases that these atomics miss: cross-CPU invalidations
can race with either (1) vCPU threads flushing their TLB, which
happens via memset, or (2) vCPUs calling tlb_reset_dirty on their TLB,
which updates .addr_write with a regular store. This results in
undefined behaviour, since we're mixing regular and atomic ops
on concurrent accesses.

Fix it by using tlb_lock, a per-vCPU lock. All updaters of tlb_table
and the corresponding victim cache now hold the lock.
The readers that do not hold tlb_lock must use atomic reads when
reading .addr_write, since this field can be updated by other threads;
the conversion to atomic reads is done in the next patch.

Note that an alternative fix would be to expand the use of atomic ops.
However, in the case of TLB flushes this would have a huge performance
impact, since (1) TLB flushes can happen very frequently and (2) we
currently use a full memory barrier to flush each TLB entry, and a TLB
has many entries. Instead, acquiring the lock is barely slower than a
full memory barrier since it is uncontended, and with a single lock
acquisition we can flush the entire TLB.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20181009174557.16125-6-cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h |   3 +
 accel/tcg/cputlb.c      | 155 ++++++++++++++++++++++------------
 2 files changed, 87 insertions(+), 71 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@
 #endif
 
 #include "qemu/host-utils.h"
+#include "qemu/thread.h"
 #include "qemu/queue.h"
 #ifdef CONFIG_TCG
 #include "tcg-target.h"
@@ -XXX,XX +XXX,XX @@ typedef struct CPUIOTLBEntry {
 
 #define CPU_COMMON_TLB \
     /* The meaning of the MMU modes is defined in the target code. */ \
+    /* tlb_lock serializes updates to tlb_table and tlb_v_table */ \
+    QemuSpin tlb_lock; \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 
 void tlb_init(CPUState *cpu)
 {
+    CPUArchState *env = cpu->env_ptr;
+
+    qemu_spin_init(&env->tlb_lock);
 }
 
 /* flush_all_helper: run fn across all cpus
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_nocheck(CPUState *cpu)
     atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
     tlb_debug("(count: %zu)\n", tlb_flush_count());
 
+    /*
+     * tlb_table/tlb_v_table updates from any thread must hold tlb_lock.
+     * However, updates from the owner thread (as is the case here; see the
+     * above assert_cpu_is_self) do not need atomic_set because all reads
+     * that do not hold the lock are performed by the same owner thread.
+     */
+    qemu_spin_lock(&env->tlb_lock);
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
+    qemu_spin_unlock(&env->tlb_lock);
+
     cpu_tb_jmp_cache_clear(cpu);
 
     env->vtlb_index = 0;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 
     tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
 
+    qemu_spin_lock(&env->tlb_lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
 
         if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
             memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
         }
     }
+    qemu_spin_unlock(&env->tlb_lock);
 
     cpu_tb_jmp_cache_clear(cpu);
 
@@ -XXX,XX +XXX,XX @@ static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
            tlb_hit_page(tlb_entry->addr_code, page);
 }
 
-static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page)
+/* Called with tlb_lock held */
+static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
+                                          target_ulong page)
 {
     if (tlb_hit_page_anyprot(tlb_entry, page)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
 
-static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx,
-                                       target_ulong page)
+/* Called with tlb_lock held */
+static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
+                                              target_ulong page)
 {
     int k;
+
+    assert_cpu_is_self(ENV_GET_CPU(env));
     for (k = 0; k < CPU_VTLB_SIZE; k++) {
-        tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page);
+        tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
 
     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    qemu_spin_lock(&env->tlb_lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
-        tlb_flush_vtlb_page(env, mmu_idx, addr);
+        tlb_flush_entry_locked(&env->tlb_table[mmu_idx][i], addr);
+        tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
     }
+    qemu_spin_unlock(&env->tlb_lock);
 
     tb_flush_jmp_cache(cpu, addr);
 }
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
               page, addr, mmu_idx_bitmap);
 
+    qemu_spin_lock(&env->tlb_lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
-            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
-            tlb_flush_vtlb_page(env, mmu_idx, addr);
+            tlb_flush_entry_locked(&env->tlb_table[mmu_idx][page], addr);
+            tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
         }
     }
+    qemu_spin_unlock(&env->tlb_lock);
 
     tb_flush_jmp_cache(cpu, addr);
 }
@@ -XXX,XX +XXX,XX @@ void tlb_unprotect_code(ram_addr_t ram_addr)
  * most usual is detecting writes to code regions which may invalidate
  * generated code.
  *
- * Because we want other vCPUs to respond to changes straight away we
- * update the te->addr_write field atomically. If the TLB entry has
- * been changed by the vCPU in the mean time we skip the update.
+ * Other vCPUs might be reading their TLBs during guest execution, so we update
+ * te->addr_write with atomic_set. We don't need to worry about this for
+ * oversized guests as MTTCG is disabled for them.
  *
- * As this function uses atomic accesses we also need to ensure
- * updates to tlb_entries follow the same access rules. We don't need
- * to worry about this for oversized guests as MTTCG is disabled for
- * them.
+ * Called with tlb_lock held.
 */
-
-static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
-                                  uintptr_t length)
+static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
+                                         uintptr_t start, uintptr_t length)
 {
-#if TCG_OVERSIZED_GUEST
     uintptr_t addr = tlb_entry->addr_write;
 
     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
         addr &= TARGET_PAGE_MASK;
         addr += tlb_entry->addend;
         if ((addr - start) < length) {
+#if TCG_OVERSIZED_GUEST
             tlb_entry->addr_write |= TLB_NOTDIRTY;
-        }
-    }
 #else
-    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
-    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
-    uintptr_t addr = orig_addr;
-
-    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
-        addr &= TARGET_PAGE_MASK;
-        addr += atomic_read(&tlb_entry->addend);
-        if ((addr - start) < length) {
-            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
-            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
+            atomic_set(&tlb_entry->addr_write,
+                       tlb_entry->addr_write | TLB_NOTDIRTY);
+#endif
         }
     }
-#endif
 }
 
-/* For atomic correctness when running MTTCG we need to use the right
- * primitives when copying entries */
-static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
-                                   bool atomic_set)
+/*
+ * Called with tlb_lock held.
+ * Called only from the vCPU context, i.e. the TLB's owner thread.
+ */
+static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
 {
-#if TCG_OVERSIZED_GUEST
     *d = *s;
-#else
-    if (atomic_set) {
-        d->addr_read = s->addr_read;
-        d->addr_code = s->addr_code;
-        atomic_set(&d->addend, atomic_read(&s->addend));
-        /* Pairs with flag setting in tlb_reset_dirty_range */
-        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
-    } else {
-        d->addr_read = s->addr_read;
-        d->addr_write = atomic_read(&s->addr_write);
-        d->addr_code = s->addr_code;
-        d->addend = atomic_read(&s->addend);
-    }
-#endif
 }
 
 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
- * the target vCPU). As such care needs to be taken that we don't
- * dangerously race with another vCPU update. The only thing actually
- * updated is the target TLB entry ->addr_write flags.
+ * the target vCPU).
+ * We must take tlb_lock to avoid racing with another vCPU update. The only
+ * thing actually updated is the target TLB entry ->addr_write flags.
 */
 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
 {
@@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
     int mmu_idx;
 
     env = cpu->env_ptr;
+    qemu_spin_lock(&env->tlb_lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         unsigned int i;
 
         for (i = 0; i < CPU_TLB_SIZE; i++) {
-            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
-                                  start1, length);
+            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
+                                         length);
         }
 
         for (i = 0; i < CPU_VTLB_SIZE; i++) {
-            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
-                                  start1, length);
+            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
+                                         length);
         }
     }
+    qemu_spin_unlock(&env->tlb_lock);
 }
 
-static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
+/* Called with tlb_lock held */
+static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
+                                         target_ulong vaddr)
 {
     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
         tlb_entry->addr_write = vaddr;
@@ -XXX,XX +XXX,XX @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 
     vaddr &= TARGET_PAGE_MASK;
     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    qemu_spin_lock(&env->tlb_lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
+        tlb_set_dirty1_locked(&env->tlb_table[mmu_idx][i], vaddr);
     }
 
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         int k;
         for (k = 0; k < CPU_VTLB_SIZE; k++) {
-            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
+            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
         }
     }
+    qemu_spin_unlock(&env->tlb_lock);
 }
 
 /* Our TLB does not support large pages, so remember the area covered by
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
     }
 
-    /* Make sure there's no cached translation for the new page.  */
-    tlb_flush_vtlb_page(env, mmu_idx, vaddr_page);
-
     code_address = address;
     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                             paddr_page, xlat, prot, &address);
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     te = &env->tlb_table[mmu_idx][index];
 
+    /*
+     * Hold the TLB lock for the rest of the function. We could acquire/release
+     * the lock several times in the function, but it is faster to amortize the
+     * acquisition cost by acquiring it just once. Note that this leads to
+     * a longer critical section, but this is not a concern since the TLB lock
+     * is unlikely to be contended.
+     */
+    qemu_spin_lock(&env->tlb_lock);
+
+    /* Make sure there's no cached translation for the new page.  */
+    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
+
     /*
      * Only evict the old entry to the victim tlb if it's for a
      * different page; otherwise just overwrite the stale data.
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
         /* Evict the old entry into the victim tlb.  */
-        copy_tlb_helper(tv, te, true);
+        copy_tlb_helper_locked(tv, te);
         env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
     }
 
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         }
     }
 
-    /* Pairs with flag setting in tlb_reset_dirty_range */
-    copy_tlb_helper(te, &tn, true);
-    /* atomic_mb_set(&te->addr_write, write_address); */
+    copy_tlb_helper_locked(te, &tn);
+    qemu_spin_unlock(&env->tlb_lock);
 }
 
 /* Add a new TLB entry, but without specifying the memory
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                            size_t elt_ofs, target_ulong page)
 {
     size_t vidx;
+
+    assert_cpu_is_self(ENV_GET_CPU(env));
     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
         target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
         /* Found entry in victim tlb, swap tlb and iotlb.  */
         CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
 
-        copy_tlb_helper(&tmptlb, tlb, false);
-        copy_tlb_helper(tlb, vtlb, true);
-        copy_tlb_helper(vtlb, &tmptlb, true);
+        qemu_spin_lock(&env->tlb_lock);
+        copy_tlb_helper_locked(&tmptlb, tlb);
+        copy_tlb_helper_locked(tlb, vtlb);
+        copy_tlb_helper_locked(vtlb, &tmptlb);
+        qemu_spin_unlock(&env->tlb_lock);
 
         CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
         CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
--
2.17.2
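
The locking rule the tlb_lock patch above establishes condenses to a
short sketch. The type and field names are those in the diff; the
surrounding statements are illustrative only, not lifted from the tree:

    /*
     * Updaters, from any thread: take the per-vCPU lock. Plain stores
     * are then fine for the owner thread, because every reader that does
     * not hold the lock is the owner itself.
     */
    qemu_spin_lock(&env->tlb_lock);
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    qemu_spin_unlock(&env->tlb_lock);

    /*
     * Readers that do not hold tlb_lock (converted in the next patch):
     * atomic read of the one field other threads may update.
     */
    target_ulong tlb_addr = atomic_read(&entry->addr_write);

An uncontended acquisition costs roughly as much as the full barrier the
old atomic_mb_set() implied, but one acquisition now covers an entire
flush instead of one barrier per TLB entry.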
Isolate the computation of an index from an address into a
helper before we change that function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
[ cota: convert tlb_vaddr_to_host; use atomic_read on addr_write ]
Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20181009175129.17888-2-cota@braap.org>
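
The helper pair introduced here replaces the open-coded
'(addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)' computation that is
currently repeated at every call site. The conversion below always uses
the two together, in this shape; tlb_entry() is introduced alongside
tlb_index(), though only the start of the tlb_index() definition is
visible in this excerpt:

    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);    /* slot number */
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); /* the slot itself */
    target_ulong tlb_addr = entry->addr_write;

Isolating the computation in one place is what lets a later change alter
how the index is derived without touching every accessor.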
---
 accel/tcg/softmmu_template.h     | 64 +++++++++++++++++---------------
 include/exec/cpu_ldst.h          | 19 ++++++++--
 include/exec/cpu_ldst_template.h | 25 +++++++------
 accel/tcg/cputlb.c               | 60 ++++++++++++++----------------
 4 files changed, 90 insertions(+), 78 deletions(-)

diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -XXX,XX +XXX,XX @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    unsigned mmu_idx = get_mmuidx(oi);
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index = tlb_index(env, mmu_idx, addr);
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = entry->ADDR_READ;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
@@ -XXX,XX +XXX,XX @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
-        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+        tlb_addr = entry->ADDR_READ;
     }
 
     /* Handle an IO access.  */
@@ -XXX,XX +XXX,XX @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         return res;
     }
 
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    haddr = addr + entry->addend;
 #if DATA_SIZE == 1
     res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
 #else
@@ -XXX,XX +XXX,XX @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    unsigned mmu_idx = get_mmuidx(oi);
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index = tlb_index(env, mmu_idx, addr);
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = entry->ADDR_READ;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
@@ -XXX,XX +XXX,XX @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
-        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+        tlb_addr = entry->ADDR_READ;
     }
 
     /* Handle an IO access.  */
@@ -XXX,XX +XXX,XX @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         return res;
     }
 
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    haddr = addr + entry->addend;
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
     return res;
 }
@@ -XXX,XX +XXX,XX @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    unsigned mmu_idx = get_mmuidx(oi);
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index = tlb_index(env, mmu_idx, addr);
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = entry->addr_write;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }
-        tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
+        tlb_addr = entry->addr_write & ~TLB_INVALID_MASK;
     }
 
     /* Handle an IO access.  */
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
-        int i, index2;
-        target_ulong page2, tlb_addr2;
+        int i;
+        target_ulong page2;
+        CPUTLBEntry *entry2;
     do_unaligned_access:
         /* Ensure the second page is in the TLB.  Note that the first page
            is already guaranteed to be filled, and that the second page
            cannot evict the first.  */
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
-        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (!tlb_hit_page(tlb_addr2, page2)
+        entry2 = tlb_entry(env, mmu_idx, page2);
+        if (!tlb_hit_page(entry2->addr_write, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    haddr = addr + entry->addend;
 #if DATA_SIZE == 1
     glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
 #else
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    unsigned mmu_idx = get_mmuidx(oi);
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    uintptr_t mmu_idx = get_mmuidx(oi);
+    uintptr_t index = tlb_index(env, mmu_idx, addr);
+    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    target_ulong tlb_addr = entry->addr_write;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
@@ -XXX,XX +XXX,XX @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
         }
-        tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
+        tlb_addr = entry->addr_write & ~TLB_INVALID_MASK;
     }
 
     /* Handle an IO access.  */
@@ -XXX,XX +XXX,XX @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
-        int i, index2;
-        target_ulong page2, tlb_addr2;
+        int i;
+        target_ulong page2;
+        CPUTLBEntry *entry2;
     do_unaligned_access:
         /* Ensure the second page is in the TLB.  Note that the first page
            is already guaranteed to be filled, and that the second page
            cannot evict the first.  */
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
-        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (!tlb_hit_page(tlb_addr2, page2)
+        entry2 = tlb_entry(env, mmu_idx, page2);
+        if (!tlb_hit_page(entry2->addr_write, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -XXX,XX +XXX,XX @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         return;
     }
 
-    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    haddr = addr + entry->addend;
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
 #endif /* DATA_SIZE > 1 */
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ extern __thread uintptr_t helper_retaddr;
 /* The memory helpers for tcg-generated code need tcg_target_long etc.  */
 #include "tcg.h"
 
+/* Find the TLB index corresponding to the mmu_idx + address pair.  */
+static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
+                                  target_ulong addr)

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-has.h     |  2 --
 tcg/arm/tcg-target-has.h         |  1 -
 tcg/i386/tcg-target-has.h        |  2 --
 tcg/loongarch64/tcg-target-has.h |  2 --
 tcg/mips/tcg-target-has.h        |  2 --
 tcg/ppc/tcg-target-has.h         |  2 --
 tcg/riscv/tcg-target-has.h       |  2 --
 tcg/s390x/tcg-target-has.h       |  2 --
 tcg/sparc64/tcg-target-has.h     |  2 --
 tcg/tcg-has.h                    |  1 -
 tcg/tci/tcg-target-has.h         |  2 --
 tcg/tcg-op.c                     |  4 ++--
 tcg/tcg.c                        |  8 ++++----
 tcg/aarch64/tcg-target.c.inc     | 17 +++++++++++-----
 tcg/arm/tcg-target.c.inc         |  4 ++++
 tcg/i386/tcg-target.c.inc        |  4 ++++
 tcg/loongarch64/tcg-target.c.inc | 24 +++++++++++++---------
 tcg/mips/tcg-target.c.inc        | 34 +++++++++++++++++---------------
 tcg/ppc/tcg-target.c.inc         | 21 +++++++++++---------
 tcg/riscv/tcg-target.c.inc       | 19 +++++++++++-------
 tcg/s390x/tcg-target.c.inc       |  4 ++++
 tcg/sparc64/tcg-target.c.inc     |  4 ++++
 tcg/tci/tcg-target.c.inc         |  4 ++++
 23 files changed, 95 insertions(+), 72 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i32         1
 #define TCG_TARGET_HAS_mulu2_i32        0
 #define TCG_TARGET_HAS_muls2_i32        0
-#define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_extr_i64_i32     0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i64         1
 #define TCG_TARGET_HAS_mulu2_i64        0
 #define TCG_TARGET_HAS_muls2_i64        0
-#define TCG_TARGET_HAS_mulsh_i64        1
 
 /*
  * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #define TCG_TARGET_HAS_negsetcond_i32   1
 #define TCG_TARGET_HAS_mulu2_i32        1
 #define TCG_TARGET_HAS_muls2_i32        1
-#define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_div_i32          use_idiv_instructions
 #define TCG_TARGET_HAS_rem_i32          0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i32         1
 #define TCG_TARGET_HAS_mulu2_i32        1
 #define TCG_TARGET_HAS_muls2_i32        1
-#define TCG_TARGET_HAS_mulsh_i32        0
 
 #if TCG_TARGET_REG_BITS == 64
 /* Keep 32-bit values zero-extended in a register.  */
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i64         1
 #define TCG_TARGET_HAS_mulu2_i64        1
 #define TCG_TARGET_HAS_muls2_i64        1
-#define TCG_TARGET_HAS_mulsh_i64        0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 #else
 #define TCG_TARGET_HAS_qemu_st8_i32     1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i32         0
 #define TCG_TARGET_HAS_mulu2_i32        0
 #define TCG_TARGET_HAS_muls2_i32        0
-#define TCG_TARGET_HAS_mulsh_i32        1
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_clz_i32          1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i64         0
 #define TCG_TARGET_HAS_mulu2_i64        0
 #define TCG_TARGET_HAS_muls2_i64        0
-#define TCG_TARGET_HAS_mulsh_i64        1
 
 #define TCG_TARGET_HAS_qemu_ldst_i128   (cpuinfo & CPUINFO_LSX)
 
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_rem_i32          1
 #define TCG_TARGET_HAS_mulu2_i32        (!use_mips32r6_instructions)
 #define TCG_TARGET_HAS_muls2_i32        (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_mulsh_i32        1
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_negsetcond_i32   0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_sub2_i64         0
 #define TCG_TARGET_HAS_mulu2_i64        (!use_mips32r6_instructions)
 #define TCG_TARGET_HAS_muls2_i64        (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_mulsh_i64        1
 #define TCG_TARGET_HAS_ext32s_i64       1
 #define TCG_TARGET_HAS_ext32u_i64       1
 #define TCG_TARGET_HAS_negsetcond_i64   0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_negsetcond_i32   1
 #define TCG_TARGET_HAS_mulu2_i32        0
 #define TCG_TARGET_HAS_muls2_i32        0
-#define TCG_TARGET_HAS_mulsh_i32        1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i64         1
 #define TCG_TARGET_HAS_mulu2_i64        0
 #define TCG_TARGET_HAS_muls2_i64        0
-#define TCG_TARGET_HAS_mulsh_i64        1
 #endif
 
 #define TCG_TARGET_HAS_qemu_ldst_i128   \
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i32         1
 #define TCG_TARGET_HAS_mulu2_i32        0
 #define TCG_TARGET_HAS_muls2_i32        0
-#define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_bswap16_i32      (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap32_i32      (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_clz_i32          (cpuinfo & CPUINFO_ZBB)
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i64         1
 #define TCG_TARGET_HAS_mulu2_i64        0
 #define TCG_TARGET_HAS_muls2_i64        0
-#define TCG_TARGET_HAS_mulsh_i64        1
 
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_sub2_i32       1
 #define TCG_TARGET_HAS_mulu2_i32      0
 #define TCG_TARGET_HAS_muls2_i32      0
-#define TCG_TARGET_HAS_mulsh_i32      0
 #define TCG_TARGET_HAS_extr_i64_i32   0
 #define TCG_TARGET_HAS_qemu_st8_i32   0
 
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_sub2_i64       1
 #define TCG_TARGET_HAS_mulu2_i64      1
 #define TCG_TARGET_HAS_muls2_i64      HAVE_FACILITY(MISC_INSN_EXT2)
-#define TCG_TARGET_HAS_mulsh_i64      0
 
 #define TCG_TARGET_HAS_qemu_ldst_i128 1
 
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_sub2_i32         1
 #define TCG_TARGET_HAS_mulu2_i32        1
 #define TCG_TARGET_HAS_muls2_i32        1
-#define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #define TCG_TARGET_HAS_extr_i64_i32     0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_sub2_i64         1
 #define TCG_TARGET_HAS_mulu2_i64        0
 #define TCG_TARGET_HAS_muls2_i64        0
-#define TCG_TARGET_HAS_mulsh_i64        0
 
 #define TCG_TARGET_HAS_qemu_ldst_i128   0
 
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_sub2_i64         0
 #define TCG_TARGET_HAS_mulu2_i64        0
 #define TCG_TARGET_HAS_muls2_i64        0
-#define TCG_TARGET_HAS_mulsh_i64        0
 /* Turn some undef macros into true macros.  */
 #define TCG_TARGET_HAS_add2_i32         1
 #define TCG_TARGET_HAS_sub2_i32         1
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_negsetcond_i32   0
 #define TCG_TARGET_HAS_muls2_i32        1
-#define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_add2_i64         1
 #define TCG_TARGET_HAS_sub2_i64         1
 #define TCG_TARGET_HAS_mulu2_i64        1
-#define TCG_TARGET_HAS_mulsh_i64        0
 #else
 #define TCG_TARGET_HAS_mulu2_i32        1
 #endif /* TCG_TARGET_REG_BITS == 64 */
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     if (TCG_TARGET_HAS_muls2_i32) {
         tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
-    } else if (TCG_TARGET_HAS_mulsh_i32) {
+    } else if (tcg_op_supported(INDEX_op_mulsh_i32, TCG_TYPE_I32, 0)) {
         TCGv_i32 t = tcg_temp_ebb_new_i32();
         tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
         tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_HAS_muls2_i64) {
         tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
-    } else if (TCG_TARGET_HAS_mulsh_i64) {
+    } else if (tcg_op_supported(INDEX_op_mulsh_i64, TCG_TYPE_I64, 0)) {
         TCGv_i64 t = tcg_temp_ebb_new_i64();
         tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
         tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
+    OUTOP(INDEX_op_mulsh_i32, TCGOutOpBinary, outop_mulsh),
+    OUTOP(INDEX_op_mulsh_i64, TCGOutOpBinary, outop_mulsh),
     OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
     OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_mulu2_i32;
     case INDEX_op_muls2_i32:
         return TCG_TARGET_HAS_muls2_i32;
-    case INDEX_op_mulsh_i32:
-        return TCG_TARGET_HAS_mulsh_i32;
     case INDEX_op_bswap16_i32:
         return TCG_TARGET_HAS_bswap16_i32;
     case INDEX_op_bswap32_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_mulu2_i64;
     case INDEX_op_muls2_i64:
         return TCG_TARGET_HAS_muls2_i64;
280
return TCG_TARGET_HAS_muls2_i64;
281
- case INDEX_op_mulsh_i64:
282
- return TCG_TARGET_HAS_mulsh_i64;
283
284
case INDEX_op_mov_vec:
285
case INDEX_op_dup_vec:
286
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
287
case INDEX_op_andc:
288
case INDEX_op_eqv:
289
case INDEX_op_mul:
290
+ case INDEX_op_mulsh_i32:
291
+ case INDEX_op_mulsh_i64:
292
case INDEX_op_muluh:
293
case INDEX_op_nand:
294
case INDEX_op_nor:
295
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
296
index XXXXXXX..XXXXXXX 100644
297
--- a/tcg/aarch64/tcg-target.c.inc
298
+++ b/tcg/aarch64/tcg-target.c.inc
299
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
300
return type == TCG_TYPE_I64 ? C_O1_I2(r, r, r) : C_NotImplemented;
301
}
302
303
+static void tgen_mulsh(TCGContext *s, TCGType type,
304
+ TCGReg a0, TCGReg a1, TCGReg a2)
305
+{
+ tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulh,
+ .out_rrr = tgen_mulsh,
+};
+
static void tgen_muluh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
args[5], const_args[4], const_args[5], true);
break;

- case INDEX_op_mulsh_i64:
- tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_rem_i64:
case INDEX_op_remu_i32:
case INDEX_op_remu_i64:
- case INDEX_op_mulsh_i64:
return C_O1_I2(r, r, r);

case INDEX_op_shl_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_muluh = {
.base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_muluh = {
.base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_mulh_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_mulh_d(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mulsh,
+};
+
static void tgen_muluh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_mulsh_i32:
- tcg_out_opc_mulh_w(s, a0, a1, a2);
- break;
- case INDEX_op_mulsh_i64:
- tcg_out_opc_mulh_d(s, a0, a1, a2);
- break;
-
case INDEX_op_div_i32:
tcg_out_opc_div_w(s, a0, a1, a2);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond_i64:
return C_O1_I2(r, rz, rJ);

- case INDEX_op_mulsh_i32:
- case INDEX_op_mulsh_i64:
case INDEX_op_div_i32:
case INDEX_op_div_i64:
case INDEX_op_divu_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (use_mips32r6_instructions) {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MUH : OPC_DMUH;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+ } else {
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULT : OPC_DMULT;
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
+ }
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mulsh,
+};
+
static void tgen_muluh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_mulsh_i32:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
- break;
- }
- i1 = OPC_MULT, i2 = OPC_MFHI;
- goto do_hilo1;
case INDEX_op_div_i32:
if (use_mips32r6_instructions) {
tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
i1 = OPC_DIVU, i2 = OPC_MFHI;
goto do_hilo1;
- case INDEX_op_mulsh_i64:
- if (use_mips32r6_instructions) {
- tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
- break;
- }
- i1 = OPC_DMULT, i2 = OPC_MFHI;
- goto do_hilo1;
case INDEX_op_div_i64:
if (use_mips32r6_instructions) {
tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_mulsh_i32:
case INDEX_op_div_i32:
case INDEX_op_divu_i32:
case INDEX_op_rem_i32:
case INDEX_op_remu_i32:
case INDEX_op_setcond_i32:
- case INDEX_op_mulsh_i64:
case INDEX_op_div_i64:
case INDEX_op_divu_i64:
case INDEX_op_rem_i64:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? MULHW : MULHD;
+ tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_mulsh,
+};
+
static void tgen_muluh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_mulsh_i32:
- tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
- break;
- case INDEX_op_mulsh_i64:
- tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, args[0]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_divu_i32:
case INDEX_op_rem_i32:
case INDEX_op_remu_i32:
- case INDEX_op_mulsh_i32:
case INDEX_op_div_i64:
case INDEX_op_divu_i64:
case INDEX_op_rem_i64:
case INDEX_op_remu_i64:
- case INDEX_op_mulsh_i64:
return C_O1_I2(r, r, r);

case INDEX_op_clz_i32:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r);
}

+static void tgen_mulsh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mulh,
+ .out_rrr = tgen_mulsh,
+};
+
static void tgen_muluh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
break;

- case INDEX_op_mulsh_i32:
- case INDEX_op_mulsh_i64:
- tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, rI);

- case INDEX_op_mulsh_i32:
case INDEX_op_div_i32:
case INDEX_op_divu_i32:
case INDEX_op_rem_i32:
case INDEX_op_remu_i32:
- case INDEX_op_mulsh_i64:
case INDEX_op_div_i64:
case INDEX_op_divu_i64:
case INDEX_op_rem_i64:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_muluh = {
.base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_muluh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_muluh = {
.base.static_constraint = C_NotImplemented,
};
--
2.43.0
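
[Editor's note, not part of the series above: the patch converts mulsh from a
case inside a monolithic tcg_out_op switch to a per-opcode TCGOutOpBinary
entry looked up through the all_outop[] table. A minimal standalone C sketch
of that table-dispatch pattern follows; all names here (OutOp, OutOpBinary,
emit_mulsh) are illustrative stand-ins, not QEMU's actual definitions.

#include <stdio.h>

typedef enum { OP_MULSH, NB_OPS } Opcode;

typedef struct OutOp {
    int implemented;            /* stands in for the constraint-set index */
} OutOp;

typedef struct OutOpBinary {
    OutOp base;                 /* base must be the first member */
    void (*out_rrr)(int a0, int a1, int a2);
} OutOpBinary;

static void emit_mulsh(int a0, int a1, int a2)
{
    printf("mulsh r%d, r%d, r%d\n", a0, a1, a2);
}

static const OutOpBinary outop_mulsh = {
    .base.implemented = 1,
    .out_rrr = emit_mulsh,
};

/* One table entry per opcode replaces a case in a monolithic switch. */
static const OutOp *const all_outop[NB_OPS] = {
    [OP_MULSH] = &outop_mulsh.base,
};

int main(void)
{
    /* Downcast from base is safe because base is the first member. */
    const OutOpBinary *out = (const OutOpBinary *)all_outop[OP_MULSH];
    out->out_rrr(0, 1, 2);
    return 0;
}
]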
+{
+ return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+}
+
+/* Find the TLB entry corresponding to the mmu_idx + address pair. */
+static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
+ target_ulong addr)
+{
+ return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)];
+}
+
#ifdef MMU_MODE0_SUFFIX
#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
@@ -XXX,XX +XXX,XX @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
#if defined(CONFIG_USER_ONLY)
return g2h(addr);
#else
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
+ CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
abi_ptr tlb_addr;
uintptr_t haddr;

@@ -XXX,XX +XXX,XX @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
return NULL;
}

- haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ haddr = addr + tlbentry->addend;
return (void *)haddr;
#endif /* defined(CONFIG_USER_ONLY) */
}
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_template.h
+++ b/include/exec/cpu_ldst_template.h
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong ptr,
uintptr_t retaddr)
{
- int page_index;
+ CPUTLBEntry *entry;
RES_TYPE res;
target_ulong addr;
int mmu_idx;
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#endif

addr = ptr;
- page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
- if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ entry = tlb_entry(env, mmu_idx, addr);
+ if (unlikely(entry->ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
oi, retaddr);
} else {
- uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
}
return res;
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong ptr,
uintptr_t retaddr)
{
- int res, page_index;
+ CPUTLBEntry *entry;
+ int res;
target_ulong addr;
int mmu_idx;
TCGMemOpIdx oi;
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#endif

addr = ptr;
- page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
- if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
+ entry = tlb_entry(env, mmu_idx, addr);
+ if (unlikely(entry->ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
MMUSUFFIX)(env, addr, oi, retaddr);
} else {
- uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
}
return res;
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong ptr,
RES_TYPE v, uintptr_t retaddr)
{
- int page_index;
+ CPUTLBEntry *entry;
target_ulong addr;
int mmu_idx;
TCGMemOpIdx oi;
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
#endif

addr = ptr;
- page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
- if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
+ entry = tlb_entry(env, mmu_idx, addr);
+ if (unlikely(entry->addr_write !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
retaddr);
} else {
- uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
+ uintptr_t hostaddr = addr + entry->addend;
glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
}
}
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
target_ulong addr = (target_ulong) data.target_ptr;
- int i;
int mmu_idx;

assert_cpu_is_self(cpu);
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
}

addr &= TARGET_PAGE_MASK;
- i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
qemu_spin_lock(&env->tlb_lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- tlb_flush_entry_locked(&env->tlb_table[mmu_idx][i], addr);
+ tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
}
qemu_spin_unlock(&env->tlb_lock);
}
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
- int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
int mmu_idx;

assert_cpu_is_self(cpu);

- tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
- page, addr, mmu_idx_bitmap);
+ tlb_debug("flush page addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
+ addr, mmu_idx_bitmap);

qemu_spin_lock(&env->tlb_lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
- tlb_flush_entry_locked(&env->tlb_table[mmu_idx][page], addr);
+ tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
}
}
@@ -XXX,XX +XXX,XX @@ static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
CPUArchState *env = cpu->env_ptr;
- int i;
int mmu_idx;

assert_cpu_is_self(cpu);

vaddr &= TARGET_PAGE_MASK;
- i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
qemu_spin_lock(&env->tlb_lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- tlb_set_dirty1_locked(&env->tlb_table[mmu_idx][i], vaddr);
+ tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
}

for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
paddr_page, xlat, prot, &address);

- index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- te = &env->tlb_table[mmu_idx][index];
+ index = tlb_index(env, mmu_idx, vaddr_page);
+ te = tlb_entry(env, mmu_idx, vaddr_page);

/*
* Hold the TLB lock for the rest of the function. We could acquire/release
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
* repeat the MMU check here. This tlb_fill() call might
* longjump out if this access should cause a guest exception.
*/
- int index;
+ CPUTLBEntry *entry;
target_ulong tlb_addr;

tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
+ entry = tlb_entry(env, mmu_idx, addr);
+ tlb_addr = entry->addr_read;
if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
/* RAM access */
- uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ uintptr_t haddr = addr + entry->addend;

return ldn_p((void *)haddr, size);
}
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
* repeat the MMU check here. This tlb_fill() call might
* longjump out if this access should cause a guest exception.
*/
- int index;
+ CPUTLBEntry *entry;
target_ulong tlb_addr;

tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ entry = tlb_entry(env, mmu_idx, addr);
+ tlb_addr = entry->addr_write;
if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
/* RAM access */
- uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ uintptr_t haddr = addr + entry->addend;

stn_p((void *)haddr, size, val);
return;
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
*/
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
- int mmu_idx, index;
+ uintptr_t mmu_idx = cpu_mmu_index(env, true);
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
void *p;

- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- mmu_idx = cpu_mmu_index(env, true);
- if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) {
+ if (unlikely(!tlb_hit(entry->addr_code, addr))) {
if (!VICTIM_TLB_HIT(addr_code, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
}
- assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
+ assert(tlb_hit(entry->addr_code, addr));
}

- if (unlikely(env->tlb_table[mmu_idx][index].addr_code &
- (TLB_RECHECK | TLB_MMIO))) {
+ if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
/*
* Return -1 if we can't translate and execute from an entire
* page of RAM here, which will cause us to execute by loading
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
return -1;
}

- p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
+ p = (void *)((uintptr_t)addr + entry->addend);
return qemu_ram_addr_from_host_nofail(p);
}

@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr)
{
- int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

- if (!tlb_hit(tlb_addr, addr)) {
+ if (!tlb_hit(entry->addr_write, addr)) {
/* TLB entry is for a different page */
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
NotDirtyInfo *ndi)
{
size_t mmu_idx = get_mmuidx(oi);
- size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlbe->addr_write;
TCGMemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);

--
2.17.2
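
[Editor's note, not part of the series above: the tlb_index()/tlb_entry()
helpers introduced in that patch centralize the direct-mapped TLB lookup.
A standalone C model of the same index computation follows; TARGET_PAGE_BITS
and CPU_TLB_SIZE values are illustrative (QEMU derives them per target).

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12     /* 4 KiB pages, assumed for the demo */
#define CPU_TLB_SIZE 256        /* must be a power of two */

static size_t tlb_index(uint64_t addr)
{
    /* Page number, folded into the table with a power-of-two mask. */
    return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}

int main(void)
{
    /* Two addresses on the same guest page hit the same TLB slot. */
    printf("%zu %zu\n",
           tlb_index(0x7f001234), tlb_index(0x7f001ff0));
    return 0;
}
]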
GCC7+ will no longer advertise support for 16-byte __atomic operations
if only cmpxchg is supported, as for x86_64. Fortunately, x86_64 still
has support for __sync_compare_and_swap_16 and we can make use of that.
AArch64 does not have, nor ever has had such support, so open-code it.

Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
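
[Editor's note: the configure probe added below uses exactly the __sync
primitive named in the message. A self-contained demo of that builtin
follows; it assumes a 64-bit host with 16-byte cmpxchg support (on x86_64,
compile with gcc -mcx16).

#include <stdio.h>

int main(void)
{
    unsigned __int128 x = 1, expect = 1, desired = 2;

    /* Returns the previous value; equal to 'expect' iff the swap happened. */
    unsigned __int128 old = __sync_val_compare_and_swap_16(&x, expect, desired);

    printf("%s\n", (old == expect && x == desired) ? "swapped" : "kept");
    return 0;
}
]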
 accel/tcg/atomic_template.h | 20 ++++-
 include/qemu/atomic128.h | 153 ++++++++++++++++++++++++++++++++++++
 include/qemu/compiler.h | 11 +++
 tcg/tcg.h | 16 ++--
 accel/tcg/cputlb.c | 3 +-
 accel/tcg/user-exec.c | 5 +-
 configure | 19 +++++
 7 files changed, 213 insertions(+), 14 deletions(-)
 create mode 100644 include/qemu/atomic128.h

diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
DATA_TYPE ret;

ATOMIC_TRACE_RMW;
+#if DATA_SIZE == 16
+ ret = atomic16_cmpxchg(haddr, cmpv, newv);
+#else
ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
+#endif
ATOMIC_MMU_CLEANUP;
return ret;
}

#if DATA_SIZE >= 16
+#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

ATOMIC_TRACE_LD;
- __atomic_load(haddr, &val, __ATOMIC_RELAXED);
+ val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
return val;
}
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;

ATOMIC_TRACE_ST;
- __atomic_store(haddr, &val, __ATOMIC_RELAXED);
+ atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
}
+#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
@@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
DATA_TYPE ret;

ATOMIC_TRACE_RMW;
+#if DATA_SIZE == 16
+ ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
+#else
ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
+#endif
ATOMIC_MMU_CLEANUP;
return BSWAP(ret);
}

#if DATA_SIZE >= 16
+#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;

ATOMIC_TRACE_LD;
- __atomic_load(haddr, &val, __ATOMIC_RELAXED);
+ val = atomic16_read(haddr);
ATOMIC_MMU_CLEANUP;
return BSWAP(val);
}
@@ -XXX,XX +XXX,XX @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,

ATOMIC_TRACE_ST;
val = BSWAP(val);
- __atomic_store(haddr, &val, __ATOMIC_RELAXED);
+ atomic16_set(haddr, val);
ATOMIC_MMU_CLEANUP;
}
+#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/qemu/atomic128.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Simple interface for 128-bit atomic operations.
+ *
+ * Copyright (C) 2018 Linaro, Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * See docs/devel/atomics.txt for discussion about the guarantees each
+ * atomic primitive is meant to provide.
+ */
+
+#ifndef QEMU_ATOMIC128_H
+#define QEMU_ATOMIC128_H
+
+/*
+ * GCC is a house divided about supporting large atomic operations.
+ *
+ * For hosts that only have large compare-and-swap, a legalistic reading
+ * of the C++ standard means that one cannot implement __atomic_read on
+ * read-only memory, and thus all atomic operations must synchronize
+ * through libatomic.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
+ *
+ * This interpretation is not especially helpful for QEMU.
+ * For softmmu, all RAM is always read/write from the hypervisor.
+ * For user-only, if the guest doesn't implement such an __atomic_read
+ * then the host need not worry about it either.
+ *
+ * Moreover, using libatomic is not an option, because its interface is
+ * built for std::atomic<T>, and requires that *all* accesses to such an
+ * object go through the library. In our case we do not have an object
+ * in the C/C++ sense, but a view of memory as seen by the guest.
+ * The guest may issue a large atomic operation and then access those
+ * pieces using word-sized accesses. From the hypervisor, we have no
+ * way to connect those two actions.
+ *
+ * Therefore, special case each platform.
+ */
+
+#if defined(CONFIG_ATOMIC128)
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+ return atomic_cmpxchg__nocheck(ptr, cmp, new);
+}
+# define HAVE_CMPXCHG128 1
+#elif defined(CONFIG_CMPXCHG128)
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+ return __sync_val_compare_and_swap_16(ptr, cmp, new);
+}
+# define HAVE_CMPXCHG128 1
+#elif defined(__aarch64__)
+/* Through gcc 8, aarch64 has no support for 128-bit at all. */
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+ uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
+ uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
+ uint64_t oldl, oldh;
+ uint32_t tmp;
+
+ asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
+ "cmp %[oldl], %[cmpl]\n\t"
+ "ccmp %[oldh], %[cmph], #0, eq\n\t"
+ "b.ne 1f\n\t"
+ "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
+ "cbnz %w[tmp], 0b\n"
+ "1:"
+ : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
+ [oldl] "=&r"(oldl), [oldh] "=r"(oldh)
+ : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
+ [newl] "r"(newl), [newh] "r"(newh)
+ : "memory", "cc");
+
+ return int128_make128(oldl, oldh);
+}
+# define HAVE_CMPXCHG128 1
+#else
+/* Fallback definition that must be optimized away, or error. */
+Int128 QEMU_ERROR("unsupported atomic")
+ atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
+# define HAVE_CMPXCHG128 0
+#endif /* Some definition for HAVE_CMPXCHG128 */
+
+
+#if defined(CONFIG_ATOMIC128)
+static inline Int128 atomic16_read(Int128 *ptr)
+{
+ return atomic_read__nocheck(ptr);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+ atomic_set__nocheck(ptr, val);
+}
+
+# define HAVE_ATOMIC128 1
+#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
+/* We can do better than cmpxchg for AArch64. */
+static inline Int128 atomic16_read(Int128 *ptr)
+{
+ uint64_t l, h;
+ uint32_t tmp;
+
+ /* The load must be paired with the store to guarantee not tearing. */
+ asm("0: ldxp %[l], %[h], %[mem]\n\t"
+ "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
+ "cbnz %w[tmp], 0b"
+ : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
+
+ return int128_make128(l, h);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+ uint64_t l = int128_getlo(val), h = int128_gethi(val);
+ uint64_t t1, t2;
+
+ /* Load into temporaries to acquire the exclusive access lock. */
+ asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
+ "stxp %w[t1], %[l], %[h], %[mem]\n\t"
+ "cbnz %w[t1], 0b"
+ : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
+ : [l] "r"(l), [h] "r"(h));
+}
+
+# define HAVE_ATOMIC128 1
+#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
+static inline Int128 atomic16_read(Int128 *ptr)
+{
+ /* Maybe replace 0 with 0, returning the old value. */
+ return atomic16_cmpxchg(ptr, 0, 0);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+ Int128 old = *ptr, cmp;
+ do {
+ cmp = old;
+ old = atomic16_cmpxchg(ptr, cmp, val);
+ } while (old != cmp);
+}
+
+# define HAVE_ATOMIC128 1
+#else
+/* Fallback definitions that must be optimized away, or error. */
+Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr);
+void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val);
+# define HAVE_ATOMIC128 0
+#endif /* Some definition for HAVE_ATOMIC128 */
+
+#endif /* QEMU_ATOMIC128_H */
diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/compiler.h
+++ b/include/qemu/compiler.h
@@ -XXX,XX +XXX,XX @@
# define QEMU_FLATTEN
#endif

+/*
+ * If __attribute__((error)) is present, use it to produce an error at
+ * compile time. Otherwise, one must wait for the linker to diagnose
+ * the missing symbol.
+ */
+#if __has_attribute(error)
+# define QEMU_ERROR(X) __attribute__((error(X)))
+#else
+# define QEMU_ERROR(X)
+#endif
+
/* Implement C11 _Generic via GCC builtins. Example:
*
* QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x)
diff --git a/tcg/tcg.h b/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"
+#include "qemu/int128.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266
@@ -XXX,XX +XXX,XX @@ GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */

-#ifdef CONFIG_ATOMIC128
-#include "qemu/int128.h"
-
-/* These aren't really a "proper" helpers because TCG cannot manage Int128.
- However, use the same format as the others, for use by the backends. */
+/*
+ * These aren't really a "proper" helpers because TCG cannot manage Int128.
+ * However, use the same format as the others, for use by the backends.
+ *
+ * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
+ * the ld/st functions are only defined if HAVE_ATOMIC128,
+ * as defined by <qemu/atomic128.h>.
+ */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
TCGMemOpIdx oi, uintptr_t retaddr);
@@ -XXX,XX +XXX,XX @@ void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
TCGMemOpIdx oi, uintptr_t retaddr);

-#endif /* CONFIG_ATOMIC128 */
-
#endif /* TCG_H */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
+#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif

-#ifdef CONFIG_ATOMIC128
+#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@
#include "exec/cpu_ldst.h"
#include "translate-all.h"
#include "exec/helper-proto.h"
+#include "qemu/atomic128.h"

#undef EAX
#undef ECX
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* The following is only callable from other helpers, and matches up
with the softmmu version. */

-#ifdef CONFIG_ATOMIC128
+#if HAVE_ATOMIC128 || HAVE_CMPXCHG128

#undef EXTRA_ARGS
#undef ATOMIC_NAME
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,

#define DATA_SIZE 16
#include "atomic_template.h"
-#endif /* CONFIG_ATOMIC128 */
+#endif
diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ EOF
fi
fi

+cmpxchg128=no
+if test "$int128" = yes -a "$atomic128" = no; then
+ cat > $TMPC << EOF
+int main(void)
+{
+ unsigned __int128 x = 0, y = 0;
+ __sync_val_compare_and_swap_16(&x, y, x);
+ return 0;
+}
+EOF
+ if compile_prog "" "" ; then
+ cmpxchg128=yes
+ fi
+fi
+
#########################################
# See if 64-bit atomic operations are supported.
# Note that without __atomic builtins, we can only
@@ -XXX,XX +XXX,XX @@ if test "$atomic128" = "yes" ; then
echo "CONFIG_ATOMIC128=y" >> $config_host_mak
fi

+if test "$cmpxchg128" = "yes" ; then
+ echo "CONFIG_CMPXCHG128=y" >> $config_host_mak
+fi
+
if test "$atomic64" = "yes" ; then
echo "CONFIG_ATOMIC64=y" >> $config_host_mak
fi

Drop all backend support for an immediate as the first operand.
This should never happen in any case, as we swap commutative
operands to place immediates as the second operand.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target-con-set.h | 1 +
 tcg/tcg.c | 41 +++++++++++-
 tcg/aarch64/tcg-target.c.inc | 51 +++++++-------
 tcg/arm/tcg-target.c.inc | 43 ++++++++----
 tcg/i386/tcg-target.c.inc | 56 +++++++-------
 tcg/loongarch64/tcg-target.c.inc | 38 +++++------
 tcg/mips/tcg-target.c.inc | 31 ++++++---
 tcg/ppc/tcg-target.c.inc | 47 +++++++------
 tcg/riscv/tcg-target.c.inc | 39 ++++++-----
 tcg/s390x/tcg-target.c.inc | 110 +++++++++++++++----------------
 tcg/sparc64/tcg-target.c.inc | 25 +++++--
 tcg/tci/tcg-target.c.inc | 15 ++++-
 12 files changed, 302 insertions(+), 195 deletions(-)

diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I2(rz, r)
C_O0_I2(rz, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
+C_O1_I2(r, r, rJ)
C_O1_I2(r, rz, rJ)
C_O1_I4(r, rz, rJ, rI, 0)
C_O2_I2(r, r, rz, rJ)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOp {
TCGConstraintSetIndex (*dynamic_constraint)(TCGType type, unsigned flags);
} TCGOutOp;

+typedef struct TCGOutOpBinary {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2);
+ void (*out_rri)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2);
+} TCGOutOpBinary;
+
#include "tcg-target.c.inc"

#ifndef CONFIG_TCG_INTERPRETER
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
< MIN_TLB_MASK_TABLE_OFS);
#endif

+/*
+ * Register V as the TCGOutOp for O.
+ * This verifies that V is of type T, otherwise give a nice compiler error.
+ * This prevents trivial mistakes within each arch/tcg-target.c.inc.
+ */
+#define OUTOP(O, T, V) [O] = _Generic(V, T: &V.base)
+
/* Register allocation descriptions for every TCGOpcode. */
static const TCGOutOp * const all_outop[NB_OPS] = {
+ OUTOP(INDEX_op_add_i32, TCGOutOpBinary, outop_add),
+ OUTOP(INDEX_op_add_i64, TCGOutOpBinary, outop_add),
};

+#undef OUTOP
+
/*
* All TCG threads except the parent (i.e. the one that called tcg_context_init
* and registered the target's TCG globals) must register with this function
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}

/* emit instruction */
+ TCGType type = TCGOP_TYPE(op);
switch (op->opc) {
case INDEX_op_ext_i32_i64:
tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_extrl_i64_i32:
tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
break;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ {
+ const TCGOutOpBinary *out =
+ container_of(all_outop[op->opc], TCGOutOpBinary, base);
+
+ /* Constants should never appear in the first source operand. */
+ tcg_debug_assert(!const_args[1]);
+ if (const_args[2]) {
+ out->out_rri(s, type, new_args[0], new_args[1], new_args[2]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+ }
+ }
+ break;
+
default:
if (def->flags & TCG_OPF_VECTOR) {
- tcg_out_vec_op(s, op->opc, TCGOP_TYPE(op) - TCG_TYPE_V64,
+ tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
TCGOP_VECE(op), new_args, const_args);
} else {
- tcg_out_op(s, op->opc, TCGOP_TYPE(op), new_args, const_args);
+ tcg_out_op(s, op->opc, type, new_args, const_args);
}
break;
}
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}

-static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
- TCGReg rn, int64_t aimm)
-{
- if (aimm >= 0) {
- tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
- } else {
- tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
- }
-}
-
static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
TCGReg rh, TCGReg al, TCGReg ah,
tcg_target_long bl, tcg_target_long bh,
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, ADD, type, a0, a1, a2);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, ADDI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, SUBI, type, a0, a1, -a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rA),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
break;

- case INDEX_op_add_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_addsubi(s, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_sub_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
case INDEX_op_sub_i64:
if (c2) {
- tcg_out_addsubi(s, ext, a0, a1, -a2);
+ tgen_addi(s, ext, a0, a1, -a2);
} else {
tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
}
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return C_O1_I2(r, r, rA);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
}
}

+static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc,
+ ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs)
+{
+ int imm12 = encode_imm(rhs);
+ if (imm12 < 0) {
+ imm12 = encode_imm_nofail(-rhs);
+ opc = opneg;
+ }
+ tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+}
+
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
bool rhs_is_const)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
* rhs must satisfy the "rIN" constraint.
*/
if (rhs_is_const) {
- int imm12 = encode_imm(rhs);
- if (imm12 < 0) {
- imm12 = encode_imm_nofail(-rhs);
- opc = opneg;
- }
- tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
+ tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs);
} else {
tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
}
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
ARITH_MVN, args[0], 0, args[3], const_args[3]);
break;
- case INDEX_op_add_i32:
- tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
- args[0], args[1], args[2], const_args[2]);
- break;
case INDEX_op_sub_i32:
if (const_args[1]) {
if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i32:
return C_O0_I2(r, r);

- case INDEX_op_add_i32:
case INDEX_op_sub_i32:
case INDEX_op_setcond_i32:
case INDEX_op_negsetcond_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
/* no need to flush icache explicitly */
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ if (a0 == a1) {
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a2);
+ } else if (a0 == a2) {
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
+ } else {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, 0);
+ }
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ if (a0 == a1) {
+ tgen_arithi(s, ARITH_ADD + rexw, a0, a2, false);
+ } else {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, -1, 0, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, re),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(add):
- /* For 3-operand addition, use LEA. */
- if (a0 != a1) {
- TCGArg c3 = 0;
- if (const_a2) {
- c3 = a2, a2 = -1;
- } else if (a0 == a2) {
- /* Watch out for dest = src + dest, since we've removed
- the matching constraint on the add. */
- tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
- break;
- }
-
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
- break;
- }
- c = ARITH_ADD;
- goto gen_arith;
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(re, r);

- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, re);
-
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_add_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_d(s, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tcg_out_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_add_i32:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
- } else {
- tcg_out_opc_add_w(s, a0, a1, a2);
- }
- break;
- case INDEX_op_add_i64:
- if (c2) {
- tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
- } else {
- tcg_out_opc_add_d(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_sub_i32:
if (c2) {
tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_rotr_i64:
return C_O1_I2(r, r, ri);

- case INDEX_op_add_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rJ);
-
case INDEX_op_and_i32:
case INDEX_op_and_i64:
case INDEX_op_nor_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
/* Always indirect, nothing to do */
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDU : OPC_DADDU;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIU : OPC_DADDIU;
+ tcg_out_opc_imm(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_add_i32:
- i1 = OPC_ADDU, i2 = OPC_ADDIU;
- goto do_binary;
- case INDEX_op_add_i64:
- i1 = OPC_DADDU, i2 = OPC_DADDIU;
- goto do_binary;
case INDEX_op_or_i32:
case INDEX_op_or_i64:
i1 = OPC_OR, i2 = OPC_ORI;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rJ);
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return C_O1_I2(r, rz, rN);
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ADD | TAB(a0, a1, a2));
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rT),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;

- case INDEX_op_add_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_32:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
case INDEX_op_sub_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[1]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
}
} else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_32;
+ tgen_addi(s, type, a0, a1, (int32_t)-a2);
} else {
tcg_out32(s, SUBF | TAB(a0, a2, a1));
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
break;

- case INDEX_op_add_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- do_addi_64:
- tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
- } else {
- tcg_out32(s, ADD | TAB(a0, a1, a2));
- }
- break;
case INDEX_op_sub_i64:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[1]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
}
} else if (const_args[2]) {
- a2 = -a2;
- goto do_addi_64;
+ tgen_addi(s, type, a0, a1, -a2);
} else {
tcg_out32(s, SUBF | TAB(a0, a2, a1));
}
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_add_i32:
case INDEX_op_and_i32:
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_sub_i32:
return C_O1_I2(r, rI, ri);
- case INDEX_op_add_i64:
- return C_O1_I2(r, r, rT);
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rU);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
flush_idcache_range(jmp_rx, jmp_rw, 4);
}

+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
+ tcg_out_opc_imm(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
break;

- case INDEX_op_add_i32:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
- }
- break;
- case INDEX_op_add_i64:
- if (c2) {
629
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
630
- } else {
631
- tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
632
- }
633
- break;
634
-
635
case INDEX_op_sub_i32:
636
if (c2) {
637
tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
638
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
639
case INDEX_op_st_i64:
640
return C_O0_I2(rz, r);
641
642
- case INDEX_op_add_i32:
643
case INDEX_op_and_i32:
644
case INDEX_op_or_i32:
645
case INDEX_op_xor_i32:
646
- case INDEX_op_add_i64:
647
case INDEX_op_and_i64:
648
case INDEX_op_or_i64:
649
case INDEX_op_xor_i64:
650
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
651
index XXXXXXX..XXXXXXX 100644
652
--- a/tcg/s390x/tcg-target.c.inc
653
+++ b/tcg/s390x/tcg-target.c.inc
654
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
655
/* no need to flush icache explicitly */
656
}
657
658
+
659
+static void tgen_add(TCGContext *s, TCGType type,
660
+ TCGReg a0, TCGReg a1, TCGReg a2)
661
+{
662
+ if (a0 != a1) {
663
+ tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
664
+ } else if (type == TCG_TYPE_I32) {
665
+ tcg_out_insn(s, RR, AR, a0, a2);
666
+ } else {
667
+ tcg_out_insn(s, RRE, AGR, a0, a2);
668
+ }
669
+}
670
+
671
+static void tgen_addi(TCGContext *s, TCGType type,
672
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
673
+{
674
+ if (a0 == a1) {
675
+ if (type == TCG_TYPE_I32) {
676
+ if (a2 == (int16_t)a2) {
677
+ tcg_out_insn(s, RI, AHI, a0, a2);
678
+ } else {
679
+ tcg_out_insn(s, RIL, AFI, a0, a2);
680
+ }
681
+ return;
682
+ }
683
+ if (a2 == (int16_t)a2) {
684
+ tcg_out_insn(s, RI, AGHI, a0, a2);
685
+ return;
686
+ }
687
+ if (a2 == (int32_t)a2) {
688
+ tcg_out_insn(s, RIL, AGFI, a0, a2);
689
+ return;
690
+ }
691
+ if (a2 == (uint32_t)a2) {
692
+ tcg_out_insn(s, RIL, ALGFI, a0, a2);
693
+ return;
694
+ }
695
+ if (-a2 == (uint32_t)-a2) {
696
+ tcg_out_insn(s, RIL, SLGFI, a0, -a2);
697
+ return;
698
+ }
699
+ }
700
+ tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
701
+}
702
+
703
+static const TCGOutOpBinary outop_add = {
704
+ .base.static_constraint = C_O1_I2(r, r, ri),
705
+ .out_rrr = tgen_add,
706
+ .out_rri = tgen_addi,
707
+};
708
+
709
+
710
# define OP_32_64(x) \
711
case glue(glue(INDEX_op_,x),_i32): \
712
case glue(glue(INDEX_op_,x),_i64)
713
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
714
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
715
break;
716
717
- case INDEX_op_add_i32:
718
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
719
- if (const_args[2]) {
720
- do_addi_32:
721
- if (a0 == a1) {
722
- if (a2 == (int16_t)a2) {
723
- tcg_out_insn(s, RI, AHI, a0, a2);
724
- break;
725
- }
726
- tcg_out_insn(s, RIL, AFI, a0, a2);
727
- break;
728
- }
729
- tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
730
- } else if (a0 == a1) {
731
- tcg_out_insn(s, RR, AR, a0, a2);
732
- } else {
733
- tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
734
- }
735
- break;
736
case INDEX_op_sub_i32:
737
- a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
738
+ a0 = args[0], a1 = args[1], a2 = args[2];
739
if (const_args[2]) {
740
- a2 = -a2;
741
- goto do_addi_32;
742
+ tgen_addi(s, type, a0, a1, (int32_t)-a2);
743
} else if (a0 == a1) {
744
tcg_out_insn(s, RR, SR, a0, a2);
745
} else {
746
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
747
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
748
break;
749
750
- case INDEX_op_add_i64:
751
- a0 = args[0], a1 = args[1], a2 = args[2];
752
- if (const_args[2]) {
753
- do_addi_64:
754
- if (a0 == a1) {
755
- if (a2 == (int16_t)a2) {
756
- tcg_out_insn(s, RI, AGHI, a0, a2);
757
- break;
758
- }
759
- if (a2 == (int32_t)a2) {
760
- tcg_out_insn(s, RIL, AGFI, a0, a2);
761
- break;
762
- }
763
- if (a2 == (uint32_t)a2) {
764
- tcg_out_insn(s, RIL, ALGFI, a0, a2);
765
- break;
766
- }
767
- if (-a2 == (uint32_t)-a2) {
768
- tcg_out_insn(s, RIL, SLGFI, a0, -a2);
769
- break;
770
- }
771
- }
772
- tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
773
- } else if (a0 == a1) {
774
- tcg_out_insn(s, RRE, AGR, a0, a2);
775
- } else {
776
- tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
777
- }
778
- break;
779
case INDEX_op_sub_i64:
780
a0 = args[0], a1 = args[1], a2 = args[2];
781
if (const_args[2]) {
782
- a2 = -a2;
783
- goto do_addi_64;
784
+ tgen_addi(s, type, a0, a1, -a2);
785
} else {
786
tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
787
}
788
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
789
case INDEX_op_st_i64:
790
return C_O0_I2(r, r);
791
792
- case INDEX_op_add_i32:
793
- case INDEX_op_add_i64:
794
case INDEX_op_shl_i64:
795
case INDEX_op_shr_i64:
796
case INDEX_op_sar_i64:
797
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
798
index XXXXXXX..XXXXXXX 100644
799
--- a/tcg/sparc64/tcg-target.c.inc
800
+++ b/tcg/sparc64/tcg-target.c.inc
801
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
802
{
803
}
804
805
+
806
+static void tgen_add(TCGContext *s, TCGType type,
807
+ TCGReg a0, TCGReg a1, TCGReg a2)
808
+{
809
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
810
+}
811
+
812
+static void tgen_addi(TCGContext *s, TCGType type,
813
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
814
+{
815
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
816
+}
817
+
818
+static const TCGOutOpBinary outop_add = {
819
+ .base.static_constraint = C_O1_I2(r, r, rJ),
820
+ .out_rrr = tgen_add,
821
+ .out_rri = tgen_addi,
822
+};
823
+
824
+
825
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
826
const TCGArg args[TCG_MAX_OP_ARGS],
827
const int const_args[TCG_MAX_OP_ARGS])
828
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
829
case INDEX_op_st32_i64:
830
tcg_out_ldst(s, a0, a1, a2, STW);
831
break;
832
- OP_32_64(add):
833
- c = ARITH_ADD;
834
- goto gen_arith;
835
OP_32_64(sub):
836
c = ARITH_SUB;
837
goto gen_arith;
838
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
839
case INDEX_op_qemu_st_i64:
840
return C_O0_I2(rz, r);
841
842
- case INDEX_op_add_i32:
843
- case INDEX_op_add_i64:
844
case INDEX_op_mul_i32:
845
case INDEX_op_mul_i64:
846
case INDEX_op_div_i32:
847
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
848
index XXXXXXX..XXXXXXX 100644
849
--- a/tcg/tci/tcg-target.c.inc
850
+++ b/tcg/tci/tcg-target.c.inc
851
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
852
case INDEX_op_rem_i64:
853
case INDEX_op_remu_i32:
854
case INDEX_op_remu_i64:
855
- case INDEX_op_add_i32:
856
- case INDEX_op_add_i64:
857
case INDEX_op_sub_i32:
858
case INDEX_op_sub_i64:
859
case INDEX_op_mul_i32:
860
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
861
/* Always indirect, nothing to do */
862
}
863
864
+static void tgen_add(TCGContext *s, TCGType type,
865
+ TCGReg a0, TCGReg a1, TCGReg a2)
866
+{
867
+ tcg_out_op_rrr(s, glue(INDEX_op_add_i,TCG_TARGET_REG_BITS), a0, a1, a2);
868
+}
869
+
870
+static const TCGOutOpBinary outop_add = {
871
+ .base.static_constraint = C_O1_I2(r, r, r),
872
+ .out_rrr = tgen_add,
873
+};
874
+
875
+
876
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
877
const TCGArg args[TCG_MAX_OP_ARGS],
878
const int const_args[TCG_MAX_OP_ARGS])
879
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
880
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
881
break;
882
883
- CASE_32_64(add)
884
CASE_32_64(sub)
885
CASE_32_64(mul)
886
CASE_32_64(and)
887
--
404
--
888
2.43.0
405
2.17.2
889
406
890
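Each backend above now describes add through a TCGOutOpBinary: a static constraint plus out_rrr/out_rri emitters, with the common code in tcg.c picking the right emitter once register allocation has decided whether the second source is a register or a constant. The core-side plumbing is not part of this patch; as a rough sketch of the shape it takes (the struct fields mirror the descriptors above, while expand_binary() is a hypothetical stand-in for the real expansion path):

```c
/*
 * Sketch only.  The field names follow the outop_add descriptors above;
 * expand_binary() is a hypothetical stand-in for the dispatch in tcg.c.
 */
typedef struct TCGOutOpBinary {
    TCGOutOp base;                        /* carries static_constraint */
    void (*out_rrr)(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2);
    void (*out_rri)(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, tcg_target_long a2);
} TCGOutOpBinary;

static void expand_binary(TCGContext *s, const TCGOutOpBinary *op,
                          TCGType type, TCGReg a0, TCGReg a1,
                          TCGArg a2, bool a2_const)
{
    if (a2_const) {
        /* A constant is only presented when the constraint
           (rJ, rI, rT, ri, ...) accepted one, so out_rri is set. */
        op->out_rri(s, type, a0, a1, (tcg_target_long)a2);
    } else {
        op->out_rrr(s, type, a0, a1, (TCGReg)a2);
    }
}
```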
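The next patch replaces the compile-time CONFIG_ATOMIC128 ifdefs around cmpxchg16b with a runtime HAVE_CMPXCHG128 test, so a single build covers hosts with and without a 128-bit compare-and-swap. A minimal sketch of the pattern with stand-in declarations (these are not the QEMU API):

```c
#include <stdbool.h>

/* Stand-ins: in QEMU, HAVE_CMPXCHG128 comes from qemu/atomic128.h and is
 * a per-host compile-time constant, so the dead branch folds away while
 * both branches still get type-checked on every host. */
extern bool have_cmpxchg128;
extern void fast_cmpxchg16b(void *env, unsigned long addr);
extern void cpu_loop_exit_atomic_sketch(void *cpu);

static void cmpxchg16b_sketch(void *env, void *cpu, unsigned long addr)
{
    if (have_cmpxchg128) {
        /* Fast path: one atomic 128-bit compare-and-swap. */
        fast_cmpxchg16b(env, addr);
    } else {
        /* Slow path: restart the instruction with all other vCPUs halted. */
        cpu_loop_exit_atomic_sketch(cpu);
    }
}
```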
Reviewed-by: Emilio G. Cota <cota@braap.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/mem_helper.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/mem_helper.c
+++ b/target/i386/mem_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
 #include "qemu/int128.h"
+#include "qemu/atomic128.h"
 #include "tcg.h"
 
 void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
 
     if ((a0 & 0xf) != 0) {
         raise_exception_ra(env, EXCP0D_GPF, ra);
-    } else {
-#ifndef CONFIG_ATOMIC128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
+    } else if (HAVE_CMPXCHG128) {
         int eflags = cpu_cc_compute_all(env, CC_OP);
 
         Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
@@ -XXX,XX +XXX,XX @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
             eflags &= ~CC_Z;
         }
         CC_SRC = eflags;
-#endif
+    } else {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
     }
 }
 #endif
--
2.17.2


Pass the sparc COND_* value not the tcg TCG_COND_* value.
This makes the usage within add2/sub2 clearer.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.c.inc | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
     tcg_out_nop(s);
 }
 
-static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
+static void tcg_out_movcc(TCGContext *s, int scond, int cc, TCGReg ret,
                           int32_t v1, int v1const)
 {
-    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
-              | INSN_RS1(tcg_cond_to_bcond[cond])
+    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) | INSN_RS1(scond)
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                 int32_t v1, int v1const)
 {
     tcg_out_cmp(s, cond, c1, c2, c2const);
-    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
+    tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_ICC, ret, v1, v1const);
 }
 
 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
         tcg_out_movr(s, rcond, ret, c1, v1, v1const);
     } else {
         tcg_out_cmp(s, cond, c1, c2, c2const);
-        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
+        tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_XCC, ret, v1, v1const);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
     default:
         tcg_out_cmp(s, cond, c1, c2, c2const);
         tcg_out_movi_s13(s, ret, 0);
-        tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
+        tcg_out_movcc(s, tcg_cond_to_bcond[cond],
+                      MOVCC_ICC, ret, neg ? -1 : 1, 1);
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
     } else {
         tcg_out_cmp(s, cond, c1, c2, c2const);
         tcg_out_movi_s13(s, ret, 0);
-        tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
+        tcg_out_movcc(s, tcg_cond_to_bcond[cond],
+                      MOVCC_XCC, ret, neg ? -1 : 1, 1);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
         if (rh == ah) {
             tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                            is_sub ? ARITH_SUB : ARITH_ADD);
-            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
+            tcg_out_movcc(s, COND_CS, MOVCC_XCC, rh, TCG_REG_T2, 0);
         } else {
             tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
-            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
+            tcg_out_movcc(s, COND_CC, MOVCC_XCC, rh, ah, 0);
         }
     } else {
         /*
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                        is_sub ? ARITH_SUB : ARITH_ADD);
         }
         /* ... smoosh T2 back to original BH if carry is clear ... */
-        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
+        tcg_out_movcc(s, COND_CC, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
         /* ... and finally perform the arithmetic with the new operand. */
         tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
     }
--
2.43.0

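To recap the sparc change above: tcg_out_movcc() now takes the raw sparc condition field (COND_*) rather than a TCGCond, with callers translating through tcg_cond_to_bcond[] where needed, so the add2/sub2 paths can name the carry conditions directly. Illustratively, a few entries of the mapping the backend performs (shown for explanation only; the real table lives in tcg/sparc64/tcg-target.c.inc):

```c
/* Illustration of the TCGCond -> COND_* translation done by
 * tcg_cond_to_bcond[] in the sparc64 backend (representative entries). */
static const int cond_map_sketch[] = {
    [TCG_COND_EQ]  = COND_E,    /* equal */
    [TCG_COND_NE]  = COND_NE,   /* not equal */
    [TCG_COND_LTU] = COND_CS,   /* carry set   == unsigned <  */
    [TCG_COND_GEU] = COND_CC,   /* carry clear == unsigned >= */
};
```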
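Two further conversions follow. The Arm paired-cmpxchg helpers receive the same HAVE_CMPXCHG128 treatment, and a larger tcg patch then retires the dedicated ext8s/ext16s/ext32s/ext8u/ext16u/ext32u opcodes, since each one is just an extract or sextract at offset 0. The identity that the second conversion relies on, as a self-contained sketch mirroring the semantics of QEMU's extract32/sextract32 helpers:

```c
#include <stdint.h>

/* Zero-extension of the low LEN bits == extract at offset 0. */
static inline uint32_t extract32_sketch(uint32_t v, int ofs, int len)
{
    return (v >> ofs) & (~0u >> (32 - len));
}

/* Sign-extension of the low LEN bits == sextract at offset 0:
 * shift the field up to the top, then arithmetic-shift back down. */
static inline int32_t sextract32_sketch(uint32_t v, int ofs, int len)
{
    return (int32_t)(v << (32 - len - ofs)) >> (32 - len);
}

/* Hence ext8u(x) == extract(x, 0, 8)  == (uint8_t)x, and
 *       ext8s(x) == sextract(x, 0, 8) == (int8_t)x,
 * which is exactly how tcg_gen_ext8s_i32() is rewritten below. */
```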
Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper-a64.c | 259 +++++++++++++++++++++-------------
 1 file changed, 133 insertions(+), 126 deletions(-)

diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
 #include "qemu/int128.h"
+#include "qemu/atomic128.h"
 #include "tcg.h"
 #include "fpu/softfloat.h"
 #include <zlib.h> /* For crc32 */
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
     return crc32c(acc, buf, bytes) ^ 0xffffffff;
 }
 
-/* Returns 0 on success; 1 otherwise.  */
-static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
-                                       uint64_t new_lo, uint64_t new_hi,
-                                       bool parallel, uintptr_t ra)
+uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
+                                     uint64_t new_lo, uint64_t new_hi)
 {
-    Int128 oldv, cmpv, newv;
+    Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+    Int128 newv = int128_make128(new_lo, new_hi);
+    Int128 oldv;
+    uintptr_t ra = GETPC();
+    uint64_t o0, o1;
     bool success;
 
-    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
-    newv = int128_make128(new_lo, new_hi);
-
-    if (parallel) {
-#ifndef CONFIG_ATOMIC128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
-        int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
-        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
-        success = int128_eq(oldv, cmpv);
-#endif
-    } else {
-        uint64_t o0, o1;
-
 #ifdef CONFIG_USER_ONLY
-        /* ??? Enforce alignment.  */
-        uint64_t *haddr = g2h(addr);
+    /* ??? Enforce alignment.  */
+    uint64_t *haddr = g2h(addr);
 
-        helper_retaddr = ra;
-        o0 = ldq_le_p(haddr + 0);
-        o1 = ldq_le_p(haddr + 1);
-        oldv = int128_make128(o0, o1);
+    helper_retaddr = ra;
+    o0 = ldq_le_p(haddr + 0);
+    o1 = ldq_le_p(haddr + 1);
+    oldv = int128_make128(o0, o1);
 
-        success = int128_eq(oldv, cmpv);
-        if (success) {
-            stq_le_p(haddr + 0, int128_getlo(newv));
-            stq_le_p(haddr + 1, int128_gethi(newv));
-        }
-        helper_retaddr = 0;
-#else
-        int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
-        TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
-
-        o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
-        o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
-        oldv = int128_make128(o0, o1);
-
-        success = int128_eq(oldv, cmpv);
-        if (success) {
-            helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
-            helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
-        }
-#endif
+    success = int128_eq(oldv, cmpv);
+    if (success) {
+        stq_le_p(haddr + 0, int128_getlo(newv));
+        stq_le_p(haddr + 1, int128_gethi(newv));
     }
+    helper_retaddr = 0;
+#else
+    int mem_idx = cpu_mmu_index(env, false);
+    TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
+
+    o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
+    o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
+    oldv = int128_make128(o0, o1);
+
+    success = int128_eq(oldv, cmpv);
+    if (success) {
+        helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
+        helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
+    }
+#endif
 
     return !success;
 }
 
-uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
-                                     uint64_t new_lo, uint64_t new_hi)
-{
-    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false, GETPC());
-}
-
 uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
                                               uint64_t new_lo, uint64_t new_hi)
-{
-    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true, GETPC());
-}
-
-static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
-                                       uint64_t new_lo, uint64_t new_hi,
-                                       bool parallel, uintptr_t ra)
 {
     Int128 oldv, cmpv, newv;
+    uintptr_t ra = GETPC();
     bool success;
+    int mem_idx;
+    TCGMemOpIdx oi;
 
-    /* high and low need to be switched here because this is not actually a
-     * 128bit store but two doublewords stored consecutively
-     */
-    cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
-    newv = int128_make128(new_hi, new_lo);
-
-    if (parallel) {
-#ifndef CONFIG_ATOMIC128
+    if (!HAVE_CMPXCHG128) {
         cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
-        int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
-        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
-        success = int128_eq(oldv, cmpv);
-#endif
-    } else {
-        uint64_t o0, o1;
-
-#ifdef CONFIG_USER_ONLY
-        /* ??? Enforce alignment.  */
-        uint64_t *haddr = g2h(addr);
-
-        helper_retaddr = ra;
-        o1 = ldq_be_p(haddr + 0);
-        o0 = ldq_be_p(haddr + 1);
-        oldv = int128_make128(o0, o1);
-
-        success = int128_eq(oldv, cmpv);
-        if (success) {
-            stq_be_p(haddr + 0, int128_gethi(newv));
-            stq_be_p(haddr + 1, int128_getlo(newv));
-        }
-        helper_retaddr = 0;
-#else
-        int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
-        TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
-
-        o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
-        o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
-        oldv = int128_make128(o0, o1);
-
-        success = int128_eq(oldv, cmpv);
-        if (success) {
-            helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
-            helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
-        }
-#endif
     }
 
+    mem_idx = cpu_mmu_index(env, false);
+    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+
+    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+    newv = int128_make128(new_lo, new_hi);
+    oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+
+    success = int128_eq(oldv, cmpv);
     return !success;
 }
 
 uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
                                      uint64_t new_lo, uint64_t new_hi)
 {
-    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false, GETPC());
+    /*
+     * High and low need to be switched here because this is not actually a
+     * 128bit store but two doublewords stored consecutively
+     */
+    Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+    Int128 newv = int128_make128(new_lo, new_hi);
+    Int128 oldv;
+    uintptr_t ra = GETPC();
+    uint64_t o0, o1;
+    bool success;
+
+#ifdef CONFIG_USER_ONLY
+    /* ??? Enforce alignment.  */
+    uint64_t *haddr = g2h(addr);
+
+    helper_retaddr = ra;
+    o1 = ldq_be_p(haddr + 0);
+    o0 = ldq_be_p(haddr + 1);
+    oldv = int128_make128(o0, o1);
+
+    success = int128_eq(oldv, cmpv);
+    if (success) {
+        stq_be_p(haddr + 0, int128_gethi(newv));
+        stq_be_p(haddr + 1, int128_getlo(newv));
+    }
+    helper_retaddr = 0;
+#else
+    int mem_idx = cpu_mmu_index(env, false);
+    TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+    TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
+
+    o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
+    o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
+    oldv = int128_make128(o0, o1);
+
+    success = int128_eq(oldv, cmpv);
+    if (success) {
+        helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
+        helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
+    }
+#endif
+
+    return !success;
 }
 
 uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
-                                              uint64_t new_lo, uint64_t new_hi)
+                                              uint64_t new_lo, uint64_t new_hi)
 {
-    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true, GETPC());
+    Int128 oldv, cmpv, newv;
+    uintptr_t ra = GETPC();
+    bool success;
+    int mem_idx;
+    TCGMemOpIdx oi;
+
+    if (!HAVE_CMPXCHG128) {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+    }
+
+    mem_idx = cpu_mmu_index(env, false);
+    oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+
+    /*
+     * High and low need to be switched here because this is not actually a
+     * 128bit store but two doublewords stored consecutively
+     */
+    cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
+    newv = int128_make128(new_hi, new_lo);
+    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+
+    success = int128_eq(oldv, cmpv);
+    return !success;
 }
 
 /* Writes back the old data into Rs.  */
 void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
                               uint64_t new_lo, uint64_t new_hi)
 {
-    uintptr_t ra = GETPC();
-#ifndef CONFIG_ATOMIC128
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
     Int128 oldv, cmpv, newv;
+    uintptr_t ra = GETPC();
+    int mem_idx;
+    TCGMemOpIdx oi;
+
+    if (!HAVE_CMPXCHG128) {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+    }
+
+    mem_idx = cpu_mmu_index(env, false);
+    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
 
     cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
     newv = int128_make128(new_lo, new_hi);
-
-    int mem_idx = cpu_mmu_index(env, false);
-    TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
     oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
 
     env->xregs[rs] = int128_getlo(oldv);
     env->xregs[rs + 1] = int128_gethi(oldv);
-#endif
 }
 
 void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
                               uint64_t new_hi, uint64_t new_lo)
 {
-    uintptr_t ra = GETPC();
-#ifndef CONFIG_ATOMIC128
-    cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
     Int128 oldv, cmpv, newv;
+    uintptr_t ra = GETPC();
+    int mem_idx;
+    TCGMemOpIdx oi;
+
+    if (!HAVE_CMPXCHG128) {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+    }
+
+    mem_idx = cpu_mmu_index(env, false);
+    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
 
     cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
     newv = int128_make128(new_lo, new_hi);
 
-    int mem_idx = cpu_mmu_index(env, false);
-    TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
     oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
 
     env->xregs[rs + 1] = int128_getlo(oldv);
     env->xregs[rs] = int128_gethi(oldv);
-#endif
 }
--
2.17.2


Use the fully general extract opcodes instead.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 10 -
 tcg/aarch64/tcg-target-has.h | 10 -
 tcg/arm/tcg-target-has.h | 4 -
 tcg/i386/tcg-target-has.h | 10 -
 tcg/loongarch64/tcg-target-has.h | 10 -
 tcg/mips/tcg-target-has.h | 13 -
 tcg/ppc/tcg-target-has.h | 12 -
 tcg/riscv/tcg-target-has.h | 10 -
 tcg/s390x/tcg-target-has.h | 10 -
 tcg/sparc64/tcg-target-has.h | 10 -
 tcg/tcg-has.h | 6 -
 tcg/tci/tcg-target-has.h | 10 -
 tcg/optimize.c | 61 +----
 tcg/tcg-op.c | 414 +++++++------------------------
 tcg/tcg.c | 46 ----
 tcg/tci.c | 36 ---
 docs/devel/tcg-ops.rst | 14 --
 tcg/aarch64/tcg-target.c.inc | 22 +-
 tcg/arm/tcg-target.c.inc | 7 -
 tcg/i386/tcg-target.c.inc | 24 +-
 tcg/loongarch64/tcg-target.c.inc | 22 +-
 tcg/mips/tcg-target.c.inc | 20 +-
 tcg/ppc/tcg-target.c.inc | 17 +-
 tcg/riscv/tcg-target.c.inc | 22 +-
 tcg/s390x/tcg-target.c.inc | 22 +-
 tcg/sparc64/tcg-target.c.inc | 14 +-
 tcg/tci/tcg-target.c.inc | 102 +++-----
 27 files changed, 135 insertions(+), 823 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mulsh_i32, 1, 2, 0, 0)
 DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
 DEF(setcond2_i32, 1, 4, 1, 0)
 
-DEF(ext8s_i32, 1, 1, 0, 0)
-DEF(ext16s_i32, 1, 1, 0, 0)
-DEF(ext8u_i32, 1, 1, 0, 0)
-DEF(ext16u_i32, 1, 1, 0, 0)
 DEF(bswap16_i32, 1, 1, 1, 0)
 DEF(bswap32_i32, 1, 1, 1, 0)
 DEF(not_i32, 1, 1, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(extrl_i64_i32, 1, 1, 0, 0)
 DEF(extrh_i64_i32, 1, 1, 0, 0)
 
 DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
-DEF(ext8s_i64, 1, 1, 0, 0)
-DEF(ext16s_i64, 1, 1, 0, 0)
-DEF(ext32s_i64, 1, 1, 0, 0)
-DEF(ext8u_i64, 1, 1, 0, 0)
-DEF(ext16u_i64, 1, 1, 0, 0)
-DEF(ext32u_i64, 1, 1, 0, 0)
 DEF(bswap16_i64, 1, 1, 1, 0)
 DEF(bswap32_i64, 1, 1, 1, 0)
 DEF(bswap64_i64, 1, 1, 1, 0)
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 /* optional instructions */
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #endif
 
 /* optional instructions */
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 0 /* and r0, r1, #0xff */
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 1
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 /* optional instructions */
 #define TCG_TARGET_HAS_div2_i32 1
 #define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_extr_i64_i32 1
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 1
 #define TCG_TARGET_HAS_mulsh_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rot_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
 #define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 
 /* optional instructions detected at runtime */
 #define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
 #define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
 #define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
 #define TCG_TARGET_HAS_ctz_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
-#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
 #define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
 #define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
 #define TCG_TARGET_HAS_ctz_i64 0
 #define TCG_TARGET_HAS_ctpop_i64 0
 #endif
 
-/* optional instructions automatically implemented */
-#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
-#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
-
-#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */
-#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
-#endif
-
 #define TCG_TARGET_HAS_qemu_ldst_i128 0
 #define TCG_TARGET_HAS_tst 0
 
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define have_altivec (cpuinfo & CPUINFO_ALTIVEC)
 #define have_vsx (cpuinfo & CPUINFO_VSX)
 
-/* optional instructions automatically implemented */
-#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */
-#define TCG_TARGET_HAS_ext16u_i32 0
-
 /* optional instructions */
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 have_isa_3_00
 #define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 have_isa_3_00
 #define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 0
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_muls2_i32 0
 #define TCG_TARGET_HAS_muluh_i32 0
 #define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_not_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_extract2_i64 0
 #define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 /* optional instructions */
 #define TCG_TARGET_HAS_div2_i32 1
 #define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 
 #define TCG_TARGET_HAS_div2_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 0
 #define TCG_TARGET_HAS_rot_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 0
-#define TCG_TARGET_HAS_ext16s_i32 0
-#define TCG_TARGET_HAS_ext8u_i32 0
-#define TCG_TARGET_HAS_ext16u_i32 0
 #define TCG_TARGET_HAS_bswap16_i32 0
 #define TCG_TARGET_HAS_bswap32_i32 0
 #define TCG_TARGET_HAS_not_i32 1
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_rot_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 0
-#define TCG_TARGET_HAS_ext16s_i64 0
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_bswap16_i64 0
 #define TCG_TARGET_HAS_bswap32_i64 0
 #define TCG_TARGET_HAS_bswap64_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rem_i64 0
 #define TCG_TARGET_HAS_div2_i64 0
 #define TCG_TARGET_HAS_rot_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 0
-#define TCG_TARGET_HAS_ext16s_i64 0
-#define TCG_TARGET_HAS_ext32s_i64 0
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 0
 #define TCG_TARGET_HAS_bswap16_i64 0
 #define TCG_TARGET_HAS_bswap32_i64 0
 #define TCG_TARGET_HAS_bswap64_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
 #define TCG_TARGET_HAS_andc_i32 1
 #define TCG_TARGET_HAS_extract2_i32 0
 #define TCG_TARGET_HAS_eqv_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_extract2_i64 0
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
 #define TCG_TARGET_HAS_andc_i64 1
 #define TCG_TARGET_HAS_eqv_i64 1
 #define TCG_TARGET_HAS_nand_i64 1
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_ctpop_i64:
         return ctpop64(x);
 
-    CASE_OP_32_64(ext8s):
-        return (int8_t)x;
-
-    CASE_OP_32_64(ext16s):
-        return (int16_t)x;
-
-    CASE_OP_32_64(ext8u):
-        return (uint8_t)x;
-
-    CASE_OP_32_64(ext16u):
-        return (uint16_t)x;
-
     CASE_OP_32_64(bswap16):
         x = bswap16(x);
         return y & TCG_BSWAP_OS ? (int16_t)x : x;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
         return bswap64(x);
 
     case INDEX_op_ext_i32_i64:
-    case INDEX_op_ext32s_i64:
         return (int32_t)x;
 
     case INDEX_op_extu_i32_i64:
     case INDEX_op_extrl_i64_i32:
-    case INDEX_op_ext32u_i64:
         return (uint32_t)x;
 
     case INDEX_op_extrh_i64_i32:
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    uint64_t s_mask_old, s_mask, z_mask;
-    bool type_change = false;
+    uint64_t s_mask, z_mask;
     TempOptInfo *t1;
 
     if (fold_const1(ctx, op)) {
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
     t1 = arg_info(op->args[1]);
     z_mask = t1->z_mask;
     s_mask = t1->s_mask;
-    s_mask_old = s_mask;
 
     switch (op->opc) {
-    CASE_OP_32_64(ext8s):
-        s_mask |= INT8_MIN;
-        z_mask = (int8_t)z_mask;
-        break;
-    CASE_OP_32_64(ext16s):
-        s_mask |= INT16_MIN;
-        z_mask = (int16_t)z_mask;
-        break;
     case INDEX_op_ext_i32_i64:
-        type_change = true;
-        QEMU_FALLTHROUGH;
-    case INDEX_op_ext32s_i64:
         s_mask |= INT32_MIN;
         z_mask = (int32_t)z_mask;
         break;
     default:
         g_assert_not_reached();
     }
-
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
-        return true;
-    }
-
     return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_extu(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask_old, z_mask;
-    bool type_change = false;
+    uint64_t z_mask;
 
     if (fold_const1(ctx, op)) {
         return true;
     }
 
-    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
-
+    z_mask = arg_info(op->args[1])->z_mask;
+
     switch (op->opc) {
-    CASE_OP_32_64(ext8u):
-        z_mask = (uint8_t)z_mask;
-        break;
-    CASE_OP_32_64(ext16u):
-        z_mask = (uint16_t)z_mask;
-        break;
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_extu_i32_i64:
-        type_change = true;
-        QEMU_FALLTHROUGH;
-    case INDEX_op_ext32u_i64:
         z_mask = (uint32_t)z_mask;
         break;
     case INDEX_op_extrh_i64_i32:
-        type_change = true;
         z_mask >>= 32;
         break;
     default:
         g_assert_not_reached();
     }
-
-    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
-        return true;
-    }
-
     return fold_masks_z(ctx, op, z_mask);
 }
 
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(extract2):
             done = fold_extract2(&ctx, op);
             break;
-        CASE_OP_32_64(ext8s):
-        CASE_OP_32_64(ext16s):
-        case INDEX_op_ext32s_i64:
         case INDEX_op_ext_i32_i64:
             done = fold_exts(&ctx, op);
             break;
-        CASE_OP_32_64(ext8u):
-        CASE_OP_32_64(ext16u):
-        case INDEX_op_ext32u_i64:
         case INDEX_op_extu_i32_i64:
         case INDEX_op_extrl_i64_i32:
         case INDEX_op_extrh_i64_i32:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     case -1:
         tcg_gen_mov_i32(ret, arg1);
         return;
-    case 0xff:
-        /* Don't recurse with tcg_gen_ext8u_i32.  */
-        if (TCG_TARGET_HAS_ext8u_i32) {
-            tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1);
-            return;
-        }
-        break;
-    case 0xffff:
-        if (TCG_TARGET_HAS_ext16u_i32) {
-            tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1);
-            return;
+    default:
+        /*
+         * Canonicalize on extract, if valid.  This aids x86 with its
+         * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode
+         * which does not require matching operands.  Other backends can
+         * trivially expand the extract to AND during code generation.
+         */
+        if (!(arg2 & (arg2 + 1))) {
+            unsigned len = ctz32(~arg2);
+            if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, len)) {
+                tcg_gen_extract_i32(ret, arg1, 0, len);
+                return;
+            }
         }
         break;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
         TCGv_i32 zero = tcg_constant_i32(0);
         tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
     } else {
-        /* To help two-operand hosts we prefer to zero-extend first,
-           which allows ARG to stay live.  */
-        switch (len) {
-        case 16:
-            if (TCG_TARGET_HAS_ext16u_i32) {
-                tcg_gen_ext16u_i32(ret, arg);
-                tcg_gen_shli_i32(ret, ret, ofs);
-                return;
-            }
-            break;
-        case 8:
-            if (TCG_TARGET_HAS_ext8u_i32) {
-                tcg_gen_ext8u_i32(ret, arg);
-                tcg_gen_shli_i32(ret, ret, ofs);
-                return;
-            }
-            break;
+        /*
+         * To help two-operand hosts we prefer to zero-extend first,
+         * which allows ARG to stay live.
+         */
+        if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, len)) {
+            tcg_gen_extract_i32(ret, arg, 0, len);
+            tcg_gen_shli_i32(ret, ret, ofs);
+            return;
         }
         /* Otherwise prefer zero-extension over AND for code size.  */
-        switch (ofs + len) {
-        case 16:
-            if (TCG_TARGET_HAS_ext16u_i32) {
-                tcg_gen_shli_i32(ret, arg, ofs);
-                tcg_gen_ext16u_i32(ret, ret);
-                return;
-            }
-            break;
-        case 8:
-            if (TCG_TARGET_HAS_ext8u_i32) {
-                tcg_gen_shli_i32(ret, arg, ofs);
-                tcg_gen_ext8u_i32(ret, ret);
-                return;
-            }
-            break;
+        if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
+            tcg_gen_shli_i32(ret, arg, ofs);
+            tcg_gen_extract_i32(ret, ret, 0, ofs + len);
+            return;
         }
         tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
         tcg_gen_shli_i32(ret, ret, ofs);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
         tcg_gen_shri_i32(ret, arg, 32 - len);
         return;
     }
-    if (ofs == 0) {
-        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
-        return;
-    }
 
     if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) {
         tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
         return;
     }
+    if (ofs == 0) {
+        tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
+        return;
+    }
 
     /* Assume that zero-extension, if available, is cheaper than a shift.  */
-    switch (ofs + len) {
-    case 16:
-        if (TCG_TARGET_HAS_ext16u_i32) {
-            tcg_gen_ext16u_i32(ret, arg);
-            tcg_gen_shri_i32(ret, ret, ofs);
-            return;
-        }
-        break;
-    case 8:
-        if (TCG_TARGET_HAS_ext8u_i32) {
-            tcg_gen_ext8u_i32(ret, arg);
-            tcg_gen_shri_i32(ret, ret, ofs);
-            return;
-        }
-        break;
+    if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
+        tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, 0, ofs + len);
+        tcg_gen_shri_i32(ret, ret, ofs);
+        return;
     }
 
     /* ??? Ideally we'd know what values are available for immediate AND.
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
         tcg_gen_sari_i32(ret, arg, 32 - len);
         return;
     }
-    if (ofs == 0) {
-        switch (len) {
-        case 16:
-            tcg_gen_ext16s_i32(ret, arg);
-            return;
-        case 8:
-            tcg_gen_ext8s_i32(ret, arg);
-            return;
-        }
-    }
 
     if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, ofs, len)) {
         tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
     }
 
     /* Assume that sign-extension, if available, is cheaper than a shift.  */
-    switch (ofs + len) {
-    case 16:
-        if (TCG_TARGET_HAS_ext16s_i32) {
-            tcg_gen_ext16s_i32(ret, arg);
-            tcg_gen_sari_i32(ret, ret, ofs);
-            return;
-        }
-        break;
-    case 8:
-        if (TCG_TARGET_HAS_ext8s_i32) {
-            tcg_gen_ext8s_i32(ret, arg);
-            tcg_gen_sari_i32(ret, ret, ofs);
-            return;
-        }
-        break;
+    if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, ofs + len)) {
+        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, 0, ofs + len);
+        tcg_gen_sari_i32(ret, ret, ofs);
+        return;
     }
-    switch (len) {
-    case 16:
-        if (TCG_TARGET_HAS_ext16s_i32) {
-            tcg_gen_shri_i32(ret, arg, ofs);
-            tcg_gen_ext16s_i32(ret, ret);
-            return;
-        }
-        break;
-    case 8:
-        if (TCG_TARGET_HAS_ext8s_i32) {
-            tcg_gen_shri_i32(ret, arg, ofs);
-            tcg_gen_ext8s_i32(ret, ret);
-            return;
-        }
-        break;
+    if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, len)) {
+        tcg_gen_shri_i32(ret, arg, ofs);
+        tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, ret, 0, len);
+        return;
     }
 
     tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
 
 void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
-    if (TCG_TARGET_HAS_ext8s_i32) {
-        tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg);
-    } else {
-        tcg_gen_shli_i32(ret, arg, 24);
-        tcg_gen_sari_i32(ret, ret, 24);
-    }
+    tcg_gen_sextract_i32(ret, arg, 0, 8);
 }
 
 void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
-    if (TCG_TARGET_HAS_ext16s_i32) {
-        tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg);
-    } else {
-        tcg_gen_shli_i32(ret, arg, 16);
-        tcg_gen_sari_i32(ret, ret, 16);
-    }
+    tcg_gen_sextract_i32(ret, arg, 0, 16);
 }
 
 void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
-    if (TCG_TARGET_HAS_ext8u_i32) {
-        tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
-    } else {
-        tcg_gen_andi_i32(ret, arg, 0xffu);
-    }
+    tcg_gen_extract_i32(ret, arg, 0, 8);
 }
 
 void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
-    if (TCG_TARGET_HAS_ext16u_i32) {
-        tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
-    } else {
-        tcg_gen_andi_i32(ret, arg, 0xffffu);
-    }
+    tcg_gen_extract_i32(ret, arg, 0, 16);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     case -1:
         tcg_gen_mov_i64(ret, arg1);
         return;
-    case 0xff:
-        /* Don't recurse with tcg_gen_ext8u_i64.  */
-        if (TCG_TARGET_HAS_ext8u_i64) {
-            tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1);
-            return;
-        }
-        break;
-    case 0xffff:
-        if (TCG_TARGET_HAS_ext16u_i64) {
-            tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1);
-            return;
-        }
-        break;
-    case 0xffffffffu:
-        if (TCG_TARGET_HAS_ext32u_i64) {
-            tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1);
-            return;
+    default:
+        /*
+         * Canonicalize on extract, if valid.  This aids x86 with its
+         * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode
+         * which does not require matching operands.  Other backends can
+         * trivially expand the extract to AND during code generation.
+         */
+        if (!(arg2 & (arg2 + 1))) {
+            unsigned len = ctz64(~arg2);
+            if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, len)) {
+                tcg_gen_extract_i64(ret, arg1, 0, len);
+                return;
+            }
         }
         break;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 
 void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-    } else if (TCG_TARGET_HAS_ext8s_i64) {
-        tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg);
-    } else {
-        tcg_gen_shli_i64(ret, arg, 56);
-        tcg_gen_sari_i64(ret, ret, 56);
-    }
+    tcg_gen_sextract_i64(ret, arg, 0, 8);
 }
 
 void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-    } else if (TCG_TARGET_HAS_ext16s_i64) {
-        tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg);
-    } else {
-        tcg_gen_shli_i64(ret, arg, 48);
-        tcg_gen_sari_i64(ret, ret, 48);
-    }
+    tcg_gen_sextract_i64(ret, arg, 0, 16);
 }
 
 void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-        tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
-    } else if (TCG_TARGET_HAS_ext32s_i64) {
-        tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg);
-    } else {
-        tcg_gen_shli_i64(ret, arg, 32);
-        tcg_gen_sari_i64(ret, ret, 32);
-    }
+    tcg_gen_sextract_i64(ret, arg, 0, 32);
 }
 
 void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-    } else if (TCG_TARGET_HAS_ext8u_i64) {
-        tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
-    } else {
-        tcg_gen_andi_i64(ret, arg, 0xffu);
-    }
+    tcg_gen_extract_i64(ret, arg, 0, 8);
 }
 
 void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-    } else if (TCG_TARGET_HAS_ext16u_i64) {
-        tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
-    } else {
-        tcg_gen_andi_i64(ret, arg, 0xffffu);
-    }
+    tcg_gen_extract_i64(ret, arg, 0, 16);
 }
 
 void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
-    if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
-        tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
-    } else if (TCG_TARGET_HAS_ext32u_i64) {
-        tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
-    } else {
-        tcg_gen_andi_i64(ret, arg, 0xffffffffu);
-    }
+    tcg_gen_extract_i64(ret, arg, 0, 32);
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
             return;
         }
     }
-    /* To help two-operand hosts we prefer to zero-extend first,
-       which allows ARG to stay live.  */
-    switch (len) {
-    case 32:
-        if (TCG_TARGET_HAS_ext32u_i64) {
-            tcg_gen_ext32u_i64(ret, arg);
-            tcg_gen_shli_i64(ret, ret, ofs);
-            return;
-        }
-        break;
-    case 16:
-        if (TCG_TARGET_HAS_ext16u_i64) {
-            tcg_gen_ext16u_i64(ret, arg);
-            tcg_gen_shli_i64(ret, ret, ofs);
-            return;
-        }
-        break;
-    case 8:
-        if (TCG_TARGET_HAS_ext8u_i64) {
-            tcg_gen_ext8u_i64(ret, arg);
-            tcg_gen_shli_i64(ret, ret, ofs);
-            return;
-        }
-        break;
+    /*
+     * To help two-operand hosts we prefer to zero-extend first,
+     * which allows ARG to stay live.
+     */
+    if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, len)) {
+        tcg_gen_extract_i64(ret, arg, 0, len);
+        tcg_gen_shli_i64(ret, ret, ofs);
+        return;
     }
     /* Otherwise prefer zero-extension over AND for code size.  */
-    switch (ofs + len) {
-    case 32:
-        if (TCG_TARGET_HAS_ext32u_i64) {
-            tcg_gen_shli_i64(ret, arg, ofs);
-            tcg_gen_ext32u_i64(ret, ret);
-            return;
-        }
-        break;
-    case 16:
-        if (TCG_TARGET_HAS_ext16u_i64) {
-            tcg_gen_shli_i64(ret, arg, ofs);
-            tcg_gen_ext16u_i64(ret, ret);
-            return;
-        }
-        break;
-    case 8:
-        if (TCG_TARGET_HAS_ext8u_i64) {
-            tcg_gen_shli_i64(ret, arg, ofs);
-            tcg_gen_ext8u_i64(ret, ret);
-            return;
-        }
-        break;
+    if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
+        tcg_gen_shli_i64(ret, arg, ofs);
+        tcg_gen_extract_i64(ret, ret, 0, ofs + len);
+        return;
     }
     tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
     tcg_gen_shli_i64(ret, ret, ofs);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
         tcg_gen_shri_i64(ret, arg, 64 - len);
         return;
     }
-    if (ofs == 0) {
-        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
-        return;
-    }
 
     if (TCG_TARGET_REG_BITS == 32) {
         /* Look for a 32-bit extract within one of the two words.  */
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
         tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
         return;
     }
+    if (ofs == 0) {
+        tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
+        return;
+    }
 
     /* Assume that zero-extension, if available, is cheaper than a shift.  */
-    switch (ofs + len) {
-    case 32:
-        if (TCG_TARGET_HAS_ext32u_i64) {
-            tcg_gen_ext32u_i64(ret, arg);
-            tcg_gen_shri_i64(ret, ret, ofs);
-            return;
-        }
-        break;
-    case 16:
-        if (TCG_TARGET_HAS_ext16u_i64) {
-            tcg_gen_ext16u_i64(ret, arg);
-            tcg_gen_shri_i64(ret, ret, ofs);
-            return;
-        }
-        break;
-    case 8:
-        if (TCG_TARGET_HAS_ext8u_i64) {
-            tcg_gen_ext8u_i64(ret, arg);
-            tcg_gen_shri_i64(ret, ret, ofs);
-            return;
-        }
-        break;
+    if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
+        tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, 0, ofs + len);
+        tcg_gen_shri_i64(ret, ret, ofs);
+        return;
     }
 
     /* ??? Ideally we'd know what values are available for immediate AND.
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
         tcg_gen_sari_i64(ret, arg, 64 - len);
         return;
     }
-    if (ofs == 0) {
-        switch (len) {
-        case 32:
-            tcg_gen_ext32s_i64(ret, arg);
-            return;
-        case 16:
-            tcg_gen_ext16s_i64(ret, arg);
-            return;
-        case 8:
-            tcg_gen_ext8s_i64(ret, arg);
-            return;
-        }
-    }
 
     if (TCG_TARGET_REG_BITS == 32) {
         /* Look for a 32-bit extract within one of the two words.  */
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
     }
 
     /* Assume that sign-extension, if available, is cheaper than a shift.  */
-    switch (ofs + len) {
-    case 32:
-        if (TCG_TARGET_HAS_ext32s_i64) {
-            tcg_gen_ext32s_i64(ret, arg);
-            tcg_gen_sari_i64(ret, ret, ofs);
-            return;
1004
- }
1005
- break;
1006
- case 16:
1007
- if (TCG_TARGET_HAS_ext16s_i64) {
1008
- tcg_gen_ext16s_i64(ret, arg);
1009
- tcg_gen_sari_i64(ret, ret, ofs);
1010
- return;
1011
- }
1012
- break;
1013
- case 8:
1014
- if (TCG_TARGET_HAS_ext8s_i64) {
1015
- tcg_gen_ext8s_i64(ret, arg);
1016
- tcg_gen_sari_i64(ret, ret, ofs);
1017
- return;
1018
- }
1019
- break;
1020
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, ofs + len)) {
1021
+ tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, 0, ofs + len);
1022
+ tcg_gen_sari_i64(ret, ret, ofs);
1023
+ return;
1024
}
1025
- switch (len) {
1026
- case 32:
1027
- if (TCG_TARGET_HAS_ext32s_i64) {
1028
- tcg_gen_shri_i64(ret, arg, ofs);
1029
- tcg_gen_ext32s_i64(ret, ret);
1030
- return;
1031
- }
1032
- break;
1033
- case 16:
1034
- if (TCG_TARGET_HAS_ext16s_i64) {
1035
- tcg_gen_shri_i64(ret, arg, ofs);
1036
- tcg_gen_ext16s_i64(ret, ret);
1037
- return;
1038
- }
1039
- break;
1040
- case 8:
1041
- if (TCG_TARGET_HAS_ext8s_i64) {
1042
- tcg_gen_shri_i64(ret, arg, ofs);
1043
- tcg_gen_ext8s_i64(ret, ret);
1044
- return;
1045
- }
1046
- break;
1047
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, len)) {
1048
+ tcg_gen_shri_i64(ret, arg, ofs);
1049
+ tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, ret, 0, len);
1050
+ return;
1051
}
1052
+
1053
tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
1054
tcg_gen_sari_i64(ret, ret, 64 - len);
1055
}
1056
diff --git a/tcg/tcg.c b/tcg/tcg.c
1057
index XXXXXXX..XXXXXXX 100644
1058
--- a/tcg/tcg.c
1059
+++ b/tcg/tcg.c
1060
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
1061
return TCG_TARGET_HAS_muluh_i32;
1062
case INDEX_op_mulsh_i32:
1063
return TCG_TARGET_HAS_mulsh_i32;
1064
- case INDEX_op_ext8s_i32:
1065
- return TCG_TARGET_HAS_ext8s_i32;
1066
- case INDEX_op_ext16s_i32:
1067
- return TCG_TARGET_HAS_ext16s_i32;
1068
- case INDEX_op_ext8u_i32:
1069
- return TCG_TARGET_HAS_ext8u_i32;
1070
- case INDEX_op_ext16u_i32:
1071
- return TCG_TARGET_HAS_ext16u_i32;
1072
case INDEX_op_bswap16_i32:
1073
return TCG_TARGET_HAS_bswap16_i32;
1074
case INDEX_op_bswap32_i32:
1075
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
1076
case INDEX_op_extrl_i64_i32:
1077
case INDEX_op_extrh_i64_i32:
1078
return TCG_TARGET_HAS_extr_i64_i32;
1079
- case INDEX_op_ext8s_i64:
1080
- return TCG_TARGET_HAS_ext8s_i64;
1081
- case INDEX_op_ext16s_i64:
1082
- return TCG_TARGET_HAS_ext16s_i64;
1083
- case INDEX_op_ext32s_i64:
1084
- return TCG_TARGET_HAS_ext32s_i64;
1085
- case INDEX_op_ext8u_i64:
1086
- return TCG_TARGET_HAS_ext8u_i64;
1087
- case INDEX_op_ext16u_i64:
1088
- return TCG_TARGET_HAS_ext16u_i64;
1089
- case INDEX_op_ext32u_i64:
1090
- return TCG_TARGET_HAS_ext32u_i64;
1091
case INDEX_op_bswap16_i64:
1092
return TCG_TARGET_HAS_bswap16_i64;
1093
case INDEX_op_bswap32_i64:
1094
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
1095
1096
/* emit instruction */
1097
switch (op->opc) {
1098
- case INDEX_op_ext8s_i32:
1099
- tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
1100
- break;
1101
- case INDEX_op_ext8s_i64:
1102
- tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
1103
- break;
1104
- case INDEX_op_ext8u_i32:
1105
- case INDEX_op_ext8u_i64:
1106
- tcg_out_ext8u(s, new_args[0], new_args[1]);
1107
- break;
1108
- case INDEX_op_ext16s_i32:
1109
- tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
1110
- break;
1111
- case INDEX_op_ext16s_i64:
1112
- tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
1113
- break;
1114
- case INDEX_op_ext16u_i32:
1115
- case INDEX_op_ext16u_i64:
1116
- tcg_out_ext16u(s, new_args[0], new_args[1]);
1117
- break;
1118
- case INDEX_op_ext32s_i64:
1119
- tcg_out_ext32s(s, new_args[0], new_args[1]);
1120
- break;
1121
- case INDEX_op_ext32u_i64:
1122
- tcg_out_ext32u(s, new_args[0], new_args[1]);
1123
- break;
1124
case INDEX_op_ext_i32_i64:
1125
tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
1126
break;
1127
diff --git a/tcg/tci.c b/tcg/tci.c
1128
index XXXXXXX..XXXXXXX 100644
1129
--- a/tcg/tci.c
1130
+++ b/tcg/tci.c
1131
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
1132
tci_write_reg64(regs, r1, r0, tmp64);
1133
break;
1134
#endif
1135
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
1136
- CASE_32_64(ext8s)
1137
- tci_args_rr(insn, &r0, &r1);
1138
- regs[r0] = (int8_t)regs[r1];
1139
- break;
1140
-#endif
1141
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
1142
- TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
1143
- CASE_32_64(ext16s)
1144
- tci_args_rr(insn, &r0, &r1);
1145
- regs[r0] = (int16_t)regs[r1];
1146
- break;
1147
-#endif
1148
-#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
1149
- CASE_32_64(ext8u)
1150
- tci_args_rr(insn, &r0, &r1);
1151
- regs[r0] = (uint8_t)regs[r1];
1152
- break;
1153
-#endif
1154
-#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
1155
- CASE_32_64(ext16u)
1156
- tci_args_rr(insn, &r0, &r1);
1157
- regs[r0] = (uint16_t)regs[r1];
1158
- break;
1159
-#endif
1160
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
1161
CASE_32_64(bswap16)
1162
tci_args_rr(insn, &r0, &r1);
1163
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
1164
tb_ptr = ptr;
1165
}
1166
break;
1167
- case INDEX_op_ext32s_i64:
1168
case INDEX_op_ext_i32_i64:
1169
tci_args_rr(insn, &r0, &r1);
1170
regs[r0] = (int32_t)regs[r1];
1171
break;
1172
- case INDEX_op_ext32u_i64:
1173
case INDEX_op_extu_i32_i64:
1174
tci_args_rr(insn, &r0, &r1);
1175
regs[r0] = (uint32_t)regs[r1];
1176
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
1177
1178
case INDEX_op_mov_i32:
1179
case INDEX_op_mov_i64:
1180
- case INDEX_op_ext8s_i32:
1181
- case INDEX_op_ext8s_i64:
1182
- case INDEX_op_ext8u_i32:
1183
- case INDEX_op_ext8u_i64:
1184
- case INDEX_op_ext16s_i32:
1185
- case INDEX_op_ext16s_i64:
1186
- case INDEX_op_ext16u_i32:
1187
- case INDEX_op_ext32s_i64:
1188
- case INDEX_op_ext32u_i64:
1189
case INDEX_op_ext_i32_i64:
1190
case INDEX_op_extu_i32_i64:
1191
case INDEX_op_bswap16_i32:
1192
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
1193
index XXXXXXX..XXXXXXX 100644
1194
--- a/docs/devel/tcg-ops.rst
1195
+++ b/docs/devel/tcg-ops.rst
1196
@@ -XXX,XX +XXX,XX @@ Misc
1197
- | *t0* = *t1*
1198
| Move *t1* to *t0* (both operands must have the same type).
1199
1200
- * - ext8s_i32/i64 *t0*, *t1*
1201
-
1202
- ext8u_i32/i64 *t0*, *t1*
1203
-
1204
- ext16s_i32/i64 *t0*, *t1*
1205
-
1206
- ext16u_i32/i64 *t0*, *t1*
1207
-
1208
- ext32s_i64 *t0*, *t1*
1209
-
1210
- ext32u_i64 *t0*, *t1*
1211
-
1212
- - | 8, 16 or 32 bit sign/zero extension (both operands must have the same type)
1213
-
1214
* - bswap16_i32/i64 *t0*, *t1*, *flags*
1215
1216
- | 16 bit byte swap on the low bits of a 32/64 bit input.
1217
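
An aside for reviewers: the TCG_TARGET_extract_valid() calls used throughout this patch are a per-backend predicate that replaces the old TCG_TARGET_HAS_ext*_* feature macros. A minimal sketch of its shape, with illustrative values rather than any real backend's logic:

    /* Sketch only: a backend with no general bitfield-extract instruction
     * but with 8/16/32-bit zero-extends would accept exactly the
     * offset-0 extracts that this patch relies on. */
    static inline bool sketch_extract_valid(unsigned ofs, unsigned len)
    {
        return ofs == 0 && (len == 8 || len == 16 || len == 32);
    }

A backend with a real extract instruction (e.g. ubfx-style) would instead accept any ofs/len pair that fits the register width.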
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_bswap16_i64:
 case INDEX_op_bswap32_i64:
 case INDEX_op_bswap64_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extract_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16u_i32:
 default:
 g_assert_not_reached();
 }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_not_i32:
 case INDEX_op_bswap16_i32:
 case INDEX_op_bswap32_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16u_i32:
 case INDEX_op_extract_i32:
 case INDEX_op_sextract_i32:
 return C_O1_I1(r, r);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, 0);

- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- return C_O1_I1(r, q);
-
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_brcond_i64:
 return C_O0_I2(rz, rz);

- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,

 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
 {
- tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32);
+ tcg_debug_assert(use_mips32r2_instructions);
 tcg_out_opc_reg(s, OPC_SEB, rd, TCG_REG_ZERO, rs);
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)

 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
 {
- tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32);
+ tcg_debug_assert(use_mips32r2_instructions);
 tcg_out_opc_reg(s, OPC_SEH, rd, TCG_REG_ZERO, rs);
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_not_i32:
 case INDEX_op_bswap16_i32:
 case INDEX_op_bswap32_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
 case INDEX_op_extract_i32:
 case INDEX_op_sextract_i32:
 case INDEX_op_ld8u_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_bswap16_i64:
 case INDEX_op_bswap32_i64:
 case INDEX_op_bswap64_i64:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ctpop_i32:
 case INDEX_op_neg_i32:
 case INDEX_op_not_i32:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext16s_i32:
 case INDEX_op_bswap16_i32:
 case INDEX_op_bswap32_i32:
 case INDEX_op_extract_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ctpop_i64:
 case INDEX_op_neg_i64:
 case INDEX_op_not_i64:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_bswap16_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld_i64:
 case INDEX_op_not_i64:
 case INDEX_op_neg_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_extu_i32_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext32s_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 case INDEX_op_ext_i32_i64:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_neg_i64:
 case INDEX_op_not_i32:
 case INDEX_op_not_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extract_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_neg_i64:
 case INDEX_op_not_i32:
 case INDEX_op_not_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extract_i64:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_not_i64:
 case INDEX_op_neg_i32:
 case INDEX_op_neg_i64:
- case INDEX_op_ext8s_i32:
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_bswap16_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
 }
 }

+static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rs, unsigned pos, unsigned len)
+{
+ TCGOpcode opc = type == TCG_TYPE_I32 ?
+ INDEX_op_extract_i32 :
+ INDEX_op_extract_i64;
+ tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
+}
+
+static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rs, unsigned pos, unsigned len)
+{
+ TCGOpcode opc = type == TCG_TYPE_I32 ?
+ INDEX_op_sextract_i32 :
+ INDEX_op_sextract_i64;
+ tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
+}
+
 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
 {
- switch (type) {
- case TCG_TYPE_I32:
- tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32);
- tcg_out_op_rr(s, INDEX_op_ext8s_i32, rd, rs);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_debug_assert(TCG_TARGET_HAS_ext8s_i64);
- tcg_out_op_rr(s, INDEX_op_ext8s_i64, rd, rs);
- break;
-#endif
- default:
- g_assert_not_reached();
- }
+ tcg_out_sextract(s, type, rd, rs, 0, 8);
 }

 static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
 {
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_debug_assert(TCG_TARGET_HAS_ext8u_i64);
- tcg_out_op_rr(s, INDEX_op_ext8u_i64, rd, rs);
- } else {
- tcg_debug_assert(TCG_TARGET_HAS_ext8u_i32);
- tcg_out_op_rr(s, INDEX_op_ext8u_i32, rd, rs);
- }
+ tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
 }

 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
 {
- switch (type) {
- case TCG_TYPE_I32:
- tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32);
- tcg_out_op_rr(s, INDEX_op_ext16s_i32, rd, rs);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case TCG_TYPE_I64:
- tcg_debug_assert(TCG_TARGET_HAS_ext16s_i64);
- tcg_out_op_rr(s, INDEX_op_ext16s_i64, rd, rs);
- break;
-#endif
- default:
- g_assert_not_reached();
- }
+ tcg_out_sextract(s, type, rd, rs, 0, 16);
 }

 static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
 {
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_debug_assert(TCG_TARGET_HAS_ext16u_i64);
- tcg_out_op_rr(s, INDEX_op_ext16u_i64, rd, rs);
- } else {
- tcg_debug_assert(TCG_TARGET_HAS_ext16u_i32);
- tcg_out_op_rr(s, INDEX_op_ext16u_i32, rd, rs);
- }
+ tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
 }

 static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
 {
 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_debug_assert(TCG_TARGET_HAS_ext32s_i64);
- tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs);
+ tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
 }

 static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
 {
 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64);
- tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs);
+ tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
 }

 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 const TCGArg args[TCG_MAX_OP_ARGS],
 const int const_args[TCG_MAX_OP_ARGS])
 {
- TCGOpcode exts;
+ int width;

 switch (opc) {
 case INDEX_op_goto_ptr:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 break;

 case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
- exts = INDEX_op_ext16s_i32;
- goto do_bswap;
 case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
- exts = INDEX_op_ext16s_i64;
+ width = 16;
 goto do_bswap;
 case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
- exts = INDEX_op_ext32s_i64;
+ width = 32;
 do_bswap:
 /* The base tci bswaps zero-extend, and ignore high bits. */
 tcg_out_op_rr(s, opc, args[0], args[1]);
 if (args[2] & TCG_BSWAP_OS) {
- tcg_out_op_rr(s, exts, args[0], args[0]);
+ tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
 }
 break;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
+ case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
--
2.43.0
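
The conversions above lean on a single identity: an N-bit sign or zero extension is an extract of length N at offset 0. A self-contained C restatement of that identity, with hypothetical helper names rather than the TCG API:

    #include <assert.h>
    #include <stdint.h>

    /* extract(x, 0, len): zero-extend the low len bits (0 < len < 64). */
    static uint64_t extract0(uint64_t x, unsigned len)
    {
        return x & ((1ull << len) - 1);
    }

    /* sextract(x, 0, len): sign-extend the low len bits (0 < len < 64). */
    static int64_t sextract0(uint64_t x, unsigned len)
    {
        return (int64_t)(x << (64 - len)) >> (64 - len);
    }

    int main(void)
    {
        assert(extract0(0x1234, 8) == 0x34);   /* behaves like ext8u */
        assert(sextract0(0x80, 8) == -128);    /* behaves like ext8s */
        assert(sextract0(0xffffffff80000000ull, 32) == INT32_MIN);
        return 0;
    }

This is why the dedicated ext* opcodes can be retired wholesale once every backend implements extract/sextract.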
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Reviewed-by: Emilio G. Cota <cota@braap.org>
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
---
4
tcg/tcg.c | 6 ++-
5
target/arm/helper-a64.c | 16 ++++------------
5
tcg/aarch64/tcg-target.c.inc | 18 ++++---
6
target/arm/translate-a64.c | 38 ++++++++++++++++++++++----------------
6
tcg/arm/tcg-target.c.inc | 23 ++++----
7
2 files changed, 26 insertions(+), 28 deletions(-)
7
tcg/i386/tcg-target.c.inc | 47 +++++++++-------
8
tcg/loongarch64/tcg-target.c.inc | 24 +++++----
9
tcg/mips/tcg-target.c.inc | 43 +++++++++------
10
tcg/ppc/tcg-target.c.inc | 42 +++++++--------
11
tcg/riscv/tcg-target.c.inc | 21 ++++----
12
tcg/s390x/tcg-target.c.inc | 92 ++++++++++++++++++--------------
13
tcg/sparc64/tcg-target.c.inc | 28 +++++++---
14
tcg/tci/tcg-target.c.inc | 14 +++--
15
11 files changed, 210 insertions(+), 148 deletions(-)
16
8
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
9
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
18
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
11
--- a/target/arm/helper-a64.c
20
+++ b/tcg/tcg.c
12
+++ b/target/arm/helper-a64.c
21
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
13
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
22
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
14
int mem_idx;
23
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
15
TCGMemOpIdx oi;
24
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
16
25
+ OUTOP(INDEX_op_mul_i32, TCGOutOpBinary, outop_mul),
17
- if (!HAVE_CMPXCHG128) {
26
+ OUTOP(INDEX_op_mul_i64, TCGOutOpBinary, outop_mul),
18
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
27
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
19
- }
28
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
20
+ assert(HAVE_CMPXCHG128);
29
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
21
30
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
22
mem_idx = cpu_mmu_index(env, false);
31
case INDEX_op_st8_i32:
23
oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
32
case INDEX_op_st16_i32:
24
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
33
case INDEX_op_st_i32:
25
int mem_idx;
34
- case INDEX_op_mul_i32:
26
TCGMemOpIdx oi;
35
case INDEX_op_shl_i32:
27
36
case INDEX_op_shr_i32:
28
- if (!HAVE_CMPXCHG128) {
37
case INDEX_op_sar_i32:
29
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
38
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
30
- }
39
case INDEX_op_st16_i64:
31
+ assert(HAVE_CMPXCHG128);
40
case INDEX_op_st32_i64:
32
41
case INDEX_op_st_i64:
33
mem_idx = cpu_mmu_index(env, false);
42
- case INDEX_op_mul_i64:
34
oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
43
case INDEX_op_shl_i64:
35
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
44
case INDEX_op_shr_i64:
36
int mem_idx;
45
case INDEX_op_sar_i64:
37
TCGMemOpIdx oi;
46
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
38
47
case INDEX_op_and:
39
- if (!HAVE_CMPXCHG128) {
48
case INDEX_op_andc:
40
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
49
case INDEX_op_eqv:
41
- }
50
+ case INDEX_op_mul_i32:
42
+ assert(HAVE_CMPXCHG128);
51
+ case INDEX_op_mul_i64:
43
52
case INDEX_op_nand:
44
mem_idx = cpu_mmu_index(env, false);
53
case INDEX_op_nor:
45
oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
54
case INDEX_op_or:
46
@@ -XXX,XX +XXX,XX @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
55
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
47
int mem_idx;
48
TCGMemOpIdx oi;
49
50
- if (!HAVE_CMPXCHG128) {
51
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
52
- }
53
+ assert(HAVE_CMPXCHG128);
54
55
mem_idx = cpu_mmu_index(env, false);
56
oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
57
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
56
index XXXXXXX..XXXXXXX 100644
58
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/aarch64/tcg-target.c.inc
59
--- a/target/arm/translate-a64.c
58
+++ b/tcg/aarch64/tcg-target.c.inc
60
+++ b/target/arm/translate-a64.c
59
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
61
@@ -XXX,XX +XXX,XX @@
60
.out_rrr = tgen_eqv,
62
61
};
63
#include "trace-tcg.h"
62
64
#include "translate-a64.h"
63
+static void tgen_mul(TCGContext *s, TCGType type,
65
+#include "qemu/atomic128.h"
64
+ TCGReg a0, TCGReg a1, TCGReg a2)
66
65
+{
67
static TCGv_i64 cpu_X[32];
66
+ tcg_out_insn(s, 3509, MADD, type, a0, a1, a2, TCG_REG_XZR);
68
static TCGv_i64 cpu_pc;
67
+}
69
@@ -XXX,XX +XXX,XX @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
68
+
70
get_mem_index(s),
69
+static const TCGOutOpBinary outop_mul = {
71
MO_64 | MO_ALIGN | s->be_data);
70
+ .base.static_constraint = C_O1_I2(r, r, r),
72
tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
71
+ .out_rrr = tgen_mul,
73
- } else if (s->be_data == MO_LE) {
72
+};
74
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
73
+
75
+ } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
74
static const TCGOutOpBinary outop_nand = {
76
+ if (!HAVE_CMPXCHG128) {
75
.base.static_constraint = C_NotImplemented,
77
+ gen_helper_exit_atomic(cpu_env);
76
};
78
+ s->base.is_jmp = DISAS_NORETURN;
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
79
+ } else if (s->be_data == MO_LE) {
78
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
80
gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
79
break;
81
cpu_exclusive_addr,
80
82
cpu_reg(s, rt),
81
- case INDEX_op_mul_i64:
83
cpu_reg(s, rt2));
82
- case INDEX_op_mul_i32:
84
} else {
83
- tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
85
- gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
84
- break;
86
- cpu_reg(s, rt), cpu_reg(s, rt2));
85
-
86
case INDEX_op_div_i64:
87
case INDEX_op_div_i32:
88
tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
89
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
90
case INDEX_op_negsetcond_i64:
91
return C_O1_I2(r, r, rC);
92
93
- case INDEX_op_mul_i32:
94
- case INDEX_op_mul_i64:
95
case INDEX_op_div_i32:
96
case INDEX_op_div_i64:
97
case INDEX_op_divu_i32:
98
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
99
index XXXXXXX..XXXXXXX 100644
100
--- a/tcg/arm/tcg-target.c.inc
101
+++ b/tcg/arm/tcg-target.c.inc
102
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
103
}
104
}
105
106
-static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
107
- TCGReg rn, TCGReg rm)
108
-{
109
- /* mul */
110
- tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
111
-}
112
-
113
static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
114
TCGReg rd1, TCGReg rn, TCGReg rm)
115
{
116
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
117
.base.static_constraint = C_NotImplemented,
118
};
119
120
+static void tgen_mul(TCGContext *s, TCGType type,
121
+ TCGReg a0, TCGReg a1, TCGReg a2)
122
+{
123
+ /* mul */
124
+ tcg_out32(s, (COND_AL << 28) | 0x90 | (a0 << 16) | (a1 << 8) | a2);
125
+}
126
+
127
+static const TCGOutOpBinary outop_mul = {
128
+ .base.static_constraint = C_O1_I2(r, r, r),
129
+ .out_rrr = tgen_mul,
130
+};
131
+
132
static const TCGOutOpBinary outop_nand = {
133
.base.static_constraint = C_NotImplemented,
134
};
135
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
136
}
137
tcg_out_mov_reg(s, COND_AL, args[0], a0);
138
break;
139
- case INDEX_op_mul_i32:
140
- tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
141
- break;
142
case INDEX_op_mulu2_i32:
143
tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
144
break;
145
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
146
case INDEX_op_ctz_i32:
147
return C_O1_I2(r, r, rIK);
148
149
- case INDEX_op_mul_i32:
150
case INDEX_op_div_i32:
151
case INDEX_op_divu_i32:
152
return C_O1_I2(r, r, r);
153
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
154
index XXXXXXX..XXXXXXX 100644
155
--- a/tcg/i386/tcg-target.c.inc
156
+++ b/tcg/i386/tcg-target.c.inc
157
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
158
.base.static_constraint = C_NotImplemented,
159
};
160
161
+static void tgen_mul(TCGContext *s, TCGType type,
162
+ TCGReg a0, TCGReg a1, TCGReg a2)
163
+{
164
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
165
+ tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
166
+}
167
+
168
+static void tgen_muli(TCGContext *s, TCGType type,
169
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
170
+{
171
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
172
+
173
+ if (a2 == (int8_t)a2) {
174
+ tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
175
+ tcg_out8(s, a2);
176
+ } else {
177
+ tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
178
+ tcg_out32(s, a2);
179
+ }
180
+}
181
+
182
+static const TCGOutOpBinary outop_mul = {
183
+ .base.static_constraint = C_O1_I2(r, 0, re),
184
+ .out_rrr = tgen_mul,
185
+ .out_rri = tgen_muli,
186
+};
187
+
188
static const TCGOutOpBinary outop_nand = {
189
.base.static_constraint = C_NotImplemented,
190
};
191
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
192
}
193
break;
194
195
- OP_32_64(mul):
196
- if (const_a2) {
197
- int32_t val;
198
- val = a2;
199
- if (val == (int8_t)val) {
200
- tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
201
- tcg_out8(s, val);
202
- } else {
203
- tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
204
- tcg_out32(s, val);
205
- }
87
- }
206
- } else {
88
- } else {
207
- tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
89
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
208
- }
90
gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
209
- break;
91
cpu_exclusive_addr,
92
cpu_reg(s, rt),
93
cpu_reg(s, rt2));
94
- } else {
95
- gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
96
- cpu_reg(s, rt), cpu_reg(s, rt2));
97
}
98
+ } else if (s->be_data == MO_LE) {
99
+ gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
100
+ cpu_reg(s, rt), cpu_reg(s, rt2));
101
+ } else {
102
+ gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
103
+ cpu_reg(s, rt), cpu_reg(s, rt2));
104
}
105
} else {
106
tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
107
@@ -XXX,XX +XXX,XX @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
108
}
109
tcg_temp_free_i64(cmp);
110
} else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
111
- TCGv_i32 tcg_rs = tcg_const_i32(rs);
210
-
112
-
211
OP_32_64(div2):
113
- if (s->be_data == MO_LE) {
212
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
114
- gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
213
break;
115
+ if (HAVE_CMPXCHG128) {
214
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
116
+ TCGv_i32 tcg_rs = tcg_const_i32(rs);
215
case INDEX_op_st_i64:
117
+ if (s->be_data == MO_LE) {
216
return C_O0_I2(re, r);
118
+ gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
217
119
+ } else {
218
- case INDEX_op_mul_i32:
120
+ gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
219
- case INDEX_op_mul_i64:
121
+ }
220
- return C_O1_I2(r, 0, re);
122
+ tcg_temp_free_i32(tcg_rs);
221
-
123
} else {
222
case INDEX_op_shl_i32:
124
- gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
223
case INDEX_op_shl_i64:
125
+ gen_helper_exit_atomic(cpu_env);
224
case INDEX_op_shr_i32:
126
+ s->base.is_jmp = DISAS_NORETURN;
225
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
226
index XXXXXXX..XXXXXXX 100644
227
--- a/tcg/loongarch64/tcg-target.c.inc
228
+++ b/tcg/loongarch64/tcg-target.c.inc
229
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
230
.base.static_constraint = C_NotImplemented,
231
};
232
233
+static void tgen_mul(TCGContext *s, TCGType type,
234
+ TCGReg a0, TCGReg a1, TCGReg a2)
235
+{
236
+ if (type == TCG_TYPE_I32) {
237
+ tcg_out_opc_mul_w(s, a0, a1, a2);
238
+ } else {
239
+ tcg_out_opc_mul_d(s, a0, a1, a2);
240
+ }
241
+}
242
+
243
+static const TCGOutOpBinary outop_mul = {
244
+ .base.static_constraint = C_O1_I2(r, r, r),
245
+ .out_rrr = tgen_mul,
246
+};
247
+
248
static const TCGOutOpBinary outop_nand = {
249
.base.static_constraint = C_NotImplemented,
250
};
251
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
252
}
127
}
253
break;
128
- tcg_temp_free_i32(tcg_rs);
254
129
} else {
255
- case INDEX_op_mul_i32:
130
TCGv_i64 d1 = tcg_temp_new_i64();
256
- tcg_out_opc_mul_w(s, a0, a1, a2);
131
TCGv_i64 d2 = tcg_temp_new_i64();
257
- break;
258
- case INDEX_op_mul_i64:
259
- tcg_out_opc_mul_d(s, a0, a1, a2);
260
- break;
261
-
262
case INDEX_op_mulsh_i32:
263
tcg_out_opc_mulh_w(s, a0, a1, a2);
264
break;
265
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
266
case INDEX_op_setcond_i64:
267
return C_O1_I2(r, rz, rJ);
268
269
- case INDEX_op_mul_i32:
270
- case INDEX_op_mul_i64:
271
case INDEX_op_mulsh_i32:
272
case INDEX_op_mulsh_i64:
273
case INDEX_op_muluh_i32:
274
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
275
index XXXXXXX..XXXXXXX 100644
276
--- a/tcg/mips/tcg-target.c.inc
277
+++ b/tcg/mips/tcg-target.c.inc
278
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
279
.base.static_constraint = C_NotImplemented,
280
};
281
282
+static void tgen_mul(TCGContext *s, TCGType type,
283
+ TCGReg a0, TCGReg a1, TCGReg a2)
284
+{
285
+ MIPSInsn insn;
286
+
287
+ if (type == TCG_TYPE_I32) {
288
+ if (use_mips32_instructions) {
289
+ tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
290
+ return;
291
+ }
292
+ insn = OPC_MULT;
293
+ } else {
294
+ if (use_mips32r6_instructions) {
295
+ tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
296
+ return;
297
+ }
298
+ insn = OPC_DMULT;
299
+ }
300
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
301
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
302
+}
303
+
304
+static const TCGOutOpBinary outop_mul = {
305
+ .base.static_constraint = C_O1_I2(r, r, r),
306
+ .out_rrr = tgen_mul,
307
+};
308
+
309
static const TCGOutOpBinary outop_nand = {
310
.base.static_constraint = C_NotImplemented,
311
};
312
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
313
tcg_out_ldst(s, i1, a0, a1, a2);
314
break;
315
316
- case INDEX_op_mul_i32:
317
- if (use_mips32_instructions) {
318
- tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
319
- break;
320
- }
321
- i1 = OPC_MULT, i2 = OPC_MFLO;
322
- goto do_hilo1;
323
case INDEX_op_mulsh_i32:
324
if (use_mips32r6_instructions) {
325
tcg_out_opc_reg(s, OPC_MUH, a0, a1, a2);
326
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
327
}
328
i1 = OPC_DIVU, i2 = OPC_MFHI;
329
goto do_hilo1;
330
- case INDEX_op_mul_i64:
331
- if (use_mips32r6_instructions) {
332
- tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
333
- break;
334
- }
335
- i1 = OPC_DMULT, i2 = OPC_MFLO;
336
- goto do_hilo1;
337
case INDEX_op_mulsh_i64:
338
if (use_mips32r6_instructions) {
339
tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
340
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
341
case INDEX_op_st_i64:
342
return C_O0_I2(rz, r);
343
344
- case INDEX_op_mul_i32:
345
case INDEX_op_mulsh_i32:
346
case INDEX_op_muluh_i32:
347
case INDEX_op_div_i32:
348
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
349
case INDEX_op_rem_i32:
350
case INDEX_op_remu_i32:
351
case INDEX_op_setcond_i32:
352
- case INDEX_op_mul_i64:
353
case INDEX_op_mulsh_i64:
354
case INDEX_op_muluh_i64:
355
case INDEX_op_div_i64:
356
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
357
index XXXXXXX..XXXXXXX 100644
358
--- a/tcg/ppc/tcg-target.c.inc
359
+++ b/tcg/ppc/tcg-target.c.inc
360
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
361
.out_rrr = tgen_eqv,
362
};
363
364
+static void tgen_mul(TCGContext *s, TCGType type,
365
+ TCGReg a0, TCGReg a1, TCGReg a2)
366
+{
367
+ uint32_t insn = type == TCG_TYPE_I32 ? MULLW : MULLD;
368
+ tcg_out32(s, insn | TAB(a0, a1, a2));
369
+}
370
+
371
+static void tgen_muli(TCGContext *s, TCGType type,
372
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
373
+{
374
+ tcg_out32(s, MULLI | TAI(a0, a1, a2));
375
+}
376
+
377
+static const TCGOutOpBinary outop_mul = {
378
+ .base.static_constraint = C_O1_I2(r, r, rI),
379
+ .out_rrr = tgen_mul,
380
+ .out_rri = tgen_muli,
381
+};
382
+
383
static void tgen_nand(TCGContext *s, TCGType type,
384
TCGReg a0, TCGReg a1, TCGReg a2)
385
{
386
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
387
const TCGArg args[TCG_MAX_OP_ARGS],
388
const int const_args[TCG_MAX_OP_ARGS])
389
{
390
- TCGArg a0, a1, a2;
391
+ TCGArg a0, a1;
392
393
switch (opc) {
394
case INDEX_op_goto_ptr:
395
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
396
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
397
break;
398
399
- case INDEX_op_mul_i32:
400
- a0 = args[0], a1 = args[1], a2 = args[2];
401
- if (const_args[2]) {
402
- tcg_out32(s, MULLI | TAI(a0, a1, a2));
403
- } else {
404
- tcg_out32(s, MULLW | TAB(a0, a1, a2));
405
- }
406
- break;
407
-
408
case INDEX_op_div_i32:
409
tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
410
break;
411
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
412
}
413
break;
414
415
- case INDEX_op_mul_i64:
416
- a0 = args[0], a1 = args[1], a2 = args[2];
417
- if (const_args[2]) {
418
- tcg_out32(s, MULLI | TAI(a0, a1, a2));
419
- } else {
420
- tcg_out32(s, MULLD | TAB(a0, a1, a2));
421
- }
422
- break;
423
case INDEX_op_div_i64:
424
tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
425
break;
426
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
427
case INDEX_op_rotr_i64:
428
return C_O1_I2(r, r, ri);
429
430
- case INDEX_op_mul_i32:
431
- case INDEX_op_mul_i64:
432
- return C_O1_I2(r, r, rI);
433
-
434
case INDEX_op_div_i32:
435
case INDEX_op_divu_i32:
436
case INDEX_op_rem_i32:
437
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
438
index XXXXXXX..XXXXXXX 100644
439
--- a/tcg/riscv/tcg-target.c.inc
440
+++ b/tcg/riscv/tcg-target.c.inc
441
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
442
.out_rrr = tgen_eqv,
443
};
444
445
+static void tgen_mul(TCGContext *s, TCGType type,
446
+ TCGReg a0, TCGReg a1, TCGReg a2)
447
+{
448
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_MULW : OPC_MUL;
449
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
450
+}
451
+
452
+static const TCGOutOpBinary outop_mul = {
453
+ .base.static_constraint = C_O1_I2(r, r, r),
454
+ .out_rrr = tgen_mul,
455
+};
456
+
457
static const TCGOutOpBinary outop_nand = {
458
.base.static_constraint = C_NotImplemented,
459
};
460
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
461
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
462
break;
463
464
- case INDEX_op_mul_i32:
465
- tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
466
- break;
467
- case INDEX_op_mul_i64:
468
- tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
469
- break;
470
-
471
case INDEX_op_div_i32:
472
tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
473
break;
474
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
475
case INDEX_op_negsetcond_i64:
476
return C_O1_I2(r, r, rI);
477
478
- case INDEX_op_mul_i32:
479
case INDEX_op_mulsh_i32:
480
case INDEX_op_muluh_i32:
481
case INDEX_op_div_i32:
482
case INDEX_op_divu_i32:
483
case INDEX_op_rem_i32:
484
case INDEX_op_remu_i32:
485
- case INDEX_op_mul_i64:
486
case INDEX_op_mulsh_i64:
487
case INDEX_op_muluh_i64:
488
case INDEX_op_div_i64:
489
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
490
index XXXXXXX..XXXXXXX 100644
491
--- a/tcg/s390x/tcg-target.c.inc
492
+++ b/tcg/s390x/tcg-target.c.inc
493
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
494
.out_rrr = tgen_eqv,
495
};
496
497
+static void tgen_mul(TCGContext *s, TCGType type,
498
+ TCGReg a0, TCGReg a1, TCGReg a2)
499
+{
500
+ if (type == TCG_TYPE_I32) {
501
+ if (a0 == a1) {
502
+ tcg_out_insn(s, RRE, MSR, a0, a2);
503
+ } else {
504
+ tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
505
+ }
506
+ } else {
507
+ if (a0 == a1) {
508
+ tcg_out_insn(s, RRE, MSGR, a0, a2);
509
+ } else {
510
+ tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
511
+ }
512
+ }
513
+}
514
+
515
+static void tgen_muli(TCGContext *s, TCGType type,
516
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
517
+{
518
+ tcg_out_mov(s, type, a0, a1);
519
+ if (type == TCG_TYPE_I32) {
520
+ if (a2 == (int16_t)a2) {
521
+ tcg_out_insn(s, RI, MHI, a0, a2);
522
+ } else {
523
+ tcg_out_insn(s, RIL, MSFI, a0, a2);
524
+ }
525
+ } else {
526
+ if (a2 == (int16_t)a2) {
527
+ tcg_out_insn(s, RI, MGHI, a0, a2);
528
+ } else {
529
+ tcg_out_insn(s, RIL, MSGFI, a0, a2);
530
+ }
531
+ }
532
+}
533
+
534
+static TCGConstraintSetIndex cset_mul(TCGType type, unsigned flags)
535
+{
536
+ return (HAVE_FACILITY(MISC_INSN_EXT2)
537
+ ? C_O1_I2(r, r, rJ)
538
+ : C_O1_I2(r, 0, rJ));
539
+}
540
+
541
+static const TCGOutOpBinary outop_mul = {
542
+ .base.static_constraint = C_Dynamic,
543
+ .base.dynamic_constraint = cset_mul,
544
+ .out_rrr = tgen_mul,
545
+ .out_rri = tgen_muli,
546
+};
547
+
548
 static void tgen_nand(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
         break;

-    case INDEX_op_mul_i32:
-        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-            if (a2 == (int16_t)a2) {
-                tcg_out_insn(s, RI, MHI, a0, a2);
-            } else {
-                tcg_out_insn(s, RIL, MSFI, a0, a2);
-            }
-        } else if (a0 == a1) {
-            tcg_out_insn(s, RRE, MSR, a0, a2);
-        } else {
-            tcg_out_insn(s, RRFa, MSRKC, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_div2_i32:
         tcg_debug_assert(args[0] == args[2]);
         tcg_debug_assert(args[1] == args[3]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
         break;

-    case INDEX_op_mul_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[2]) {
-            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-            if (a2 == (int16_t)a2) {
-                tcg_out_insn(s, RI, MGHI, a0, a2);
-            } else {
-                tcg_out_insn(s, RIL, MSGFI, a0, a2);
-            }
-        } else if (a0 == a1) {
-            tcg_out_insn(s, RRE, MSGR, a0, a2);
-        } else {
-            tcg_out_insn(s, RRFa, MSGRKC, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_div2_i64:
         /*
          * ??? We get an unnecessary sign-extension of the dividend
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_clz_i64:
         return C_O1_I2(r, r, rI);

-    case INDEX_op_mul_i32:
-        return (HAVE_FACILITY(MISC_INSN_EXT2)
-                ? C_O1_I2(r, r, ri)
-                : C_O1_I2(r, 0, ri));
-    case INDEX_op_mul_i64:
-        return (HAVE_FACILITY(MISC_INSN_EXT2)
-                ? C_O1_I2(r, r, rJ)
-                : C_O1_I2(r, 0, rJ));
-
     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
     case INDEX_op_sar_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_mul(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
+    tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_muli(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
+    tcg_out_arithi(s, a0, a1, a2, insn);
+}
+
+static const TCGOutOpBinary outop_mul = {
+    .base.static_constraint = C_O1_I2(r, r, rJ),
+    .out_rrr = tgen_mul,
+    .out_rri = tgen_muli,
+};
+
 static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_sar_i32:
         c = SHIFT_SRA;
         goto do_shift32;
-    case INDEX_op_mul_i32:
-        c = ARITH_UMUL;
-        goto gen_arith;

     case INDEX_op_div_i32:
         tcg_out_div32(s, a0, a1, a2, c2, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_sar_i64:
         c = SHIFT_SRAX;
         goto do_shift64;
-    case INDEX_op_mul_i64:
-        c = ARITH_MULX;
-        goto gen_arith;
     case INDEX_op_div_i64:
         c = ARITH_SDIVX;
         goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_qemu_st_i64:
         return C_O0_I2(rz, r);

-    case INDEX_op_mul_i32:
-    case INDEX_op_mul_i64:
     case INDEX_op_div_i32:
     case INDEX_op_div_i64:
     case INDEX_op_divu_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i32:
     case INDEX_op_remu_i64:
-    case INDEX_op_mul_i32:
-    case INDEX_op_mul_i64:
     case INDEX_op_shl_i32:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
     .out_rrr = tgen_eqv,
 };

+static void tgen_mul(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, glue(INDEX_op_mul_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_mul = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_mul,
+};
+
 static void tgen_nand(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_ldst(s, opc, args[0], args[1], args[2]);
         break;

-    CASE_32_64(mul)
     CASE_32_64(shl)
     CASE_32_64(shr)
     CASE_32_64(sar)
--
2.43.0

Tested-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
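Note: tcg_gen_addcio_tl is the carry-in/carry-out add used below; it lets
one op replace the chained add2 pair.  As a reference for its semantics
only (a standalone sketch, not the QEMU implementation; addcio_ref is a
hypothetical name):

    #include <stdint.h>

    /* t0 = a + b + carry_in; carry_out = carry out of that full sum. */
    static void addcio_ref(uint64_t *t0, uint64_t *carry_out,
                           uint64_t a, uint64_t b, uint64_t carry_in)
    {
        uint64_t sum = a + b;
        uint64_t out = sum + carry_in;

        /* A carry can come from either partial sum, never both. */
        *carry_out = (sum < a) | (out < sum);
        *t0 = out;
    }
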
 target/ppc/translate.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
             tcg_gen_mov_tl(ca32, ca);
         }
     } else {
-        TCGv zero = tcg_constant_tl(0);
         if (add_ca) {
-            tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
-            tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
+            tcg_gen_addcio_tl(t0, ca, arg1, arg2, ca);
         } else {
+            TCGv zero = tcg_constant_tl(0);
             tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
         }
         gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
@@ -XXX,XX +XXX,XX @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
             tcg_gen_mov_tl(cpu_ca32, cpu_ca);
         }
     } else if (add_ca) {
-        TCGv zero, inv1 = tcg_temp_new();
+        TCGv inv1 = tcg_temp_new();
         tcg_gen_not_tl(inv1, arg1);
-        zero = tcg_constant_tl(0);
-        tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
-        tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
+        tcg_gen_addcio_tl(t0, cpu_ca, arg2, inv1, cpu_ca);
         gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
     } else {
         tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
--
2.43.0


Reviewed-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
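The conversion below follows one idiom throughout: a run-time "if" on a
compile-time-constant predicate (HAVE_ATOMIC128 / HAVE_CMPXCHG128)
replaces "#ifdef CONFIG_ATOMIC128", so both arms are parsed and
type-checked while the dead one is still eliminated.  A self-contained
sketch of the idiom (HAVE_WIDGET, fast_path and slow_path are made-up
names):

    #include <stdio.h>

    #define HAVE_WIDGET 0            /* constant, like HAVE_ATOMIC128 */

    static void fast_path(void) { puts("inline atomic path"); }
    static void slow_path(void) { puts("exit to serial execution"); }

    int main(void)
    {
        /* Unlike #ifdef, the disabled branch still compiles. */
        if (HAVE_WIDGET) {
            fast_path();
        } else {
            slow_path();
        }
        return 0;
    }
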
 target/ppc/helper.h     |   2 +-
 target/ppc/mem_helper.c |  33 ++++++++++--
 target/ppc/translate.c  | 115 +++++++++++++++++++++-------------------
 3 files changed, 88 insertions(+), 62 deletions(-)

diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
 DEF_HELPER_1(tbegin, void, env)
 DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)

-#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
+#ifdef TARGET_PPC64
 DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
 DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
 DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu_ldst.h"
 #include "tcg.h"
 #include "internal.h"
+#include "qemu/atomic128.h"

 //#define DEBUG_OP

@@ -XXX,XX +XXX,XX @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
     return i;
 }

-#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
+#ifdef TARGET_PPC64
 uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                                uint32_t opidx)
 {
-    Int128 ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
+    Int128 ret;
+
+    /* We will have raised EXCP_ATOMIC from the translator.  */
+    assert(HAVE_ATOMIC128);
+    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
     env->retxh = int128_gethi(ret);
     return int128_getlo(ret);
 }
@@ -XXX,XX +XXX,XX @@ uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
 uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                                uint32_t opidx)
 {
-    Int128 ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
+    Int128 ret;
+
+    /* We will have raised EXCP_ATOMIC from the translator.  */
+    assert(HAVE_ATOMIC128);
+    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
     env->retxh = int128_gethi(ret);
     return int128_getlo(ret);
 }
@@ -XXX,XX +XXX,XX @@ uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
 void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                             uint64_t lo, uint64_t hi, uint32_t opidx)
 {
-    Int128 val = int128_make128(lo, hi);
+    Int128 val;
+
+    /* We will have raised EXCP_ATOMIC from the translator.  */
+    assert(HAVE_ATOMIC128);
+    val = int128_make128(lo, hi);
     helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
 }

 void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                             uint64_t lo, uint64_t hi, uint32_t opidx)
 {
-    Int128 val = int128_make128(lo, hi);
+    Int128 val;
+
+    /* We will have raised EXCP_ATOMIC from the translator.  */
+    assert(HAVE_ATOMIC128);
+    val = int128_make128(lo, hi);
     helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
 }

@@ -XXX,XX +XXX,XX @@ uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
 {
     bool success = false;

+    /* We will have raised EXCP_ATOMIC from the translator.  */
+    assert(HAVE_CMPXCHG128);
+
     if (likely(addr == env->reserve_addr)) {
         Int128 oldv, cmpv, newv;

@@ -XXX,XX +XXX,XX @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
 {
     bool success = false;

+    /* We will have raised EXCP_ATOMIC from the translator.  */
+    assert(HAVE_CMPXCHG128);
+
     if (likely(addr == env->reserve_addr)) {
         Int128 oldv, cmpv, newv;

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@
 #include "trace-tcg.h"
 #include "exec/translator.h"
 #include "exec/log.h"
+#include "qemu/atomic128.h"


 #define CPU_SINGLE_STEP 0x1
@@ -XXX,XX +XXX,XX @@ static void gen_lq(DisasContext *ctx)
     hi = cpu_gpr[rd];

     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-#ifdef CONFIG_ATOMIC128
-        TCGv_i32 oi = tcg_temp_new_i32();
-        if (ctx->le_mode) {
-            tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
-            gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+        if (HAVE_ATOMIC128) {
+            TCGv_i32 oi = tcg_temp_new_i32();
+            if (ctx->le_mode) {
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+            } else {
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+            }
+            tcg_temp_free_i32(oi);
+            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
         } else {
-            tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
-            gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+            /* Restart with exclusive lock.  */
+            gen_helper_exit_atomic(cpu_env);
+            ctx->base.is_jmp = DISAS_NORETURN;
         }
-        tcg_temp_free_i32(oi);
-        tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
-#else
-        /* Restart with exclusive lock.  */
-        gen_helper_exit_atomic(cpu_env);
-        ctx->base.is_jmp = DISAS_NORETURN;
-#endif
     } else if (ctx->le_mode) {
         tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
         gen_addr_add(ctx, EA, EA, 8);
@@ -XXX,XX +XXX,XX @@ static void gen_std(DisasContext *ctx)
         hi = cpu_gpr[rs];

         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-#ifdef CONFIG_ATOMIC128
-            TCGv_i32 oi = tcg_temp_new_i32();
-            if (ctx->le_mode) {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
-                gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
+            if (HAVE_ATOMIC128) {
+                TCGv_i32 oi = tcg_temp_new_i32();
+                if (ctx->le_mode) {
+                    tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+                    gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
+                } else {
+                    tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+                    gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
+                }
+                tcg_temp_free_i32(oi);
             } else {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
-                gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
+                /* Restart with exclusive lock.  */
+                gen_helper_exit_atomic(cpu_env);
+                ctx->base.is_jmp = DISAS_NORETURN;
             }
-            tcg_temp_free_i32(oi);
-#else
-            /* Restart with exclusive lock.  */
-            gen_helper_exit_atomic(cpu_env);
-            ctx->base.is_jmp = DISAS_NORETURN;
-#endif
         } else if (ctx->le_mode) {
             tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
             gen_addr_add(ctx, EA, EA, 8);
@@ -XXX,XX +XXX,XX @@ static void gen_lqarx(DisasContext *ctx)
     hi = cpu_gpr[rd];

     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-#ifdef CONFIG_ATOMIC128
-        TCGv_i32 oi = tcg_temp_new_i32();
-        if (ctx->le_mode) {
-            tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
-                                                ctx->mem_idx));
-            gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+        if (HAVE_ATOMIC128) {
+            TCGv_i32 oi = tcg_temp_new_i32();
+            if (ctx->le_mode) {
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
+                                                    ctx->mem_idx));
+                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+            } else {
+                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
+                                                    ctx->mem_idx));
+                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+            }
+            tcg_temp_free_i32(oi);
+            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
         } else {
-            tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
-                                                ctx->mem_idx));
-            gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+            /* Restart with exclusive lock.  */
+            gen_helper_exit_atomic(cpu_env);
+            ctx->base.is_jmp = DISAS_NORETURN;
+            tcg_temp_free(EA);
+            return;
         }
-        tcg_temp_free_i32(oi);
-        tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
-#else
-        /* Restart with exclusive lock.  */
-        gen_helper_exit_atomic(cpu_env);
-        ctx->base.is_jmp = DISAS_NORETURN;
-        tcg_temp_free(EA);
-        return;
-#endif
     } else if (ctx->le_mode) {
         tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
         tcg_gen_mov_tl(cpu_reserve, EA);
@@ -XXX,XX +XXX,XX @@ static void gen_stqcx_(DisasContext *ctx)
     hi = cpu_gpr[rs];

     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-        TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
-#ifdef CONFIG_ATOMIC128
-        if (ctx->le_mode) {
-            gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
+        if (HAVE_CMPXCHG128) {
+            TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
+            if (ctx->le_mode) {
+                gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
+                                             EA, lo, hi, oi);
+            } else {
+                gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env,
+                                             EA, lo, hi, oi);
+            }
+            tcg_temp_free_i32(oi);
         } else {
-            gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi);
+            /* Restart with exclusive lock.  */
+            gen_helper_exit_atomic(cpu_env);
+            ctx->base.is_jmp = DISAS_NORETURN;
         }
-#else
-        /* Restart with exclusive lock.  */
-        gen_helper_exit_atomic(cpu_env);
-        ctx->base.is_jmp = DISAS_NORETURN;
-#endif
         tcg_temp_free(EA);
-        tcg_temp_free_i32(oi);
     } else {
         TCGLabel *lab_fail = gen_new_label();
         TCGLabel *lab_over = gen_new_label();
--
2.17.2

All targets now provide negsetcond, so remove the conditional.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
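For reference, negsetcond produces the comparison result as a 0/-1 mask
rather than the 0/1 of setcond; the old generic fallback was setcond
followed by neg.  A minimal sketch of the semantics, shown for the
signed-less-than condition (negsetcond_lt_ref is a hypothetical name):

    #include <stdint.h>

    /* Returns all-ones if a < b, else zero. */
    static uint64_t negsetcond_lt_ref(int64_t a, int64_t b)
    {
        uint64_t set = (a < b);      /* setcond: 0 or 1 */
        return -set;                 /* neg: 0 or 0xffff...ffff */
    }
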
 tcg/aarch64/tcg-target-has.h     |  2 --
 tcg/arm/tcg-target-has.h         |  1 -
 tcg/i386/tcg-target-has.h        |  2 --
 tcg/loongarch64/tcg-target-has.h |  2 --
 tcg/mips/tcg-target-has.h        |  2 --
 tcg/ppc/tcg-target-has.h         |  2 --
 tcg/riscv/tcg-target-has.h       |  2 --
 tcg/s390x/tcg-target-has.h       |  2 --
 tcg/sparc64/tcg-target-has.h     |  2 --
 tcg/tcg-has.h                    |  1 -
 tcg/tci/tcg-target-has.h         |  2 --
 tcg/optimize.c                   | 24 +++++++++---------------
 tcg/tcg-op.c                     | 12 +++---------
 tcg/tcg.c                        |  6 ++----
 14 files changed, 14 insertions(+), 48 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_extract2_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1
 #define TCG_TARGET_HAS_extr_i64_i32 0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_extract2_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1

diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_extract2_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0

 #define TCG_TARGET_HAS_qemu_ldst_i128 0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_extract2_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1

@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_extract2_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #include "host/cpuinfo.h"

 /* optional instructions */
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_extract2_i32 0
 #define TCG_TARGET_HAS_add2_i32 0
 #define TCG_TARGET_HAS_sub2_i32 0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_qemu_st8_i32 0

 /* 64-bit operations */
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
 #define TCG_TARGET_HAS_extr_i64_i32 1
 #define TCG_TARGET_HAS_bswap16_i64 1
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 /* optional instructions */
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_negsetcond_i32 1

 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_add2_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_sub2_i64 0
 #define TCG_TARGET_HAS_ext32s_i64 1
 #define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #endif

 /* optional instructions detected at runtime */
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0

 #if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1
 #endif
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #include "host/cpuinfo.h"

 /* optional instructions */
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_extract2_i32 0
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
 #define TCG_TARGET_HAS_extr_i64_i32 1
 #define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1
 #define TCG_TARGET_HAS_extr_i64_i32 0
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1

diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_bswap16_i32 0
 #define TCG_TARGET_HAS_bswap32_i32 0
 #define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_bswap32_i64 0
 #define TCG_TARGET_HAS_bswap64_i64 0
 #define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_add2_i64 1
 #define TCG_TARGET_HAS_sub2_i64 1

diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64 0
 #define TCG_TARGET_HAS_bswap64_i64 0
 #define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 0
 #define TCG_TARGET_HAS_add2_i64 0
 #define TCG_TARGET_HAS_sub2_i64 0
 /* Turn some undef macros into true macros.  */
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
 #define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
 #define TCG_TARGET_HAS_qemu_st8_i32 0

 #if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
 #define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
 #define TCG_TARGET_HAS_add2_i32 1
 #define TCG_TARGET_HAS_sub2_i32 1
 #define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
     if (ti_is_const(tt) && ti_is_const(ft)) {
         uint64_t tv = ti_const_val(tt);
         uint64_t fv = ti_const_val(ft);
-        TCGOpcode opc, negopc = 0;
+        TCGOpcode opc, negopc;
         TCGCond cond = op->args[5];

         switch (ctx->type) {
         case TCG_TYPE_I32:
             opc = INDEX_op_setcond_i32;
-            if (TCG_TARGET_HAS_negsetcond_i32) {
-                negopc = INDEX_op_negsetcond_i32;
-            }
+            negopc = INDEX_op_negsetcond_i32;
             tv = (int32_t)tv;
             fv = (int32_t)fv;
             break;
         case TCG_TYPE_I64:
             opc = INDEX_op_setcond_i64;
-            if (TCG_TARGET_HAS_negsetcond_i64) {
-                negopc = INDEX_op_negsetcond_i64;
-            }
+            negopc = INDEX_op_negsetcond_i64;
             break;
         default:
             g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
         } else if (fv == 1 && tv == 0) {
             op->opc = opc;
             op->args[3] = tcg_invert_cond(cond);
-        } else if (negopc) {
-            if (tv == -1 && fv == 0) {
-                op->opc = negopc;
-                op->args[3] = cond;
-            } else if (fv == -1 && tv == 0) {
-                op->opc = negopc;
-                op->args[3] = tcg_invert_cond(cond);
-            }
+        } else if (tv == -1 && fv == 0) {
+            op->opc = negopc;
+            op->args[3] = cond;
+        } else if (fv == -1 && tv == 0) {
+            op->opc = negopc;
+            op->args[3] = tcg_invert_cond(cond);
         }
     }

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
         tcg_gen_movi_i32(ret, -1);
     } else if (cond == TCG_COND_NEVER) {
         tcg_gen_movi_i32(ret, 0);
-    } else if (TCG_TARGET_HAS_negsetcond_i32) {
-        tcg_gen_op4i_i32(INDEX_op_negsetcond_i32, ret, arg1, arg2, cond);
     } else {
-        tcg_gen_setcond_i32(cond, ret, arg1, arg2);
-        tcg_gen_neg_i32(ret, ret);
+        tcg_gen_op4i_i32(INDEX_op_negsetcond_i32, ret, arg1, arg2, cond);
     }
 }

@@ -XXX,XX +XXX,XX @@ void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
         tcg_gen_movi_i64(ret, -1);
     } else if (cond == TCG_COND_NEVER) {
         tcg_gen_movi_i64(ret, 0);
-    } else if (TCG_TARGET_HAS_negsetcond_i64) {
+    } else if (TCG_TARGET_REG_BITS == 64) {
         tcg_gen_op4i_i64(INDEX_op_negsetcond_i64, ret, arg1, arg2, cond);
-    } else if (TCG_TARGET_REG_BITS == 32) {
+    } else {
         tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
                          TCGV_LOW(arg1), TCGV_HIGH(arg1),
                          TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
         tcg_gen_neg_i32(TCGV_LOW(ret), TCGV_LOW(ret));
         tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_LOW(ret));
-    } else {
-        tcg_gen_setcond_i64(cond, ret, arg1, arg2);
-        tcg_gen_neg_i64(ret, ret);
     }
 }

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return has_type;

     case INDEX_op_setcond_i32:
+    case INDEX_op_negsetcond_i32:
     case INDEX_op_brcond_i32:
     case INDEX_op_movcond_i32:
     case INDEX_op_ld8u_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_deposit_i32:
         return true;

-    case INDEX_op_negsetcond_i32:
-        return TCG_TARGET_HAS_negsetcond_i32;
     case INDEX_op_extract2_i32:
         return TCG_TARGET_HAS_extract2_i32;
     case INDEX_op_add2_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_REG_BITS == 32;

     case INDEX_op_setcond_i64:
+    case INDEX_op_negsetcond_i64:
     case INDEX_op_brcond_i64:
     case INDEX_op_movcond_i64:
     case INDEX_op_ld8u_i64:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_deposit_i64:
         return TCG_TARGET_REG_BITS == 64;

-    case INDEX_op_negsetcond_i64:
-        return TCG_TARGET_HAS_negsetcond_i64;
     case INDEX_op_extract2_i64:
         return TCG_TARGET_HAS_extract2_i64;
     case INDEX_op_extrl_i64_i32:
--
2.43.0


Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
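In the do_csst change below, fc and sc are log2-encoded operand sizes:
the compare-and-swap covers 4 << fc bytes (log2 size fc + 2) and the
store 1 << sc bytes (log2 size sc).  The rewritten guard restarts in
serial context unless the host can honor each size atomically.  A sketch
of that check under those assumptions (must_restart_serial is a
hypothetical name):

    #include <stdbool.h>
    #include <stdint.h>

    /* host_max_log2 is 3 when CONFIG_ATOMIC64 is set, else 2; the
       128-bit primitives lift the limit for their operation. */
    static bool must_restart_serial(uint32_t fc, uint32_t sc,
                                    uint32_t host_max_log2,
                                    bool have_cmpxchg128,
                                    bool have_atomic128)
    {
        return (!have_cmpxchg128 && fc + 2 > host_max_log2) ||
               (!have_atomic128 && sc > host_max_log2);
    }
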
 target/s390x/mem_helper.c | 92 +++++++++++++++++----------------
 1 file changed, 41 insertions(+), 51 deletions(-)

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/exec-all.h"
 #include "exec/cpu_ldst.h"
 #include "qemu/int128.h"
+#include "qemu/atomic128.h"

 #if !defined(CONFIG_USER_ONLY)
 #include "hw/s390x/storage-keys.h"
@@ -XXX,XX +XXX,XX @@ static void do_cdsg(CPUS390XState *env, uint64_t addr,
     bool fail;

     if (parallel) {
-#ifndef CONFIG_ATOMIC128
+#if !HAVE_CMPXCHG128
         cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
 #else
         int mem_idx = cpu_mmu_index(env, false);
@@ -XXX,XX +XXX,XX @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
 static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                         uint64_t a2, bool parallel)
 {
-#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
     uint32_t mem_idx = cpu_mmu_index(env, false);
-#endif
     uintptr_t ra = GETPC();
     uint32_t fc = extract32(env->regs[0], 0, 8);
     uint32_t sc = extract32(env->regs[0], 8, 8);
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
     probe_write(env, a2, 0, mem_idx, ra);
 #endif

-    /* Note that the compare-and-swap is atomic, and the store is atomic, but
-       the complete operation is not.  Therefore we do not need to assert serial
-       context in order to implement this.  That said, restart early if we can't
-       support either operation that is supposed to be atomic. */
+    /*
+     * Note that the compare-and-swap is atomic, and the store is atomic,
+     * but the complete operation is not.  Therefore we do not need to
+     * assert serial context in order to implement this.  That said,
+     * restart early if we can't support either operation that is supposed
+     * to be atomic.
+     */
     if (parallel) {
-        int mask = 0;
-#if !defined(CONFIG_ATOMIC64)
-        mask = -8;
-#elif !defined(CONFIG_ATOMIC128)
-        mask = -16;
+        uint32_t max = 2;
+#ifdef CONFIG_ATOMIC64
+        max = 3;
 #endif
-        if (((4 << fc) | (1 << sc)) & mask) {
+        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
+            (HAVE_ATOMIC128  ? 0 : sc > max)) {
             cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
         }
     }
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
             Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
             Int128 ov;

-            if (parallel) {
-#ifdef CONFIG_ATOMIC128
-                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
-                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
-                cc = !int128_eq(ov, cv);
-#else
-                /* Note that we asserted !parallel above.  */
-                g_assert_not_reached();
-#endif
-            } else {
+            if (!parallel) {
                 uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                 uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,

                 cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                 cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
+            } else if (HAVE_CMPXCHG128) {
+                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
+                cc = !int128_eq(ov, cv);
+            } else {
+                /* Note that we asserted !parallel above.  */
+                g_assert_not_reached();
             }

             env->regs[r3 + 0] = int128_gethi(ov);
@@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
             cpu_stq_data_ra(env, a2, svh, ra);
             break;
         case 4:
-            if (parallel) {
-#ifdef CONFIG_ATOMIC128
+            if (!parallel) {
+                cpu_stq_data_ra(env, a2 + 0, svh, ra);
+                cpu_stq_data_ra(env, a2 + 8, svl, ra);
+            } else if (HAVE_ATOMIC128) {
                 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                 Int128 sv = int128_make128(svl, svh);
                 helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
-#else
+            } else {
                 /* Note that we asserted !parallel above.  */
                 g_assert_not_reached();
-#endif
-            } else {
-                cpu_stq_data_ra(env, a2 + 0, svh, ra);
-                cpu_stq_data_ra(env, a2 + 8, svl, ra);
             }
             break;
         default:
             g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
     uintptr_t ra = GETPC();
     uint64_t hi, lo;

-    if (parallel) {
-#ifndef CONFIG_ATOMIC128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
+    if (!parallel) {
+        check_alignment(env, addr, 16, ra);
+        hi = cpu_ldq_data_ra(env, addr + 0, ra);
+        lo = cpu_ldq_data_ra(env, addr + 8, ra);
+    } else if (HAVE_ATOMIC128) {
         int mem_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
         Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
         hi = int128_gethi(v);
         lo = int128_getlo(v);
-#endif
     } else {
-        check_alignment(env, addr, 16, ra);
-
-        hi = cpu_ldq_data_ra(env, addr + 0, ra);
-        lo = cpu_ldq_data_ra(env, addr + 8, ra);
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
     }

     env->retxl = lo;
@@ -XXX,XX +XXX,XX @@ static void do_stpq(CPUS390XState *env, uint64_t addr,
 {
     uintptr_t ra = GETPC();

-    if (parallel) {
-#ifndef CONFIG_ATOMIC128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
-        int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
-
-        Int128 v = int128_make128(low, high);
-        helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
-#endif
-    } else {
+    if (!parallel) {
         check_alignment(env, addr, 16, ra);
-
         cpu_stq_data_ra(env, addr + 0, high, ra);
         cpu_stq_data_ra(env, addr + 8, low, ra);
+    } else if (HAVE_ATOMIC128) {
+        int mem_idx = cpu_mmu_index(env, false);
+        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+        Int128 v = int128_make128(low, high);
+        helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
+    } else {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
     }
 }
--
2.17.2

All uses have been replaced by add/sub carry opcodes.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
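For background: the generic expansion that replaces add2 chains the carry
opcodes -- addco computes the low word and sets the carry, addci adds the
high words plus that carry.  Reference arithmetic only (a sketch;
add2_ref is a hypothetical name):

    #include <stdint.h>

    /* (rh:rl) = (ah:al) + (bh:bl), as addco + addci express it. */
    static void add2_ref(uint32_t *rl, uint32_t *rh,
                         uint32_t al, uint32_t ah,
                         uint32_t bl, uint32_t bh)
    {
        uint32_t lo = al + bl;
        uint32_t carry = lo < al;    /* what addco leaves behind */

        *rl = lo;
        *rh = ah + bh + carry;       /* what addci consumes */
    }
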
 include/tcg/tcg-opc.h            |  5 --
 tcg/aarch64/tcg-target-has.h     |  5 --
 tcg/arm/tcg-target-has.h         |  4 --
 tcg/i386/tcg-target-has.h        |  5 --
 tcg/loongarch64/tcg-target-has.h |  4 --
 tcg/mips/tcg-target-has.h        |  5 --
 tcg/ppc/tcg-target-has.h         |  4 --
 tcg/riscv/tcg-target-has.h       |  5 --
 tcg/s390x/tcg-target-has.h       |  7 ---
 tcg/sparc64/tcg-target-has.h     |  7 ---
 tcg/tcg-has.h                    |  2 -
 tcg/tci/tcg-target-has.h         |  4 --
 tcg/optimize.c                   | 87 --------------------------------
 tcg/tcg-op.c                     | 26 ----------
 tcg/tcg.c                        | 36 -------------
 docs/devel/tcg-ops.rst           | 14 ++---
 16 files changed, 3 insertions(+), 217 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)

-DEF(add2_i32, 2, 4, 0, 0)
-DEF(sub2_i32, 2, 4, 0, 0)
 DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
 DEF(setcond2_i32, 1, 4, 1, 0)

@@ -XXX,XX +XXX,XX @@ DEF(extu_i32_i64, 1, 1, 0, 0)
 DEF(extrl_i64_i32, 1, 1, 0, 0)
 DEF(extrh_i64_i32, 1, 1, 0, 0)

-DEF(add2_i64, 2, 4, 0, 0)
-DEF(sub2_i64, 2, 4, 0, 0)
-
 #define DATA64_ARGS  (TCG_TARGET_REG_BITS == 64 ? 1 : 2)

 /* There are tcg_ctx->insn_start_words here, not just one.  */
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define have_lse2 (cpuinfo & CPUINFO_LSE2)

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
 #define TCG_TARGET_HAS_extr_i64_i32 0
 #define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-
 /*
  * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
  * which requires writable pages.  We must defer to the helper for user-only,
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #endif

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
 #define TCG_TARGET_HAS_qemu_st8_i32 0
-
 #define TCG_TARGET_HAS_qemu_ldst_i128 0
-
 #define TCG_TARGET_HAS_tst 1

 #define TCG_TARGET_HAS_v64 use_neon_instructions
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
-
 #if TCG_TARGET_REG_BITS == 64
 /* Keep 32-bit values zero-extended in a register.  */
 #define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
 #define TCG_TARGET_HAS_qemu_st8_i32 0
 #else
 #define TCG_TARGET_HAS_qemu_st8_i32 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #include "host/cpuinfo.h"

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
 #define TCG_TARGET_HAS_qemu_st8_i32 0

 /* 64-bit operations */
 #define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0

 #define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)

diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #endif

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
-
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
 #define TCG_TARGET_HAS_ext32s_i64 1
 #define TCG_TARGET_HAS_ext32u_i64 1
 #endif
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

 /* optional instructions */
 #define TCG_TARGET_HAS_qemu_st8_i32 0
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0

 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
 #endif

 #define TCG_TARGET_HAS_qemu_ldst_i128 \
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

 /* optional instructions */
 #define TCG_TARGET_HAS_qemu_st8_i32 0
-
 #define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-
 #define TCG_TARGET_HAS_qemu_ldst_i128 0
-
 #define TCG_TARGET_HAS_tst 0

 /* vector instructions */
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
     ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
 #define TCG_TARGET_HAS_extr_i64_i32 0
 #define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-
 #define TCG_TARGET_HAS_qemu_ldst_i128 1
-
 #define TCG_TARGET_HAS_tst 1

 #define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #endif

 /* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
 #define TCG_TARGET_HAS_qemu_st8_i32 0
-
 #define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
-
 #define TCG_TARGET_HAS_qemu_ldst_i128 0
-
 #define TCG_TARGET_HAS_tst 1

 #define TCG_TARGET_extract_valid(type, ofs, len) \
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #if TCG_TARGET_REG_BITS == 32
 /* Turn some undef macros into false macros.  */
 #define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
 #endif

 #if !defined(TCG_TARGET_HAS_v64) \
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_H

 #define TCG_TARGET_HAS_qemu_st8_i32 0
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0

 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
 #endif /* TCG_TARGET_REG_BITS == 64 */

 #define TCG_TARGET_HAS_qemu_ldst_i128 0
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_addco(OptContext *ctx, TCGOp *op)
     return finish_folding(ctx, op);
 }

-static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
-{
-    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
-    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);
-
-    if (a_const && b_const) {
-        uint64_t al = arg_info(op->args[2])->val;
-        uint64_t ah = arg_info(op->args[3])->val;
-        uint64_t bl = arg_info(op->args[4])->val;
-        uint64_t bh = arg_info(op->args[5])->val;
-        TCGArg rl, rh;
-        TCGOp *op2;
-
-        if (ctx->type == TCG_TYPE_I32) {
-            uint64_t a = deposit64(al, 32, 32, ah);
-            uint64_t b = deposit64(bl, 32, 32, bh);
-
-            if (add) {
-                a += b;
-            } else {
-                a -= b;
-            }
-
-            al = sextract64(a, 0, 32);
-            ah = sextract64(a, 32, 32);
-        } else {
-            Int128 a = int128_make128(al, ah);
-            Int128 b = int128_make128(bl, bh);
-
-            if (add) {
-                a = int128_add(a, b);
-            } else {
-                a = int128_sub(a, b);
-            }
-
-            al = int128_getlo(a);
-            ah = int128_gethi(a);
-        }
-
-        rl = op->args[0];
-        rh = op->args[1];
-
-        /* The proper opcode is supplied by tcg_opt_gen_mov. */
-        op2 = opt_insert_before(ctx, op, 0, 2);
-
-        tcg_opt_gen_movi(ctx, op, rl, al);
-        tcg_opt_gen_movi(ctx, op2, rh, ah);
-        return true;
-    }
-
-    /* Fold sub2 r,x,i to add2 r,x,-i */
-    if (!add && b_const) {
-        uint64_t bl = arg_info(op->args[4])->val;
-        uint64_t bh = arg_info(op->args[5])->val;
-
-        /* Negate the two parts without assembling and disassembling. */
-        bl = -bl;
-        bh = ~bh + !bl;
-
-        op->opc = (ctx->type == TCG_TYPE_I32
-                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
-        op->args[4] = arg_new_constant(ctx, bl);
-        op->args[5] = arg_new_constant(ctx, bh);
-    }
-    return finish_folding(ctx, op);
-}
-
-static bool fold_add2(OptContext *ctx, TCGOp *op)
-{
-    /* Note that the high and low parts may be independently swapped. */
-    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
-    swap_commutative(op->args[1], &op->args[3], &op->args[5]);
-
-    return fold_addsub2(ctx, op, true);
-}
-
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
     uint64_t z1, z2, z_mask, s_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     return finish_folding(ctx, op);
 }

-static bool fold_sub2(OptContext *ctx, TCGOp *op)
-{
-    return fold_addsub2(ctx, op, false);
-}
-
 static void squash_prev_borrowout(OptContext *ctx, TCGOp *op)
 {
     TempOptInfo *t2;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_addco:
             done = fold_addco(&ctx, op);
             break;
-        CASE_OP_32_64(add2):
-            done = fold_add2(&ctx, op);
-            break;
         case INDEX_op_and:
         case INDEX_op_and_vec:
             done = fold_and(&ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_sub_vec:
             done = fold_sub_vec(&ctx, op);
             break;
-        CASE_OP_32_64(sub2):
-            done = fold_sub2(&ctx, op);
-            break;
         case INDEX_op_xor:
         case INDEX_op_xor_vec:
             done = fold_xor(&ctx, op);
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ static void DNI tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
                 tcgv_i64_arg(a3), a4, a5);
 }

-static void DNI tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
-                                TCGv_i32 a3, TCGv_i32 a4,
-                                TCGv_i32 a5, TCGv_i32 a6)
-{
-    tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
-                tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
-                tcgv_i32_arg(a6));
-}
-
-static void DNI tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
-                                TCGv_i64 a3, TCGv_i64 a4,
-                                TCGv_i64 a5, TCGv_i64 a6)
-{
-    tcg_gen_op6(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
-                tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
-                tcgv_i64_arg(a6));
-}
-
 static void DNI tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
                                  TCGv_i32 a3, TCGv_i32 a4,
                                  TCGv_i32 a5, TCGArg a6)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
         tcg_gen_op3_i32(INDEX_op_addci, rh, ah, bh);
         tcg_gen_mov_i32(rl, t0);
         tcg_temp_free_i32(t0);
-    } else if (TCG_TARGET_HAS_add2_i32) {
-        tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
     } else {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         TCGv_i32 t1 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
         tcg_gen_op3_i32(INDEX_op_subbi, rh, ah, bh);
         tcg_gen_mov_i32(rl, t0);
         tcg_temp_free_i32(t0);
-    } else if (TCG_TARGET_HAS_sub2_i32) {
-        tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
     } else {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         TCGv_i32 t1 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,

         tcg_gen_mov_i64(rl, t0);
         tcg_temp_free_i64(t0);
-    } else if (TCG_TARGET_HAS_add2_i64) {
-        tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
     } else {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         TCGv_i64 t1 = tcg_temp_ebb_new_i64();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,

         tcg_gen_mov_i64(rl, t0);
         tcg_temp_free_i64(t0);
-    } else if (TCG_TARGET_HAS_sub2_i64) {
-        tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
     } else {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         TCGv_i64 t1 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i32:
         return true;

-    case INDEX_op_add2_i32:
-        return TCG_TARGET_HAS_add2_i32;
-    case INDEX_op_sub2_i32:
-        return TCG_TARGET_HAS_sub2_i32;
-
     case INDEX_op_brcond2_i32:
     case INDEX_op_setcond2_i32:
         return TCG_TARGET_REG_BITS == 32;
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_extrh_i64_i32:
         return TCG_TARGET_REG_BITS == 64;

-    case INDEX_op_add2_i64:
-        return TCG_TARGET_HAS_add2_i64;
-    case INDEX_op_sub2_i64:
-        return TCG_TARGET_HAS_sub2_i64;
-
     case INDEX_op_mov_vec:
     case INDEX_op_dup_vec:
     case INDEX_op_dupm_vec:
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
             la_reset_pref(ts);
             break;

-        case INDEX_op_add2_i32:
-        case INDEX_op_add2_i64:
-            opc_new = INDEX_op_add;
-            goto do_addsub2;
-        case INDEX_op_sub2_i32:
-        case INDEX_op_sub2_i64:
-            opc_new = INDEX_op_sub;
-        do_addsub2:
-            assert_carry_dead(s);
-            /* Test if the high part of the operation is dead, but not
-               the low part.  The result can be optimized to a simple
-               add or sub.  This happens often for x86_64 guest when the
-               cpu mode is set to 32 bit.  */
-            if (arg_temp(op->args[1])->state == TS_DEAD) {
-                if (arg_temp(op->args[0])->state == TS_DEAD) {
-                    goto do_remove;
-                }
-                /* Replace the opcode and adjust the args in place,
-                   leaving 3 unused args at the end.  */
-                op->opc = opc = opc_new;
-                op->args[1] = op->args[2];
-                op->args[2] = op->args[4];
-                /* Fall through and mark the single-word operation live.  */
-            }
-            goto do_not_remove;
-
         case INDEX_op_muls2:
             opc_new = INDEX_op_mul;
             opc_new2 = INDEX_op_mulsh;
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Multiword arithmetic support
        code generator will use ``tcg_out_set_borrow`` and then
        the output routine for *subbio*.

-   * - add2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
-
-       sub2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
-
-     - | Similar to add/sub, except that the double-word inputs *t1* and *t2* are
-         formed from two single-word arguments, and the double-word output *t0*
-         is returned in two single-word outputs.
-
    * - mulu2 *t0_low*, *t0_high*, *t1*, *t2*

      - | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
@@ -XXX,XX +XXX,XX @@ Assumptions
 The target word size (``TCG_TARGET_REG_BITS``) is expected to be 32 bit or
 64 bit.  It is expected that the pointer has the same size as the word.

-On a 32 bit target, all 64 bit operations are converted to 32 bits.  A
-few specific operations must be implemented to allow it (see add2_i32,
-sub2_i32, brcond2_i32).
+On a 32 bit target, all 64 bit operations are converted to 32 bits.
+A few specific operations must be implemented to allow it
+(see brcond2_i32, setcond2_i32).

 On a 64 bit target, the values are transferred between 32 and 64-bit
 registers using the following ops:
--
2.43.0


Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
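Context for the split below: the translator already chooses between the
serial and parallel helper at code-generation time, so each variant can
drop its bool flag and the branches that are dead for it.  The dispatch
has roughly this shape (a sketch from memory, not an excerpt of this
patch):

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, r1, r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, r1, r3);
    }
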
 target/s390x/mem_helper.c | 128 ++++++++++++++++++--------------------
 1 file changed, 61 insertions(+), 67 deletions(-)

diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
     return cc;
 }

-static void do_cdsg(CPUS390XState *env, uint64_t addr,
-                    uint32_t r1, uint32_t r3, bool parallel)
+void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
+                  uint32_t r1, uint32_t r3)
 {
     uintptr_t ra = GETPC();
     Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
     Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
     Int128 oldv;
+    uint64_t oldh, oldl;
     bool fail;

-    if (parallel) {
-#if !HAVE_CMPXCHG128
-        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
-#else
-        int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
-        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
-        fail = !int128_eq(oldv, cmpv);
-#endif
-    } else {
-        uint64_t oldh, oldl;
+    check_alignment(env, addr, 16, ra);

-        check_alignment(env, addr, 16, ra);
+    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
+    oldl = cpu_ldq_data_ra(env, addr + 8, ra);

-        oldh = cpu_ldq_data_ra(env, addr + 0, ra);
-        oldl = cpu_ldq_data_ra(env, addr + 8, ra);
-
-        oldv = int128_make128(oldl, oldh);
-        fail = !int128_eq(oldv, cmpv);
-        if (fail) {
-            newv = oldv;
-        }
-
-        cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
-        cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
+    oldv = int128_make128(oldl, oldh);
+    fail = !int128_eq(oldv, cmpv);
+    if (fail) {
+        newv = oldv;
     }

+    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
+    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
+
     env->cc_op = fail;
     env->regs[r1] = int128_gethi(oldv);
     env->regs[r1 + 1] = int128_getlo(oldv);
 }

-void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
-                  uint32_t r1, uint32_t r3)
-{
-    do_cdsg(env, addr, r1, r3, false);
-}
-
 void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                            uint32_t r1, uint32_t r3)
 {
-    do_cdsg(env, addr, r1, r3, true);
+    uintptr_t ra = GETPC();
+    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
+    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
+    int mem_idx;
+    TCGMemOpIdx oi;
+    Int128 oldv;
+    bool fail;
+
+    if (!HAVE_CMPXCHG128) {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+    }
+
+    mem_idx = cpu_mmu_index(env, false);
+    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+    fail = !int128_eq(oldv, cmpv);
+
+    env->cc_op = fail;
+    env->regs[r1] = int128_gethi(oldv);
+    env->regs[r1 + 1] = int128_getlo(oldv);
 }

 static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
 #endif

 /* load pair from quadword */
-static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
+uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
 {
     uintptr_t ra = GETPC();
     uint64_t hi, lo;

-    if (!parallel) {
-        check_alignment(env, addr, 16, ra);
-        hi = cpu_ldq_data_ra(env, addr + 0, ra);
-        lo = cpu_ldq_data_ra(env, addr + 8, ra);
-    } else if (HAVE_ATOMIC128) {
+    check_alignment(env, addr, 16, ra);
+    hi = cpu_ldq_data_ra(env, addr + 0, ra);
+    lo = cpu_ldq_data_ra(env, addr + 8, ra);
+
+    env->retxl = lo;
+    return hi;
+}
+
+uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
+{
+    uintptr_t ra = GETPC();
+    uint64_t hi, lo;
+
+    if (HAVE_ATOMIC128) {
         int mem_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
         Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
         hi = int128_gethi(v);
         lo = int128_getlo(v);
     } else {
-        check_alignment(env, addr, 16, ra);
-
-        hi = cpu_ldq_data_ra(env, addr + 0, ra);
-        lo = cpu_ldq_data_ra(env, addr + 8, ra);
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
     }

     env->retxl = lo;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
     return hi;
 }

-uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
-{
-    return do_lpq(env, addr, false);
-}
-
-uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
-{
-    return do_lpq(env, addr, true);
-}
-
 /* store pair to quadword */
-static void do_stpq(CPUS390XState *env, uint64_t addr,
-                    uint64_t low, uint64_t high, bool parallel)
+void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
+                  uint64_t low, uint64_t high)
 {
     uintptr_t ra = GETPC();

-    if (!parallel) {
-        check_alignment(env, addr, 16, ra);
-        cpu_stq_data_ra(env, addr + 0, high, ra);
-        cpu_stq_data_ra(env, addr + 8, low, ra);
-    } else if (HAVE_ATOMIC128) {
+    check_alignment(env, addr, 16, ra);
+    cpu_stq_data_ra(env, addr + 0, high, ra);
+    cpu_stq_data_ra(env, addr + 8, low, ra);
+}
+
+void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
+                           uint64_t low, uint64_t high)
+{
+    uintptr_t ra = GETPC();
+
+    if (HAVE_ATOMIC128) {
         int mem_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
         Int128 v = int128_make128(low, high);
+        helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
+    } else {
+        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
@@ -XXX,XX +XXX,XX @@ static void do_stpq(CPUS390XState *env, uint64_t addr,
     }
 }

-void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
-                  uint64_t low, uint64_t high)
-{
-    do_stpq(env, addr, low, high, false);
-}
-
-void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
-                           uint64_t low, uint64_t high)
-{
-    do_stpq(env, addr, low, high, true);
-}
-
 /* Execute instruction.  This instruction executes an insn modified with
    the contents of r1.  It does not change the executed instruction in memory;
    it does not change the program counter.
--
2.17.2

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
             tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
             return;
         }
-        /* The field is split across two words.  One double-word
-           shift is better than two double-word shifts.  */
-        goto do_shift_and;
+
+        /* The field is split across two words.  */
+        tcg_gen_extract2_i32(TCGV_LOW(ret), TCGV_LOW(arg),
+                             TCGV_HIGH(arg), ofs);
+        if (len <= 32) {
+            tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(ret), 0, len);
+            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+        } else {
+            tcg_gen_extract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg),
+                                ofs, len - 32);
+        }
+        return;
     }

     if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
        so that we get ext8u, ext16u, and ext32u.  */
     switch (len) {
     case 1 ... 8: case 16: case 32:
-    do_shift_and:
         tcg_gen_shri_i64(ret, arg, ofs);
         tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
         break;
--
2.43.0


When op raises an exception, it may not have initialized the output
temps that would be written back by wout or cout.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/translate.c | 20 +++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -XXX,XX +XXX,XX @@ struct DisasInsn {

     const char *name;

+    /* Pre-process arguments before HELP_OP.  */
     void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
     void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
     void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
+
+    /*
+     * Post-process output after HELP_OP.
+     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
+     */
     void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
     void (*help_cout)(DisasContext *, DisasOps *);
+
+    /* Implement the operation itself.  */
     DisasJumpType (*help_op)(DisasContext *, DisasOps *);

     uint64_t data;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
     if (insn->help_op) {
         ret = insn->help_op(s, &o);
     }
-    if (insn->help_wout) {
-        insn->help_wout(s, &f, &o);
-    }
-    if (insn->help_cout) {
-        insn->help_cout(s, &o);
+    if (ret != DISAS_NORETURN) {
+        if (insn->help_wout) {
+            insn->help_wout(s, &f, &o);
+        }
+        if (insn->help_cout) {
+            insn->help_cout(s, &o);
+        }
     }

     /* Free any temporaries created by the helpers.  */
--
2.17.2
diff view generated by jsdifflib
1
Consolidate the places we call tcg_op_insert_{before,after}
1
Reviewed-by: David Hildenbrand <david@redhat.com>
2
within the optimization pass.
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
tcg/optimize.c | 30 +++++++++++++++++++++---------
4
target/s390x/mem_helper.c | 40 +++++++++++++++++++--------------------
8
1 file changed, 21 insertions(+), 9 deletions(-)
5
target/s390x/translate.c | 25 +++++++++++++++++-------
6
2 files changed, 38 insertions(+), 27 deletions(-)
9
7
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
11
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
10
--- a/target/s390x/mem_helper.c
13
+++ b/tcg/optimize.c
11
+++ b/target/s390x/mem_helper.c
14
@@ -XXX,XX +XXX,XX @@ static TCGArg arg_new_temp(OptContext *ctx)
12
@@ -XXX,XX +XXX,XX @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
15
return temp_arg(ts);
13
Int128 oldv;
14
bool fail;
15
16
- if (!HAVE_CMPXCHG128) {
17
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
18
- }
19
+ assert(HAVE_CMPXCHG128);
20
21
mem_idx = cpu_mmu_index(env, false);
22
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
23
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
24
{
25
uintptr_t ra = GETPC();
26
uint64_t hi, lo;
27
+ int mem_idx;
28
+ TCGMemOpIdx oi;
29
+ Int128 v;
30
31
- if (HAVE_ATOMIC128) {
32
- int mem_idx = cpu_mmu_index(env, false);
33
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
34
- Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
35
- hi = int128_gethi(v);
36
- lo = int128_getlo(v);
37
- } else {
38
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
39
- }
40
+ assert(HAVE_ATOMIC128);
41
+
42
+ mem_idx = cpu_mmu_index(env, false);
43
+ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
44
+ v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
45
+ hi = int128_gethi(v);
46
+ lo = int128_getlo(v);
47
48
env->retxl = lo;
49
return hi;
50
@@ -XXX,XX +XXX,XX @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
51
uint64_t low, uint64_t high)
52
{
53
uintptr_t ra = GETPC();
54
+ int mem_idx;
55
+ TCGMemOpIdx oi;
56
+ Int128 v;
57
58
- if (HAVE_ATOMIC128) {
59
- int mem_idx = cpu_mmu_index(env, false);
60
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
61
- Int128 v = int128_make128(low, high);
62
- helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
63
- } else {
64
- cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
65
- }
66
+ assert(HAVE_ATOMIC128);
67
+
68
+ mem_idx = cpu_mmu_index(env, false);
69
+ oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
70
+ v = int128_make128(low, high);
71
+ helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
16
}
72
}
17
73
18
+static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
74
/* Execute instruction. This instruction executes an insn modified with
19
+ TCGOpcode opc, unsigned narg)
75
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
20
+{
76
index XXXXXXX..XXXXXXX 100644
21
+ return tcg_op_insert_after(ctx->tcg, op, opc, narg);
77
--- a/target/s390x/translate.c
22
+}
78
+++ b/target/s390x/translate.c
23
+
79
@@ -XXX,XX +XXX,XX @@
24
+static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
80
#include "trace-tcg.h"
25
+ TCGOpcode opc, unsigned narg)
81
#include "exec/translator.h"
26
+{
82
#include "exec/log.h"
27
+ return tcg_op_insert_before(ctx->tcg, op, opc, narg);
83
+#include "qemu/atomic128.h"
28
+}
84
29
+
85
30
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
86
/* Information that (most) every instruction needs to manipulate. */
87
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
88
int r3 = get_field(s->fields, r3);
89
int d2 = get_field(s->fields, d2);
90
int b2 = get_field(s->fields, b2);
91
+ DisasJumpType ret = DISAS_NEXT;
92
TCGv_i64 addr;
93
TCGv_i32 t_r1, t_r3;
94
95
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
96
addr = get_address(s, 0, b2, d2);
97
t_r1 = tcg_const_i32(r1);
98
t_r3 = tcg_const_i32(r3);
99
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
100
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
101
+ gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
102
+ } else if (HAVE_CMPXCHG128) {
103
gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
104
} else {
105
- gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
106
+ gen_helper_exit_atomic(cpu_env);
107
+ ret = DISAS_NORETURN;
108
}
109
tcg_temp_free_i64(addr);
110
tcg_temp_free_i32(t_r1);
111
tcg_temp_free_i32(t_r3);
112
113
set_cc_static(s);
114
- return DISAS_NEXT;
115
+ return ret;
116
}
117
118
static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
119
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
120
121
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
31
{
122
{
32
TCGTemp *dst_ts = arg_temp(dst);
123
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
33
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
124
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
34
if (!TCG_TARGET_HAS_tst) {
125
+ gen_helper_lpq(o->out, cpu_env, o->in2);
35
TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
126
+ } else if (HAVE_ATOMIC128) {
36
? INDEX_op_and_i32 : INDEX_op_and_i64);
127
gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
37
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
38
+ TCGOp *op2 = opt_insert_before(ctx, op, and_opc, 3);
39
TCGArg tmp = arg_new_temp(ctx);
40
41
op2->args[0] = tmp;
42
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
43
44
/* Expand to AND with a temporary if no backend support. */
45
if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
46
- TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
47
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
48
+ TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and_i32, 3);
49
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and_i32, 3);
50
TCGArg t1 = arg_new_temp(ctx);
51
TCGArg t2 = arg_new_temp(ctx);
52
53
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
54
rh = op->args[1];
55
56
/* The proper opcode is supplied by tcg_opt_gen_mov. */
57
- op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
58
+ op2 = opt_insert_before(ctx, op, 0, 2);
59
60
tcg_opt_gen_movi(ctx, op, rl, al);
61
tcg_opt_gen_movi(ctx, op2, rh, ah);
62
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
63
rh = op->args[1];
64
65
/* The proper opcode is supplied by tcg_opt_gen_mov. */
66
- op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
67
+ op2 = opt_insert_before(ctx, op, 0, 2);
68
69
tcg_opt_gen_movi(ctx, op, rl, l);
70
tcg_opt_gen_movi(ctx, op2, rh, h);
71
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
72
op->args[3] = 1;
73
} else {
128
} else {
74
if (sh) {
129
- gen_helper_lpq(o->out, cpu_env, o->in2);
75
- op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
130
+ gen_helper_exit_atomic(cpu_env);
76
+ op2 = opt_insert_before(ctx, op, shr_opc, 3);
131
+ return DISAS_NORETURN;
77
op2->args[0] = ret;
78
op2->args[1] = src1;
79
op2->args[2] = arg_new_constant(ctx, sh);
80
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
81
}
132
}
82
133
return_low128(o->out2);
83
if (neg && inv) {
134
return DISAS_NEXT;
84
- op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
135
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
85
+ op2 = opt_insert_after(ctx, op, sub_opc, 3);
136
86
op2->args[0] = ret;
137
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
87
op2->args[1] = ret;
138
{
88
op2->args[2] = arg_new_constant(ctx, 1);
139
- if (tb_cflags(s->base.tb) & CF_PARALLEL) {
89
} else if (inv) {
140
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
90
- op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
141
+ gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
91
+ op2 = opt_insert_after(ctx, op, xor_opc, 3);
142
+ } else if (HAVE_ATOMIC128) {
92
op2->args[0] = ret;
143
gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
93
op2->args[1] = ret;
144
} else {
94
op2->args[2] = arg_new_constant(ctx, 1);
145
- gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
95
} else if (neg) {
146
+ gen_helper_exit_atomic(cpu_env);
96
- op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
147
+ return DISAS_NORETURN;
97
+ op2 = opt_insert_after(ctx, op, neg_opc, 2);
98
op2->args[0] = ret;
99
op2->args[1] = ret;
100
}
148
}
149
return DISAS_NEXT;
150
}
101
--
151
--
102
2.43.0
152
2.17.2
103
153
104
154
diff view generated by jsdifflib
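The translate.c change above has the same shape in all three helpers.
A condensed sketch of the dispatch, using op_cdsg's names from the
patch with unrelated detail omitted (illustration only):

    DisasJumpType ret = DISAS_NEXT;

    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        /* Serial context: the non-atomic helper is always correct. */
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        /* Parallel context, host has 16-byte cmpxchg: atomic helper. */
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        /* No host support: exit to the slow path that executes the
         * instruction exclusively; control never returns here. */
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }

Because gen_helper_exit_atomic never returns to the generated code,
the wout/cout post-processing must be skipped for DISAS_NORETURN --
which is exactly what the DisasInsn change earlier in this digest
guarantees.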
Deleted patch
We cannot rely on the value copied from TCGOP_TYPE(op), because
the relevant op could be typeless, such as INDEX_op_call.

Fixes: fb744ece3a78 ("tcg: Copy TCGOP_TYPE in tcg_op_insert_{after,before}")
Suggested-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-internal.h | 4 ++--
 tcg/optimize.c | 4 ++--
 tcg/tcg.c | 17 ++++++++++-------
 3 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@ void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
                TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e);

 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
-                            TCGOpcode opc, unsigned nargs);
+                            TCGOpcode, TCGType, unsigned nargs);
 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
-                           TCGOpcode opc, unsigned nargs);
+                           TCGOpcode, TCGType, unsigned nargs);

 #endif /* TCG_INTERNAL_H */
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static TCGArg arg_new_temp(OptContext *ctx)
 static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
 {
-    return tcg_op_insert_after(ctx->tcg, op, opc, narg);
+    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
 }

 static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                 TCGOpcode opc, unsigned narg)
 {
-    return tcg_op_insert_before(ctx->tcg, op, opc, narg);
+    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
 }

 static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
 }

 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
-                            TCGOpcode opc, unsigned nargs)
+                            TCGOpcode opc, TCGType type, unsigned nargs)
 {
     TCGOp *new_op = tcg_op_alloc(opc, nargs);

-    TCGOP_TYPE(new_op) = TCGOP_TYPE(old_op);
+    TCGOP_TYPE(new_op) = type;
     QTAILQ_INSERT_BEFORE(old_op, new_op, link);
     return new_op;
 }

 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
-                           TCGOpcode opc, unsigned nargs)
+                           TCGOpcode opc, TCGType type, unsigned nargs)
 {
     TCGOp *new_op = tcg_op_alloc(opc, nargs);

-    TCGOP_TYPE(new_op) = TCGOP_TYPE(old_op);
+    TCGOP_TYPE(new_op) = type;
     QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
     return new_op;
 }
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
                     TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                       ? INDEX_op_ld_i32
                                       : INDEX_op_ld_i64);
-                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
+                    TCGOp *lop = tcg_op_insert_before(s, op, lopc,
+                                                      arg_ts->type, 3);

                     lop->args[0] = temp_arg(dir_ts);
                     lop->args[1] = temp_arg(arg_ts->mem_base);
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
                     TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                       ? INDEX_op_st_i32
                                       : INDEX_op_st_i64);
-                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
+                    TCGOp *sop = tcg_op_insert_after(s, op, sopc,
+                                                     arg_ts->type, 3);
                     TCGTemp *out_ts = dir_ts;

                     if (IS_DEAD_ARG(0)) {
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
                     TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                       ? INDEX_op_st_i32
                                       : INDEX_op_st_i64);
-                    TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
+                    TCGOp *sop = tcg_op_insert_after(s, op, sopc,
+                                                     arg_ts->type, 3);

                     sop->args[0] = temp_arg(dir_ts);
                     sop->args[1] = temp_arg(arg_ts->mem_base);
--
2.43.0
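A minimal before/after sketch of the interface change, using the
liveness_pass_2 call site from the patch (argument names as in the
diff; illustration only):

    /* Before: the new op inherited TCGOP_TYPE(op), which is garbage
     * when op is typeless, e.g. INDEX_op_call. */
    lop = tcg_op_insert_before(s, op, lopc, 3);

    /* After: the caller states the type explicitly -- here, the type
     * of the temporary being reloaded. */
    lop = tcg_op_insert_before(s, op, lopc, arg_ts->type, 3);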
Deleted patch
Begin to rely on TCGOp.type to discriminate operations,
rather than two different opcodes. Convert mov first.
Introduce TCG_OPF_INT in order to keep opcode dumps the same.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 4 ++--
 include/tcg/tcg.h | 2 ++
 tcg/optimize.c | 7 +++----
 tcg/tcg-op.c | 4 ++--
 tcg/tcg.c | 32 ++++++++++++++++++--------------
 tcg/tci.c | 5 ++---
 docs/devel/tcg-ops.rst | 4 ++--
 tcg/aarch64/tcg-target.c.inc | 2 --
 tcg/arm/tcg-target.c.inc | 1 -
 tcg/i386/tcg-target.c.inc | 2 --
 tcg/loongarch64/tcg-target.c.inc | 2 --
 tcg/mips/tcg-target.c.inc | 2 --
 tcg/ppc/tcg-target.c.inc | 2 --
 tcg/riscv/tcg-target.c.inc | 2 --
 tcg/s390x/tcg-target.c.inc | 2 --
 tcg/sparc64/tcg-target.c.inc | 2 --
 tcg/tci/tcg-target.c.inc | 15 +--------------
 17 files changed, 32 insertions(+), 58 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)

 DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)

-DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
+DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
+
 DEF(setcond_i32, 1, 2, 1, 0)
 DEF(negsetcond_i32, 1, 2, 1, 0)
 DEF(movcond_i32, 1, 4, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(clz_i32, 1, 2, 0, 0)
 DEF(ctz_i32, 1, 2, 0, 0)
 DEF(ctpop_i32, 1, 1, 0, 0)

-DEF(mov_i64, 1, 1, 0, TCG_OPF_NOT_PRESENT)
 DEF(setcond_i64, 1, 2, 1, 0)
 DEF(negsetcond_i64, 1, 2, 1, 0)
 DEF(movcond_i64, 1, 4, 1, 0)
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ enum {
     /* Instruction has side effects: it cannot be removed if its outputs
        are not used, and might trigger exceptions. */
     TCG_OPF_SIDE_EFFECTS = 0x08,
+    /* Instruction operands may be I32 or I64 */
+    TCG_OPF_INT = 0x10,
     /* Instruction is optional and not implemented by the host, or insn
        is generic and should not be implemented by the host. */
     TCG_OPF_NOT_PRESENT = 0x20,
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)

     switch (ctx->type) {
     case TCG_TYPE_I32:
-        new_op = INDEX_op_mov_i32;
-        break;
     case TCG_TYPE_I64:
-        new_op = INDEX_op_mov_i64;
+        new_op = INDEX_op_mov;
         break;
     case TCG_TYPE_V64:
     case TCG_TYPE_V128:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_mb:
             done = fold_mb(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(mov):
+        case INDEX_op_mov:
+        case INDEX_op_mov_vec:
             done = fold_mov(&ctx, op);
             break;
         CASE_OP_32_64(movcond):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_discard_i32(TCGv_i32 arg)
 void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
     if (ret != arg) {
-        tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
+        tcg_gen_op2_i32(INDEX_op_mov, ret, arg);
     }
 }

@@ -XXX,XX +XXX,XX @@ void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
         return;
     }
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
+        tcg_gen_op2_i64(INDEX_op_mov, ret, arg);
     } else {
         TCGTemp *ts = tcgv_i64_temp(arg);

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_qemu_st_i128:
         return TCG_TARGET_HAS_qemu_ldst_i128;

-    case INDEX_op_mov_i32:
+    case INDEX_op_mov:
+        return has_type;
+
     case INDEX_op_setcond_i32:
     case INDEX_op_brcond_i32:
     case INDEX_op_movcond_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_setcond2_i32:
         return TCG_TARGET_REG_BITS == 32;

-    case INDEX_op_mov_i64:
     case INDEX_op_setcond_i64:
     case INDEX_op_brcond_i64:
     case INDEX_op_movcond_i64:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
                 col += ne_fprintf(f, ",%s", t);
             }
         } else {
-            col += ne_fprintf(f, " %s ", def->name);
+            if (def->flags & TCG_OPF_INT) {
+                col += ne_fprintf(f, " %s_i%d ",
+                                  def->name,
+                                  8 * tcg_type_size(TCGOP_TYPE(op)));
+            } else if (def->flags & TCG_OPF_VECTOR) {
+                col += ne_fprintf(f, "%s v%d,e%d,",
+                                  def->name,
+                                  8 * tcg_type_size(TCGOP_TYPE(op)),
+                                  8 << TCGOP_VECE(op));
+            } else {
+                col += ne_fprintf(f, " %s ", def->name);
+            }

             nb_oargs = def->nb_oargs;
             nb_iargs = def->nb_iargs;
             nb_cargs = def->nb_cargs;

-            if (def->flags & TCG_OPF_VECTOR) {
-                col += ne_fprintf(f, "v%d,e%d,",
-                                  8 * tcg_type_size(TCGOP_TYPE(op)),
-                                  8 << TCGOP_VECE(op));
-            }
-
             k = 0;
             for (i = 0; i < nb_oargs; i++) {
                 const char *sep =  k ? "," : "";
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)

             /* Incorporate constraints for this operand. */
             switch (opc) {
-            case INDEX_op_mov_i32:
-            case INDEX_op_mov_i64:
+            case INDEX_op_mov:
                 /* Note that these are TCG_OPF_NOT_PRESENT and do not
                    have proper constraints. That said, special case
                    moves to propagate preferences backward. */
@@ -XXX,XX +XXX,XX @@ liveness_pass_2(TCGContext *s)
         }

         /* Outputs become available. */
-        if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
+        if (opc == INDEX_op_mov) {
             arg_ts = arg_temp(op->args[0]);
             dir_ts = arg_ts->state_ptr;
             if (dir_ts) {
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
         TCGOpcode opc = op->opc;

         switch (opc) {
-        case INDEX_op_mov_i32:
-        case INDEX_op_mov_i64:
+        case INDEX_op_mov:
         case INDEX_op_mov_vec:
             tcg_reg_alloc_mov(s, op);
             break;
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             regs[r0] = regs[tmp32 ? r3 : r4];
             break;
 #endif
-        CASE_32_64(mov)
+        case INDEX_op_mov:
             tci_args_rr(insn, &r0, &r1);
             regs[r0] = regs[r1];
             break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                    op_name, str_r(r0), str_r(r1), s2);
         break;

-    case INDEX_op_mov_i32:
-    case INDEX_op_mov_i64:
+    case INDEX_op_mov:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
     case INDEX_op_bswap16_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc

   .. list-table::

-   * - mov_i32/i64 *t0*, *t1*
+   * - mov *t0*, *t1*

      - | *t0* = *t1*
-       | Move *t1* to *t0* (both operands must have the same type).
+       | Move *t1* to *t0*.

    * - bswap16_i32/i64 *t0*, *t1*, *flags*

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_mb(s, a0);
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_mb(s, args[0]);
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_mb:
         tcg_out_mb(s, a0);
         break;
-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_mb:
         tcg_out_mb(s, a0);
         break;
-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_mb(s, args[0]);
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,

 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
-    switch (type) {
-    case TCG_TYPE_I32:
-        tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg);
-        break;
-#if TCG_TARGET_REG_BITS == 64
-    case TCG_TYPE_I64:
-        tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg);
-        break;
-#endif
-    default:
-        g_assert_not_reached();
-    }
+    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_op_v(s, opc);
         break;

-    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
-    case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call. */
     case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
     case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
--
2.43.0
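The opcode dump keeps its old appearance because the width suffix is
now derived from the op's type rather than from the opcode name. A
sketch of the computation, restating the tcg_dump_ops hunk above
(illustration only):

    /* tcg_type_size(TCG_TYPE_I32) is 4, so this prints e.g. "mov_i32";
     * for TCG_TYPE_I64 the size is 8, printing "mov_i64". */
    if (def->flags & TCG_OPF_INT) {
        col += ne_fprintf(f, " %s_i%d ",
                          def->name, 8 * tcg_type_size(TCGOP_TYPE(op)));
    }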
Deleted patch
Rely on TCGOP_TYPE instead of opcodes specific to each type.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 4 ++--
 target/sh4/translate.c | 6 +++---
 tcg/optimize.c | 13 +++++--------
 tcg/tcg-op.c | 4 ++--
 tcg/tcg.c | 15 +++++----------
 tcg/tci.c | 5 ++---
 docs/devel/tcg-ops.rst | 2 +-
 tcg/tci/tcg-target.c.inc | 6 ++----
 8 files changed, 22 insertions(+), 33 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)

 DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)

+DEF(add, 1, 2, 0, TCG_OPF_INT)
+
 DEF(setcond_i32, 1, 2, 1, 0)
 DEF(negsetcond_i32, 1, 2, 1, 0)
 DEF(movcond_i32, 1, 4, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* arith */
-DEF(add_i32, 1, 2, 0, 0)
 DEF(sub_i32, 1, 2, 0, 0)
 DEF(mul_i32, 1, 2, 0, 0)
 DEF(div_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* arith */
-DEF(add_i64, 1, 2, 0, 0)
 DEF(sub_i64, 1, 2, 0, 0)
 DEF(mul_i64, 1, 2, 0, 0)
 DEF(div_i64, 1, 2, 0, 0)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
     NEXT_INSN;
     switch (ctx->opcode & 0xf00f) {
     case 0x300c: /* add Rm,Rn */
-        op_opc = INDEX_op_add_i32;
+        op_opc = INDEX_op_add;
         goto do_reg_op;
     case 0x2009: /* and Rm,Rn */
         op_opc = INDEX_op_and_i32;
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
         if (op_dst != B11_8 || mv_src >= 0) {
             goto fail;
         }
-        op_opc = INDEX_op_add_i32;
+        op_opc = INDEX_op_add;
         op_arg = tcg_constant_i32(B7_0s);
         break;

@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
                              ctx->memidx, ld_mop);
         break;

-    case INDEX_op_add_i32:
+    case INDEX_op_add:
         if (op_dst != st_src) {
             goto fail;
         }
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     uint64_t l64, h64;

     switch (op) {
-    CASE_OP_32_64(add):
+    case INDEX_op_add:
         return x + y;

     CASE_OP_32_64(sub):
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
         break;
     }
     if (convert) {
-        TCGOpcode add_opc, xor_opc, neg_opc;
+        TCGOpcode xor_opc, neg_opc;

         if (!inv && !neg) {
             return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

         switch (ctx->type) {
         case TCG_TYPE_I32:
-            add_opc = INDEX_op_add_i32;
             neg_opc = INDEX_op_neg_i32;
             xor_opc = INDEX_op_xor_i32;
             break;
         case TCG_TYPE_I64:
-            add_opc = INDEX_op_add_i64;
             neg_opc = INDEX_op_neg_i64;
             xor_opc = INDEX_op_xor_i64;
             break;
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
         if (!inv) {
             op->opc = neg_opc;
         } else if (neg) {
-            op->opc = add_opc;
+            op->opc = INDEX_op_add;
             op->args[2] = arg_new_constant(ctx, -1);
         } else {
             op->opc = xor_opc;
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     if (arg_is_const(op->args[2])) {
         uint64_t val = arg_info(op->args[2])->val;

-        op->opc = (ctx->type == TCG_TYPE_I32
-                   ? INDEX_op_add_i32 : INDEX_op_add_i64);
+        op->opc = INDEX_op_add;
         op->args[2] = arg_new_constant(ctx, -val);
     }
     return finish_folding(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
          * Sorted alphabetically by opcode as much as possible.
          */
         switch (opc) {
-        CASE_OP_32_64(add):
+        case INDEX_op_add:
             done = fold_add(&ctx, op);
             break;
         case INDEX_op_add_vec:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)

 void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
+    tcg_gen_op3_i32(INDEX_op_add, ret, arg1, arg2);
 }

 void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
 void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_add, ret, arg1, arg2);
     } else {
         tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
                          TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -

 /* Register allocation descriptions for every TCGOpcode. */
 static const TCGOutOp * const all_outop[NB_OPS] = {
-    OUTOP(INDEX_op_add_i32, TCGOutOpBinary, outop_add),
-    OUTOP(INDEX_op_add_i64, TCGOutOpBinary, outop_add),
+    OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
 };

 #undef OUTOP
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_qemu_st_i128:
         return TCG_TARGET_HAS_qemu_ldst_i128;

+    case INDEX_op_add:
     case INDEX_op_mov:
         return has_type;

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st8_i32:
     case INDEX_op_st16_i32:
     case INDEX_op_st_i32:
-    case INDEX_op_add_i32:
     case INDEX_op_sub_i32:
     case INDEX_op_neg_i32:
     case INDEX_op_mul_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-    case INDEX_op_add_i64:
     case INDEX_op_sub_i64:
     case INDEX_op_neg_i64:
     case INDEX_op_mul_i64:
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
             break;

         case INDEX_op_add2_i32:
-            opc_new = INDEX_op_add_i32;
+        case INDEX_op_add2_i64:
+            opc_new = INDEX_op_add;
             goto do_addsub2;
         case INDEX_op_sub2_i32:
             opc_new = INDEX_op_sub_i32;
             goto do_addsub2;
-        case INDEX_op_add2_i64:
-            opc_new = INDEX_op_add_i64;
-            goto do_addsub2;
         case INDEX_op_sub2_i64:
             opc_new = INDEX_op_sub_i64;
         do_addsub2:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
         break;

-    case INDEX_op_add_i32:
-    case INDEX_op_add_i64:
+    case INDEX_op_add:
         {
             const TCGOutOpBinary *out =
                 container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

         /* Arithmetic operations (mixed 32/64 bit). */

-        CASE_32_64(add)
+        case INDEX_op_add:
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] + regs[r2];
             break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                    op_name, str_r(r0), str_r(r1));
         break;

-    case INDEX_op_add_i32:
-    case INDEX_op_add_i64:
+    case INDEX_op_add:
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Arithmetic

   .. list-table::

-   * - add_i32/i64 *t0*, *t1*, *t2*
+   * - add *t0*, *t1*, *t2*

      - | *t0* = *t1* + *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
     stack_bounds_check(base, offset);
     if (offset != sextract32(offset, 0, 16)) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
-        tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32
-                           ? INDEX_op_add_i32 : INDEX_op_add_i64),
-                       TCG_REG_TMP, TCG_REG_TMP, base);
+        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
         base = TCG_REG_TMP;
         offset = 0;
     }
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
 static void tgen_add(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_add_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
 }

 static const TCGOutOpBinary outop_add = {
--
2.43.0
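After this patch there is a single INDEX_op_add, and emission becomes
table-driven. A condensed sketch of the dispatch, assuming the
TCGOutOpBinary hook is named out_rrr as elsewhere in this series (the
call itself lies below the visible part of the tcg.c hunk above):

    case INDEX_op_add:
        {
            const TCGOutOpBinary *out =
                container_of(all_outop[op->opc], TCGOutOpBinary, base);

            /* One table entry serves both widths: the backend hook
             * receives the op's type instead of a suffixed opcode. */
            out->out_rrr(s, TCGOP_TYPE(op), new_args[0],
                         new_args[1], new_args[2]);
        }
        break;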
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 3 +--
 target/sh4/translate.c | 4 ++--
 tcg/optimize.c | 40 ++++++++++++----------------------------
 tcg/tcg-op.c | 4 ++--
 tcg/tcg.c | 9 +++------
 tcg/tci.c | 5 ++---
 docs/devel/tcg-ops.rst | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 8 files changed, 24 insertions(+), 45 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)

 DEF(add, 1, 2, 0, TCG_OPF_INT)
+DEF(and, 1, 2, 0, TCG_OPF_INT)

 DEF(setcond_i32, 1, 2, 1, 0)
 DEF(negsetcond_i32, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(rem_i32, 1, 2, 0, 0)
 DEF(remu_i32, 1, 2, 0, 0)
 DEF(div2_i32, 2, 3, 0, 0)
 DEF(divu2_i32, 2, 3, 0, 0)
-DEF(and_i32, 1, 2, 0, 0)
 DEF(or_i32, 1, 2, 0, 0)
 DEF(xor_i32, 1, 2, 0, 0)
 /* shifts/rotates */
@@ -XXX,XX +XXX,XX @@ DEF(rem_i64, 1, 2, 0, 0)
 DEF(remu_i64, 1, 2, 0, 0)
 DEF(div2_i64, 2, 3, 0, 0)
 DEF(divu2_i64, 2, 3, 0, 0)
-DEF(and_i64, 1, 2, 0, 0)
 DEF(or_i64, 1, 2, 0, 0)
 DEF(xor_i64, 1, 2, 0, 0)
 /* shifts/rotates */
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
         op_opc = INDEX_op_add;
         goto do_reg_op;
     case 0x2009: /* and Rm,Rn */
-        op_opc = INDEX_op_and_i32;
+        op_opc = INDEX_op_and;
         goto do_reg_op;
     case 0x200a: /* xor Rm,Rn */
         op_opc = INDEX_op_xor_i32;
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
         }
         break;

-    case INDEX_op_and_i32:
+    case INDEX_op_and:
         if (op_dst != st_src) {
             goto fail;
         }
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     CASE_OP_32_64(mul):
         return x * y;

-    CASE_OP_32_64_VEC(and):
+    case INDEX_op_and:
+    case INDEX_op_and_vec:
         return x & y;

     CASE_OP_32_64_VEC(or):
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,

     /* Expand to AND with a temporary if no backend support. */
     if (!TCG_TARGET_HAS_tst) {
-        TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
-                             ? INDEX_op_and_i32 : INDEX_op_and_i64);
-        TCGOp *op2 = opt_insert_before(ctx, op, and_opc, 3);
+        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
         TCGArg tmp = arg_new_temp(ctx);

         op2->args[0] = tmp;
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)

     /* Expand to AND with a temporary if no backend support. */
     if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
-        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and_i32, 3);
-        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and_i32, 3);
+        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
+        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
         TCGArg t1 = arg_new_temp(ctx);
         TCGArg t2 = arg_new_temp(ctx);

@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     TempOptInfo *t2 = arg_info(op->args[2]);
     int ofs = op->args[3];
     int len = op->args[4];
-    int width;
-    TCGOpcode and_opc;
+    int width = 8 * tcg_type_size(ctx->type);
     uint64_t z_mask, s_mask;

     if (ti_is_const(t1) && ti_is_const(t2)) {
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
                                             ti_const_val(t2)));
     }

-    switch (ctx->type) {
-    case TCG_TYPE_I32:
-        and_opc = INDEX_op_and_i32;
-        width = 32;
-        break;
-    case TCG_TYPE_I64:
-        and_opc = INDEX_op_and_i64;
-        width = 64;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
     /* Inserting a value into zero at offset 0. */
     if (ti_is_const_val(t1, 0) && ofs == 0) {
         uint64_t mask = MAKE_64BIT_MASK(0, len);

-        op->opc = and_opc;
+        op->opc = INDEX_op_and;
         op->args[1] = op->args[2];
         op->args[2] = arg_new_constant(ctx, mask);
         return fold_and(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     if (ti_is_const_val(t2, 0)) {
         uint64_t mask = deposit64(-1, ofs, len, 0);

-        op->opc = and_opc;
+        op->opc = INDEX_op_and;
         op->args[2] = arg_new_constant(ctx, mask);
         return fold_and(ctx, op);
     }
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

 static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
 {
-    TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
+    TCGOpcode sub_opc, xor_opc, neg_opc, shr_opc;
     TCGOpcode uext_opc = 0, sext_opc = 0;
     TCGCond cond = op->args[3];
     TCGArg ret, src1, src2;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)

     switch (ctx->type) {
     case TCG_TYPE_I32:
-        and_opc = INDEX_op_and_i32;
         sub_opc = INDEX_op_sub_i32;
         xor_opc = INDEX_op_xor_i32;
         shr_opc = INDEX_op_shr_i32;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         }
         break;
     case TCG_TYPE_I64:
-        and_opc = INDEX_op_and_i64;
         sub_opc = INDEX_op_sub_i64;
         xor_opc = INDEX_op_xor_i64;
         shr_opc = INDEX_op_shr_i64;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         op2->args[2] = arg_new_constant(ctx, sh);
         src1 = ret;
     }
-    op->opc = and_opc;
+    op->opc = INDEX_op_and;
     op->args[1] = src1;
     op->args[2] = arg_new_constant(ctx, 1);
 }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(add2):
             done = fold_add2(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(and):
+        case INDEX_op_and:
+        case INDEX_op_and_vec:
             done = fold_and(&ctx, op);
             break;
         CASE_OP_32_64_VEC(andc):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)

 void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
+    tcg_gen_op3_i32(INDEX_op_and, ret, arg1, arg2);
 }

 void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_and, ret, arg1, arg2);
     } else {
         tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
         tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
 /* Register allocation descriptions for every TCGOpcode. */
 static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
-    OUTOP(INDEX_op_and_i32, TCGOutOpBinary, outop_and),
-    OUTOP(INDEX_op_and_i64, TCGOutOpBinary, outop_and),
+    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
 };

 #undef OUTOP
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_qemu_ldst_i128;

     case INDEX_op_add:
+    case INDEX_op_and:
     case INDEX_op_mov:
         return has_type;

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_sub_i32:
     case INDEX_op_neg_i32:
     case INDEX_op_mul_i32:
-    case INDEX_op_and_i32:
     case INDEX_op_or_i32:
     case INDEX_op_xor_i32:
     case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_sub_i64:
     case INDEX_op_neg_i64:
     case INDEX_op_mul_i64:
-    case INDEX_op_and_i64:
     case INDEX_op_or_i64:
     case INDEX_op_xor_i64:
     case INDEX_op_shl_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         break;

     case INDEX_op_add:
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
+    case INDEX_op_and:
         {
             const TCGOutOpBinary *out =
                 container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] * regs[r2];
             break;
-        CASE_32_64(and)
+        case INDEX_op_and:
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] & regs[r2];
             break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
         break;

     case INDEX_op_add:
+    case INDEX_op_and:
     case INDEX_op_sub_i32:
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
-    case INDEX_op_and_i32:
-    case INDEX_op_and_i64:
     case INDEX_op_or_i32:
     case INDEX_op_or_i64:
     case INDEX_op_xor_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

   .. list-table::

-   * - and_i32/i64 *t0*, *t1*, *t2*
+   * - and *t0*, *t1*, *t2*

      - | *t0* = *t1* & *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
 static void tgen_and(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_and_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
 }

 static const TCGOutOpBinary outop_and = {
--
2.43.0
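A note on why one opcode can serve both widths in the constant folder:
for and, bit N of the result depends only on bit N of each input, so
the single 64-bit fold

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

also yields the correct low 32 bits for an i32 op; the folded value is
truncated when the op's type is 32-bit, so no separate 32-bit case is
needed. This is a reading of the fold above, not new code.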
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 19 +++++++++++++++++++
5
1 file changed, 19 insertions(+)
6
1
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
12
t2 = arg_info(op->args[2]);
13
z_mask = t1->z_mask;
14
15
+ if (ti_is_const(t2)) {
16
+ /* Fold andc r,x,i to and r,x,~i. */
17
+ switch (ctx->type) {
18
+ case TCG_TYPE_I32:
19
+ case TCG_TYPE_I64:
20
+ op->opc = INDEX_op_and;
21
+ break;
22
+ case TCG_TYPE_V64:
23
+ case TCG_TYPE_V128:
24
+ case TCG_TYPE_V256:
25
+ op->opc = INDEX_op_and_vec;
26
+ break;
27
+ default:
28
+ g_assert_not_reached();
29
+ }
30
+ op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
31
+ return fold_and(ctx, op);
32
+ }
33
+
34
/*
35
* Known-zeros does not imply known-ones. Therefore unless
36
* arg2 is constant, we can't infer anything from it.
37
--
38
2.43.0
39
40
diff view generated by jsdifflib
Deleted patch
1
We canonicalize subtract with constant to add with constant.
2
Fix this missed instance.
3
1
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 8 +++-----
8
1 file changed, 3 insertions(+), 5 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
15
16
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
17
{
18
- TCGOpcode sub_opc, xor_opc, neg_opc, shr_opc;
19
+ TCGOpcode xor_opc, neg_opc, shr_opc;
20
TCGOpcode uext_opc = 0, sext_opc = 0;
21
TCGCond cond = op->args[3];
22
TCGArg ret, src1, src2;
23
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
24
25
switch (ctx->type) {
26
case TCG_TYPE_I32:
27
- sub_opc = INDEX_op_sub_i32;
28
xor_opc = INDEX_op_xor_i32;
29
shr_opc = INDEX_op_shr_i32;
30
neg_opc = INDEX_op_neg_i32;
31
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
32
}
33
break;
34
case TCG_TYPE_I64:
35
- sub_opc = INDEX_op_sub_i64;
36
xor_opc = INDEX_op_xor_i64;
37
shr_opc = INDEX_op_shr_i64;
38
neg_opc = INDEX_op_neg_i64;
39
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
40
}
41
42
if (neg && inv) {
43
- op2 = opt_insert_after(ctx, op, sub_opc, 3);
44
+ op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
45
op2->args[0] = ret;
46
op2->args[1] = ret;
47
- op2->args[2] = arg_new_constant(ctx, 1);
48
+ op2->args[2] = arg_new_constant(ctx, -1);
49
} else if (inv) {
50
op2 = opt_insert_after(ctx, op, xor_opc, 3);
51
op2->args[0] = ret;
52
--
53
2.43.0
54
55
diff view generated by jsdifflib
Deleted patch
1
At the same time, drop all backend support for immediate
2
operands, as we now transform andc to and during optimize.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 2 --
tcg/arm/tcg-target-has.h | 1 -
tcg/i386/tcg-target-con-set.h | 2 +-
tcg/i386/tcg-target-has.h | 2 --
tcg/loongarch64/tcg-target-con-set.h | 1 +
tcg/loongarch64/tcg-target-has.h | 2 --
tcg/mips/tcg-target-has.h | 2 --
tcg/ppc/tcg-target-has.h | 2 --
tcg/riscv/tcg-target-con-set.h | 1 +
tcg/riscv/tcg-target-has.h | 2 --
tcg/s390x/tcg-target-con-set.h | 1 -
tcg/s390x/tcg-target-has.h | 2 --
tcg/sparc64/tcg-target-has.h | 2 --
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 2 --
tcg/tcg-op.c | 4 +--
tcg/tcg.c | 8 +++---
tcg/tci.c | 2 --
tcg/aarch64/tcg-target.c.inc | 24 ++++++++--------
tcg/arm/tcg-target.c.inc | 16 +++++++----
tcg/i386/tcg-target.c.inc | 31 +++++++++++---------
tcg/loongarch64/tcg-target.c.inc | 23 ++++++++-------
tcg/mips/tcg-target.c.inc | 4 +++
tcg/ppc/tcg-target.c.inc | 29 ++++++++-----------
tcg/riscv/tcg-target.c.inc | 27 +++++++++++-------
tcg/s390x/tcg-target.c.inc | 42 ++++++++++++++--------------
tcg/sparc64/tcg-target.c.inc | 16 +++++++----
tcg/tci/tcg-target.c.inc | 14 ++++++++--
28 files changed, 135 insertions(+), 130 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-con-set.h
+++ b/tcg/i386/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, reZ)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, L, L)
+C_O1_I2(r, r, r)
C_O1_I2(r, r, re)
C_O1_I2(r, r, ri)
-C_O1_I2(r, r, rI)
C_O1_I2(x, x, x)
C_N1_I2(r, r, r)
C_N1_I2(r, r, rW)
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 have_bmi1
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 have_bmi1
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I3(r, r, r)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
+C_O1_I2(r, r, r)
C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_andc_i32 0
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 1
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
C_O0_I2(rz, r)
C_O0_I2(rz, rz)
C_O1_I1(r, r)
+C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_orc_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_nand_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_orc_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rK)
-C_O1_I2(r, r, rKR)
C_O1_I2(r, r, rNK)
C_O1_I2(r, r, rNKR)
C_O1_I2(r, rZ, r)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_andc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_orc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_andc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_orc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap16_i32 0
#define TCG_TARGET_HAS_bswap32_i32 0
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_div_i32 1
#define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_andc_i32) {
+ if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_andc_i64) {
+ } else if (tcg_op_supported(INDEX_op_andc_i64, TCG_TYPE_I64, 0)) {
tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
+ OUTOP(INDEX_op_andc_i32, TCGOutOpBinary, outop_andc),
+ OUTOP(INDEX_op_andc_i64, TCGOutOpBinary, outop_andc),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap32_i32;
case INDEX_op_not_i32:
return TCG_TARGET_HAS_not_i32;
- case INDEX_op_andc_i32:
- return TCG_TARGET_HAS_andc_i32;
case INDEX_op_orc_i32:
return TCG_TARGET_HAS_orc_i32;
case INDEX_op_eqv_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap64_i64;
case INDEX_op_not_i64:
return TCG_TARGET_HAS_not_i64;
- case INDEX_op_andc_i64:
- return TCG_TARGET_HAS_andc_i64;
case INDEX_op_orc_i64:
return TCG_TARGET_HAS_orc_i64;
case INDEX_op_eqv_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)

case INDEX_op_add:
case INDEX_op_and:
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ^ regs[r2];
break;
-#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
CASE_32_64(andc)
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] & ~regs[r2];
break;
-#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
CASE_32_64(orc)
tci_args_rrr(insn, &r0, &r1, &r2);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, BIC, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
break;

- case INDEX_op_andc_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_andc_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2);
- } else {
- tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_or_i32:
a2 = (int32_t)a2;
/* FALLTHRU */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
case INDEX_op_eqv_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_BIC, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[0], args[1], args[2], const_args[2]);
}
break;
- case INDEX_op_andc_i32:
- tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
- args[0], args[1], args[2], const_args[2]);
- break;
case INDEX_op_or_i32:
c = ARITH_ORR;
goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i32:
return C_O1_I2(r, r, rIN);

- case INDEX_op_andc_i32:
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
return C_O1_I2(r, r, rIK);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
+}
+
+static TCGConstraintSetIndex cset_andc(TCGType type, unsigned flags)
+{
+ return have_bmi1 ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_andc,
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(andc):
- if (const_a2) {
- tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
- tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
- } else {
- tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
- }
- break;
-
OP_32_64(mul):
if (const_a2) {
int32_t val;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_xor_i64:
return C_O1_I2(r, 0, re);

- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- return C_O1_I2(r, r, rI);
-
case INDEX_op_shl_i32:
case INDEX_op_shl_i64:
case INDEX_op_shr_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_andn(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- if (c2) {
- /* guaranteed to fit due to constraint */
- tcg_out_opc_andi(s, a0, a1, ~a2);
- } else {
- tcg_out_opc_andn(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);

- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
/*
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_NotImplemented,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ANDC | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out32(s, XOR | SAB(a1, a0, a2));
}
break;
- case INDEX_op_andc_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi32(s, a0, a1, ~a2);
- } else {
- tcg_out32(s, ANDC | SAB(a1, a0, a2));
- }
- break;
- case INDEX_op_andc_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_andi64(s, a0, a1, ~a2);
- } else {
- tcg_out32(s, ANDC | SAB(a1, a0, a2));
- }
- break;
case INDEX_op_orc_i32:
if (const_args[2]) {
tcg_out_ori32(s, args[0], args[1], ~args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_or_i32:
case INDEX_op_xor_i32:
- case INDEX_op_andc_i32:
case INDEX_op_orc_i32:
case INDEX_op_eqv_i32:
case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sar_i32:
case INDEX_op_rotl_i32:
case INDEX_op_rotr_i32:
- case INDEX_op_andc_i64:
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
+}
+
+static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_zbb_rrr,
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2);
- } else {
- tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
- }
- break;
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, rI);

- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
case INDEX_op_eqv_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi_3,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
+ }
+}
+
+static TCGConstraintSetIndex cset_misc3_rrr(TCGType type, unsigned flags)
+{
+ return HAVE_FACILITY(MISC_INSN_EXT3) ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_andc,
+};
+

# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_andc_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_andi(s, TCG_TYPE_I32, a0, (uint32_t)~a2);
- } else {
- tcg_out_insn(s, RRFa, NCRK, a0, a1, a2);
- }
- break;
case INDEX_op_orc_i32:
a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_andc_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_andi(s, TCG_TYPE_I64, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, NCGRK, a0, a1, a2);
- }
- break;
case INDEX_op_orc_i64:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rK);

- case INDEX_op_andc_i32:
case INDEX_op_orc_i32:
case INDEX_op_eqv_i32:
return C_O1_I2(r, r, ri);
- case INDEX_op_andc_i64:
- return C_O1_I2(r, r, rKR);
case INDEX_op_orc_i64:
case INDEX_op_eqv_i64:
return C_O1_I2(r, r, rNK);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rri = tgen_andi,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ANDN);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
- OP_32_64(andc):
- c = ARITH_ANDN;
- goto gen_arith;
OP_32_64(or):
c = ARITH_OR;
goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_divu_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
case INDEX_op_or_i32:
case INDEX_op_or_i64:
case INDEX_op_orc_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
case INDEX_op_eqv_i32:
case INDEX_op_eqv_i64:
case INDEX_op_nand_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
.out_rrr = tgen_and,
};

+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, glue(INDEX_op_andc_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
CASE_32_64(mul)
CASE_32_64(or)
CASE_32_64(xor)
- CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
--
2.43.0
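A note for readers following the conversion: the TCGOutOpBinary
structure instantiated throughout the hunks above pairs a constraint
set with per-operand-form emitters. Its rough shape, as inferred from
the usage in this series (the exact declaration lives in tcg/tcg.c and
may differ in detail):

    typedef struct TCGOutOpBinary {
        TCGOutOp base;   /* carries static_constraint / dynamic_constraint */
        /* Emit the operation with three register operands. */
        void (*out_rrr)(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2);
        /* Emit the operation with a constant second source, if supported. */
        void (*out_rri)(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, tcg_target_long a2);
    } TCGOutOpBinary;

A backend either hard-codes .base.static_constraint (e.g.
C_O1_I2(r, r, r)), or sets C_Dynamic plus a dynamic_constraint callback
that returns C_NotImplemented when the instruction is absent, as the
i386, riscv and s390x hunks above do for andc.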
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
target/arm/tcg/translate-a64.c | 2 +-
target/tricore/translate.c | 2 +-
tcg/optimize.c | 6 ++++--
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 6 ++----
tcg/tci.c | 5 ++---
docs/devel/tcg-ops.rst | 2 +-
tcg/tci/tcg-target.c.inc | 2 +-
9 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)

DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
+DEF(andc, 1, 2, 0, TCG_OPF_INT)

DEF(setcond_i32, 1, 2, 1, 0)
DEF(negsetcond_i32, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
DEF(not_i32, 1, 1, 0, 0)
DEF(neg_i32, 1, 1, 0, 0)
-DEF(andc_i32, 1, 2, 0, 0)
DEF(orc_i32, 1, 2, 0, 0)
DEF(eqv_i32, 1, 2, 0, 0)
DEF(nand_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
DEF(not_i64, 1, 1, 0, 0)
DEF(neg_i64, 1, 1, 0, 0)
-DEF(andc_i64, 1, 2, 0, 0)
DEF(orc_i64, 1, 2, 0, 0)
DEF(eqv_i64, 1, 2, 0, 0)
DEF(nand_i64, 1, 2, 0, 0)
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

nzcv = a->nzcv;
- has_andc = tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0);
+ has_andc = tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0);
if (nzcv & 8) { /* N */
tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
} else {
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_bit_andacc(DisasContext *ctx)
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
break;
case OPC2_32_BIT_AND_NOR_T:
- if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
} else {
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
CASE_OP_32_64(neg):
return -x;

- CASE_OP_32_64_VEC(andc):
+ case INDEX_op_andc:
+ case INDEX_op_andc_vec:
return x & ~y;

CASE_OP_32_64_VEC(orc):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_and_vec:
done = fold_and(&ctx, op);
break;
- CASE_OP_32_64_VEC(andc):
+ case INDEX_op_andc:
+ case INDEX_op_andc_vec:
done = fold_andc(&ctx, op);
break;
CASE_OP_32_64(brcond):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_andc, ret, arg1, arg2);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
tcg_gen_not_i32(t0, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (tcg_op_supported(INDEX_op_andc_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_andc, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_andc, ret, arg1, arg2);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
tcg_gen_not_i64(t0, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
- OUTOP(INDEX_op_andc_i32, TCGOutOpBinary, outop_andc),
- OUTOP(INDEX_op_andc_i64, TCGOutOpBinary, outop_andc),
+ OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)

case INDEX_op_add:
case INDEX_op_and:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
+ case INDEX_op_andc:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ^ regs[r2];
break;
- CASE_32_64(andc)
+ case INDEX_op_andc:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] & ~regs[r2];
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)

case INDEX_op_add:
case INDEX_op_and:
+ case INDEX_op_andc:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
case INDEX_op_eqv_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

- | *t0* = ~\ *t1*

- * - andc_i32/i64 *t0*, *t1*, *t2*
+ * - andc *t0*, *t1*, *t2*

- | *t0* = *t1* & ~\ *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_and = {
static void tgen_andc(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_andc_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 4 +++
tcg/aarch64/tcg-target.c.inc | 31 ++++++++++++---------
tcg/arm/tcg-target.c.inc | 24 ++++++++++++----
tcg/i386/tcg-target.c.inc | 25 +++++++++++++----
tcg/loongarch64/tcg-target.c.inc | 29 ++++++++++++--------
tcg/mips/tcg-target.c.inc | 25 ++++++++++++-----
tcg/ppc/tcg-target.c.inc | 29 ++++++++++++--------
tcg/riscv/tcg-target.c.inc | 29 ++++++++++++--------
tcg/s390x/tcg-target.c.inc | 47 +++++++++++++++++---------------
tcg/sparc64/tcg-target.c.inc | 23 ++++++++++++----
tcg/tci/tcg-target.c.inc | 14 ++++++++--
11 files changed, 186 insertions(+), 94 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
+ OUTOP(INDEX_op_or_i32, TCGOutOpBinary, outop_or),
+ OUTOP(INDEX_op_or_i64, TCGOutOpBinary, outop_or),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_andc:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, ORR, type, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_logicali(s, I3404_ORRI, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rL),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
break;

- case INDEX_op_or_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_or_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2);
- } else {
- tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_orc_i32:
a2 = (int32_t)a2;
/* FALLTHRU */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_mulsh_i64:
return C_O1_I2(r, r, r);

- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
case INDEX_op_orc_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_ORR, a0, a1, encode_imm_nofail(a2));
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[0], args[1], args[2], const_args[2]);
}
break;
- case INDEX_op_or_i32:
- c = ARITH_ORR;
- goto gen_arith;
case INDEX_op_xor_i32:
c = ARITH_EOR;
- /* Fall through. */
- gen_arith:
tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
break;
case INDEX_op_add2_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_muls2_i32:
return C_O2_I2(r, r, r, r);

- case INDEX_op_or_i32:
case INDEX_op_xor_i32:
return C_O1_I2(r, r, rI);

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithr(s, ARITH_OR + rexw, a0, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tgen_arithi(s, ARITH_OR + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, 0, re),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
- OP_32_64(or):
- c = ARITH_OR;
- goto gen_arith;
OP_32_64(xor):
c = ARITH_XOR;
goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
return C_O1_I2(r, 0, re);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_or(s, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_ori(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- if (c2) {
- tcg_out_opc_ori(s, a0, a1, a2);
- } else {
- tcg_out_opc_or(s, a0, a1, a2);
- }
- break;
-
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_nor_i32:
case INDEX_op_nor_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
/* LoongArch reg-imm bitops have their imms ZERO-extended */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.base.static_constraint = C_NotImplemented,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- i1 = OPC_OR, i2 = OPC_ORI;
- goto do_binary;
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
i1 = OPC_XOR, i2 = OPC_XORI;
- do_binary:
if (c2) {
tcg_out_opc_imm(s, i2, a0, a1, a2);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_muls2_i64:
case INDEX_op_mulu2_i64:
return C_O2_I2(r, r, r, r);
- case INDEX_op_or_i32:
case INDEX_op_xor_i32:
- case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rI);
case INDEX_op_shl_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, OR | SAB(a1, a0, a2));
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_ori32(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rU),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_or_i64:
- case INDEX_op_or_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_ori32(s, a0, a1, a2);
- } else {
- tcg_out32(s, OR | SAB(a1, a0, a2));
- }
- break;
case INDEX_op_xor_i64:
case INDEX_op_xor_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_or_i32:
case INDEX_op_xor_i32:
case INDEX_op_orc_i32:
case INDEX_op_eqv_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_sub_i32:
return C_O1_I2(r, rI, ri);
- case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rU);
case INDEX_op_sub_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rI),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
- }
- break;
-
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_or_i32:
case INDEX_op_xor_i32:
- case INDEX_op_or_i64:
case INDEX_op_xor_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, OR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
+ }
+}
+
+static void tgen_ori_3(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ tgen_ori(s, a0, type == TCG_TYPE_I32 ? (uint32_t)a2 : a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rK),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori_3,
+};
+

# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_or_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tgen_ori(s, a0, a2);
- } else if (a0 == a1) {
- tcg_out_insn(s, RR, OR, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, ORK, a0, a1, a2);
- }
- break;
case INDEX_op_xor_i32:
a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_or_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_ori(s, a0, a2);
- } else {
- tcg_out_insn(s, RRFa, OGRK, a0, a1, a2);
- }
- break;
case INDEX_op_xor_i64:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
- case INDEX_op_or_i32:
case INDEX_op_xor_i32:
return C_O1_I2(r, r, ri);
- case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return C_O1_I2(r, r, rK);

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_OR);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_OR);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
- OP_32_64(or):
- c = ARITH_OR;
- goto gen_arith;
OP_32_64(orc):
c = ARITH_ORN;
goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_divu_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
case INDEX_op_xor_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_nand_i64:
case INDEX_op_nor_i32:
case INDEX_op_nor_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
case INDEX_op_xor_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, glue(INDEX_op_or_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_or,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,

CASE_32_64(sub)
CASE_32_64(mul)
- CASE_32_64(or)
CASE_32_64(xor)
CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
--
2.43.0
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
target/sh4/translate.c | 4 ++--
6
tcg/optimize.c | 6 ++++--
7
tcg/tcg-op.c | 4 ++--
8
tcg/tcg.c | 9 +++------
9
tcg/tci.c | 5 ++---
10
docs/devel/tcg-ops.rst | 2 +-
11
tcg/tci/tcg-target.c.inc | 2 +-
12
8 files changed, 16 insertions(+), 19 deletions(-)
13
1
14
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/tcg/tcg-opc.h
17
+++ b/include/tcg/tcg-opc.h
18
@@ -XXX,XX +XXX,XX @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
19
DEF(add, 1, 2, 0, TCG_OPF_INT)
20
DEF(and, 1, 2, 0, TCG_OPF_INT)
21
DEF(andc, 1, 2, 0, TCG_OPF_INT)
22
+DEF(or, 1, 2, 0, TCG_OPF_INT)
23
24
DEF(setcond_i32, 1, 2, 1, 0)
25
DEF(negsetcond_i32, 1, 2, 1, 0)
26
@@ -XXX,XX +XXX,XX @@ DEF(rem_i32, 1, 2, 0, 0)
27
DEF(remu_i32, 1, 2, 0, 0)
28
DEF(div2_i32, 2, 3, 0, 0)
29
DEF(divu2_i32, 2, 3, 0, 0)
30
-DEF(or_i32, 1, 2, 0, 0)
31
DEF(xor_i32, 1, 2, 0, 0)
32
/* shifts/rotates */
33
DEF(shl_i32, 1, 2, 0, 0)
34
@@ -XXX,XX +XXX,XX @@ DEF(rem_i64, 1, 2, 0, 0)
35
DEF(remu_i64, 1, 2, 0, 0)
36
DEF(div2_i64, 2, 3, 0, 0)
37
DEF(divu2_i64, 2, 3, 0, 0)
38
-DEF(or_i64, 1, 2, 0, 0)
39
DEF(xor_i64, 1, 2, 0, 0)
40
/* shifts/rotates */
41
DEF(shl_i64, 1, 2, 0, 0)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
-       op_opc = INDEX_op_or_i32;
+       op_opc = INDEX_op_or;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load. */
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
        }
        break;

-   case INDEX_op_or_i32:
+   case INDEX_op_or:
        if (op_dst != st_src) {
            goto fail;
        }
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
    case INDEX_op_and_vec:
        return x & y;

-   CASE_OP_32_64_VEC(or):
+   case INDEX_op_or:
+   case INDEX_op_or_vec:
        return x | y;

    CASE_OP_32_64_VEC(xor):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
-       CASE_OP_32_64_VEC(or):
+       case INDEX_op_or:
+       case INDEX_op_or_vec:
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
-   tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
+   tcg_gen_op3_i32(INDEX_op_or, ret, arg1, arg2);
}

void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_REG_BITS == 64) {
-       tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
+       tcg_gen_op3_i64(INDEX_op_or, ret, arg1, arg2);
    } else {
        tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
-   OUTOP(INDEX_op_or_i32, TCGOutOpBinary, outop_or),
-   OUTOP(INDEX_op_or_i64, TCGOutOpBinary, outop_or),
+   OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_add:
    case INDEX_op_and:
    case INDEX_op_mov:
+   case INDEX_op_or:
        return has_type;

    case INDEX_op_setcond_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_sub_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_mul_i32:
-   case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_sub_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_mul_i64:
-   case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_add:
    case INDEX_op_and:
    case INDEX_op_andc:
-   case INDEX_op_or_i32:
-   case INDEX_op_or_i64:
+   case INDEX_op_or:
        {
            const TCGOutOpBinary *out =
                container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
-       CASE_32_64(or)
+       case INDEX_op_or:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
    case INDEX_op_add:
    case INDEX_op_and:
    case INDEX_op_andc:
+   case INDEX_op_or:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
-   case INDEX_op_or_i32:
-   case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_orc_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

     - | *t0* = *t1* & *t2*

-   * - or_i32/i64 *t0*, *t1*, *t2*
+   * - or *t0*, *t1*, *t2*

     - | *t0* = *t1* | *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
-   tcg_out_op_rrr(s, glue(INDEX_op_or_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+   tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
--
2.43.0
Deleted patch
At the same time, drop all backend support for immediate
operands, as we now transform orc to or during optimize.
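The rewrite leans on the identity x | ~c == x OR'd with the inverted
constant, so an orc whose second operand is constant can be turned into a
plain or in place. A minimal sketch of that transformation (illustrative
only; the in-tree version is the fold_orc logic in tcg/optimize.c, reusing
the existing OptContext/arg_new_constant helpers):

    /* Sketch: rewrite "dest = a1 orc C" as "dest = a1 or ~C". */
    static void fold_orc_imm_sketch(OptContext *ctx, TCGOp *op, uint64_t c)
    {
        op->opc = INDEX_op_or;                    /* orc becomes or ... */
        op->args[2] = arg_new_constant(ctx, ~c);  /* ... of ~constant */
    }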

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-has.h | 2 --
 tcg/arm/tcg-target-has.h | 1 -
 tcg/i386/tcg-target-has.h | 2 --
 tcg/loongarch64/tcg-target-con-set.h | 1 -
 tcg/loongarch64/tcg-target-con-str.h | 1 -
 tcg/loongarch64/tcg-target-has.h | 2 --
 tcg/mips/tcg-target-has.h | 2 --
 tcg/ppc/tcg-target-has.h | 2 --
 tcg/riscv/tcg-target-has.h | 2 --
 tcg/s390x/tcg-target-has.h | 2 --
 tcg/sparc64/tcg-target-has.h | 2 --
 tcg/tcg-has.h | 1 -
 tcg/tci/tcg-target-has.h | 2 --
 tcg/tcg-op.c | 4 +--
 tcg/tcg.c | 8 +++---
 tcg/tci.c | 2 --
 tcg/aarch64/tcg-target.c.inc | 24 ++++++++---------
 tcg/arm/tcg-target.c.inc | 4 +++
 tcg/i386/tcg-target.c.inc | 4 +++
 tcg/loongarch64/tcg-target.c.inc | 40 ++++++++++------------------
 tcg/mips/tcg-target.c.inc | 4 +++
 tcg/ppc/tcg-target.c.inc | 22 +++++++--------
 tcg/riscv/tcg-target.c.inc | 22 ++++++++-------
 tcg/s390x/tcg-target.c.inc | 36 +++++++++++--------------
 tcg/sparc64/tcg-target.c.inc | 16 +++++++----
 tcg/tci/tcg-target.c.inc | 14 +++++++---
 26 files changed, 104 insertions(+), 118 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
C_O1_I2(r, r, r)
-C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('w', ALL_VECTOR_REGS)
CONST('I', TCG_CT_CONST_S12)
CONST('J', TCG_CT_CONST_S32)
CONST('U', TCG_CT_CONST_U12)
-CONST('C', TCG_CT_CONST_C12)
CONST('W', TCG_CT_CONST_WSZ)
CONST('M', TCG_CT_CONST_VCMP)
CONST('A', TCG_CT_CONST_VADD)
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 1
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_add2_i64 0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nor_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_orc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3)
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_orc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3)
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap16_i32 0
#define TCG_TARGET_HAS_bswap32_i32 0
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_ctz_i32 1
#define TCG_TARGET_HAS_ctpop_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_rot_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 0
#define TCG_TARGET_HAS_muls2_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_ctz_i64 1
#define TCG_TARGET_HAS_ctpop_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
#define TCG_TARGET_HAS_rot_i64 1
#define TCG_TARGET_HAS_negsetcond_i64 0
#define TCG_TARGET_HAS_muls2_i64 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
-   if (TCG_TARGET_HAS_orc_i32) {
+   if (tcg_op_supported(INDEX_op_orc_i32, TCG_TYPE_I32, 0)) {
        tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-   } else if (TCG_TARGET_HAS_orc_i64) {
+   } else if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) {
        tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
+   OUTOP(INDEX_op_orc_i32, TCGOutOpBinary, outop_orc),
+   OUTOP(INDEX_op_orc_i64, TCGOutOpBinary, outop_orc),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
-   case INDEX_op_orc_i32:
-       return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
-   case INDEX_op_orc_i64:
-       return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_and:
    case INDEX_op_andc:
    case INDEX_op_or:
+   case INDEX_op_orc_i32:
+   case INDEX_op_orc_i64:
        {
            const TCGOutOpBinary *out =
                container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
-#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
-#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3510, ORN, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_orc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
        tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
        break;

-   case INDEX_op_orc_i32:
-       a2 = (int32_t)a2;
-       /* FALLTHRU */
-   case INDEX_op_orc_i64:
-       if (c2) {
-           tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2);
-       } else {
-           tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_xor_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rL);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_NotImplemented,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_NotImplemented,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_S12 0x100
#define TCG_CT_CONST_S32 0x200
#define TCG_CT_CONST_U12 0x400
-#define TCG_CT_CONST_C12 0x800
-#define TCG_CT_CONST_WSZ 0x1000
-#define TCG_CT_CONST_VCMP 0x2000
-#define TCG_CT_CONST_VADD 0x4000
+#define TCG_CT_CONST_WSZ 0x800
+#define TCG_CT_CONST_VCMP 0x1000
+#define TCG_CT_CONST_VADD 0x2000

#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
-   if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
-       return true;
-   }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_orn(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_orc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
-       if (c2) {
-           /* guaranteed to fit due to constraint */
-           tcg_out_opc_ori(s, a0, a1, ~a2);
-       } else {
-           tcg_out_opc_orn(s, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, r);

-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
-       /*
-        * LoongArch insns for these ops don't have reg-imm forms, but we
-        * can express using andi/ori if ~constant satisfies
-        * TCG_CT_CONST_U12.
-        */
-       return C_O1_I2(r, r, rC);
-
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_NotImplemented,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out32(s, ORC | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_orc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
-   case INDEX_op_orc_i32:
-       if (const_args[2]) {
-           tcg_out_ori32(s, args[0], args[1], ~args[2]);
-           break;
-       }
-       /* FALLTHRU */
-   case INDEX_op_orc_i64:
-       tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
-       break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
        return C_O0_I2(r, r);

    case INDEX_op_xor_i32:
-   case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
-   case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_zbb_rrr,
+    .out_rrr = tgen_orc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
-       if (c2) {
-           tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2);
-       } else {
-           tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
-       }
-       break;
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rI);

-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rJ);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori_3,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
+    } else {
+        tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
+    }
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_misc3_rrr,
+    .out_rrr = tgen_orc,
+};
+

# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_orc_i32:
-       a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
-       if (const_args[2]) {
-           tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-           tgen_ori(s, a0, (uint32_t)~a2);
-       } else {
-           tcg_out_insn(s, RRFa, OCRK, a0, a1, a2);
-       }
-       break;
    case INDEX_op_eqv_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_orc_i64:
-       a0 = args[0], a1 = args[1], a2 = args[2];
-       if (const_args[2]) {
-           tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-           tgen_ori(s, a0, ~a2);
-       } else {
-           tcg_out_insn(s, RRFa, OCGRK, a0, a1, a2);
-       }
-       break;
    case INDEX_op_eqv_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rK);

-   case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
        return C_O1_I2(r, r, ri);
-   case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rNK);

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rri = tgen_ori,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_arith(s, a0, a1, a2, ARITH_ORN);
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_orc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
-   OP_32_64(orc):
-       c = ARITH_ORN;
-       goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
    .out_rrr = tgen_or,
};

+static void tgen_orc(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, glue(INDEX_op_orc_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_orc = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_orc,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        CASE_32_64(sub)
        CASE_32_64(mul)
        CASE_32_64(xor)
-       CASE_32_64(orc)  /* Optional (TCG_TARGET_HAS_orc_*). */
        CASE_32_64(eqv)  /* Optional (TCG_TARGET_HAS_eqv_*). */
        CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
        CASE_32_64(nor)  /* Optional (TCG_TARGET_HAS_nor_*). */
--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 3 +--
 target/arm/tcg/translate-sve.c | 2 +-
 target/tricore/translate.c | 2 +-
 tcg/optimize.c | 6 ++++--
 tcg/tcg-op.c | 4 ++--
 tcg/tcg.c | 6 ++----
 tcg/tci.c | 5 ++---
 docs/devel/tcg-ops.rst | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 9 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(or, 1, 2, 0, TCG_OPF_INT)
+DEF(orc, 1, 2, 0, TCG_OPF_INT)

DEF(setcond_i32, 1, 2, 1, 0)
DEF(negsetcond_i32, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
DEF(not_i32, 1, 1, 0, 0)
DEF(neg_i32, 1, 1, 0, 0)
-DEF(orc_i32, 1, 2, 0, 0)
DEF(eqv_i32, 1, 2, 0, 0)
DEF(nand_i32, 1, 2, 0, 0)
DEF(nor_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
DEF(not_i64, 1, 1, 0, 0)
DEF(neg_i64, 1, 1, 0, 0)
-DEF(orc_i64, 1, 2, 0, 0)
DEF(eqv_i64, 1, 2, 0, 0)
DEF(nand_i64, 1, 2, 0, 0)
DEF(nor_i64, 1, 2, 0, 0)
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -XXX,XX +XXX,XX @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
     *    = | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
-   if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) {
+   if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_bit_orand(DisasContext *ctx)
                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
        break;
    case OPC2_32_BIT_OR_NOR_T:
-       if (tcg_op_supported(INDEX_op_orc_i32, TCG_TYPE_I32, 0)) {
+       if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) {
            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
        } else {
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
    case INDEX_op_andc_vec:
        return x & ~y;

-   CASE_OP_32_64_VEC(orc):
+   case INDEX_op_orc:
+   case INDEX_op_orc_vec:
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        case INDEX_op_or_vec:
            done = fold_or(&ctx, op);
            break;
-       CASE_OP_32_64_VEC(orc):
+       case INDEX_op_orc:
+       case INDEX_op_orc_vec:
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
-   if (tcg_op_supported(INDEX_op_orc_i32, TCG_TYPE_I32, 0)) {
-       tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2);
+   if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I32, 0)) {
+       tcg_gen_op3_i32(INDEX_op_orc, ret, arg1, arg2);
    } else {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
        tcg_gen_not_i32(t0, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
        tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-   } else if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) {
-       tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2);
+   } else if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) {
+       tcg_gen_op3_i64(INDEX_op_orc, ret, arg1, arg2);
    } else {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        tcg_gen_not_i64(t0, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
-   OUTOP(INDEX_op_orc_i32, TCGOutOpBinary, outop_orc),
-   OUTOP(INDEX_op_orc_i64, TCGOutOpBinary, outop_orc),
+   OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_and:
    case INDEX_op_andc:
    case INDEX_op_or:
-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
+   case INDEX_op_orc:
        {
            const TCGOutOpBinary *out =
                container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
-       CASE_32_64(orc)
+       case INDEX_op_orc:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
    case INDEX_op_and:
    case INDEX_op_andc:
    case INDEX_op_or:
+   case INDEX_op_orc:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
-   case INDEX_op_orc_i32:
-   case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

     - | *t0* = ~(*t1* | *t2*)

-   * - orc_i32/i64 *t0*, *t1*, *t2*
+   * - orc *t0*, *t1*, *t2*

     - | *t0* = *t1* | ~\ *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_or = {
static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
-   tcg_out_op_rrr(s, glue(INDEX_op_orc_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+   tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
--
2.43.0
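The semantics being renamed above are unchanged: per the tcg-ops.rst hunk,
orc t0, t1, t2 still computes t0 = t1 | ~t2. A worked 8-bit example
(values picked arbitrarily for illustration):

    /* orc t0, t1, t2  =>  t0 = t1 | ~t2 */
    uint8_t t1 = 0xcc, t2 = 0xaa;
    uint8_t t0 = t1 | (uint8_t)~t2;   /* ~0xaa = 0x55; 0xcc | 0x55 = 0xdd */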
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 4 +++
 tcg/aarch64/tcg-target.c.inc | 31 +++++++++++---------
 tcg/arm/tcg-target.c.inc | 25 +++++++++++-----
 tcg/i386/tcg-target.c.inc | 27 ++++++++++++-----
 tcg/loongarch64/tcg-target.c.inc | 29 +++++++++++-------
 tcg/mips/tcg-target.c.inc | 28 +++++++++++-------
 tcg/ppc/tcg-target.c.inc | 30 +++++++++++--------
 tcg/riscv/tcg-target.c.inc | 29 +++++++++++-------
 tcg/s390x/tcg-target.c.inc | 50 ++++++++++++++++----------------
 tcg/sparc64/tcg-target.c.inc | 23 +++++++++++----
 tcg/tci/tcg-target.c.inc | 14 +++++++--
 11 files changed, 186 insertions(+), 104 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
    OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
+   OUTOP(INDEX_op_xor_i32, TCGOutOpBinary, outop_xor),
+   OUTOP(INDEX_op_xor_i64, TCGOutOpBinary, outop_xor),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_andc:
    case INDEX_op_or:
    case INDEX_op_orc:
+   case INDEX_op_xor_i32:
+   case INDEX_op_xor_i64:
        {
            const TCGOutOpBinary *out =
                container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3510, EOR, type, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_logicali(s, I3404_EORI, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rL),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
        tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
        break;

-   case INDEX_op_xor_i32:
-       a2 = (int32_t)a2;
-       /* FALLTHRU */
-   case INDEX_op_xor_i64:
-       if (c2) {
-           tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2);
-       } else {
-           tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_eqv_i32:
        a2 = (int32_t)a2;
        /* FALLTHRU */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_mulsh_i64:
        return C_O1_I2(r, r, r);

-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        return C_O1_I2(r, r, rL);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_NotImplemented,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_EOR, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_dat_imm(s, COND_AL, ARITH_EOR, a0, a1, encode_imm_nofail(a2));
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rI),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        args[0], args[1], args[2], const_args[2]);
        }
        break;
-   case INDEX_op_xor_i32:
-       c = ARITH_EOR;
-       tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
-       break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, r, r);

-   case INDEX_op_xor_i32:
-       return C_O1_I2(r, r, rI);
-
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_NotImplemented,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tgen_arithr(s, ARITH_XOR + rexw, a0, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tgen_arithi(s, ARITH_XOR + rexw, a0, a2, false);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, 0, re),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,

    OP_32_64(sub):
        c = ARITH_SUB;
-       goto gen_arith;
-   OP_32_64(xor):
-       c = ARITH_XOR;
-       goto gen_arith;
-   gen_arith:
        if (const_a2) {
            tgen_arithi(s, c + rexw, a0, a2, 0);
        } else {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
        return C_O1_I2(r, 0, re);

    case INDEX_op_shl_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_xor(s, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_opc_xori(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rU),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
-       if (c2) {
-           tcg_out_opc_xori(s, a0, a1, a2);
-       } else {
-           tcg_out_opc_xor(s, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_extract_i32:
        if (a2 == 0 && args[3] <= 12) {
            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_NotImplemented,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rI),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
-       i1 = OPC_XOR, i2 = OPC_XORI;
-       if (c2) {
-           tcg_out_opc_imm(s, i2, a0, a1, a2);
-           break;
-       }
    do_binaryv:
        tcg_out_opc_reg(s, i1, a0, a1, a2);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_muls2_i64:
    case INDEX_op_mulu2_i64:
        return C_O2_I2(r, r, r, r);
-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
-       return C_O1_I2(r, r, rI);
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out32(s, XOR | SAB(a1, a0, a2));
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_xori32(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rU),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_xor_i64:
-   case INDEX_op_xor_i32:
-       a0 = args[0], a1 = args[1], a2 = args[2];
-       if (const_args[2]) {
-           tcg_out_xori32(s, a0, a1, a2);
-       } else {
-           tcg_out32(s, XOR | SAB(a1, a0, a2));
-       }
-       break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

-   case INDEX_op_xor_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

    case INDEX_op_sub_i32:
        return C_O1_I2(r, rI, ri);
-   case INDEX_op_xor_i64:
-       return C_O1_I2(r, r, rU);
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rI, rT);
    case INDEX_op_clz_i32:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rI),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
-       if (c2) {
-           tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
-       } else {
-           tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_st_i64:
        return C_O0_I2(rz, r);

-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_negsetcond_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type != TCG_TYPE_I32) {
+        tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
+    } else if (a0 == a1) {
+        tcg_out_insn(s, RR, XR, a0, a2);
+    } else {
+        tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
+    }
+}
+
+static void tgen_xori_3(TCGContext *s, TCGType type,
+                        TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_mov(s, type, a0, a1);
+    tgen_xori(s, a0, type == TCG_TYPE_I32 ? (uint32_t)a2 : a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rK),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori_3,
+};
+

# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_xor_i32:
-       a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
-       if (const_args[2]) {
-           tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
-           tcg_out_insn(s, RIL, XILF, a0, a2);
-       } else if (a0 == a1) {
-           tcg_out_insn(s, RR, XR, args[0], args[2]);
-       } else {
-           tcg_out_insn(s, RRFa, XRK, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_eqv_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-   case INDEX_op_xor_i64:
-       a0 = args[0], a1 = args[1], a2 = args[2];
-       if (const_args[2]) {
-           tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-           tgen_xori(s, a0, a2);
-       } else {
-           tcg_out_insn(s, RRFa, XGRK, a0, a1, a2);
-       }
-       break;
-
    case INDEX_op_eqv_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
-   case INDEX_op_xor_i32:
        return C_O1_I2(r, r, ri);
-   case INDEX_op_xor_i64:
-       return C_O1_I2(r, r, rK);

    case INDEX_op_eqv_i32:
        return C_O1_I2(r, r, ri);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_arith(s, a0, a1, a2, ARITH_XOR);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_arithi(s, a0, a1, a2, ARITH_XOR);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, rJ),
+    .out_rrr = tgen_xor,
+    .out_rri = tgen_xori,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
-   OP_32_64(xor):
-       c = ARITH_XOR;
-       goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
-   case INDEX_op_xor_i32:
-   case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
    .out_rrr = tgen_orc,
};

+static void tgen_xor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, glue(INDEX_op_xor_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_xor = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_xor,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,

        CASE_32_64(sub)
        CASE_32_64(mul)
-       CASE_32_64(xor)
        CASE_32_64(eqv)  /* Optional (TCG_TARGET_HAS_eqv_*). */
        CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
        CASE_32_64(nor)  /* Optional (TCG_TARGET_HAS_nor_*). */
--
2.43.0
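The shape all of these per-backend conversions share: all_outop[] maps an
opcode to a TCGOutOp descriptor, and tcg_reg_alloc_op() downcasts to the
binary flavor to emit either the register or the immediate form via the
out_rrr/out_rri hooks. A reduced sketch of that dispatch (argument names
illustrative; the authoritative code is tcg_reg_alloc_op() in tcg/tcg.c):

    /* Sketch only: each binary op supplies out_rrr and, when the backend
     * has an immediate form, out_rri; the allocator picks one per use. */
    const TCGOutOpBinary *out =
        container_of(all_outop[op->opc], TCGOutOpBinary, base);
    if (const_args[2]) {
        out->out_rri(s, type, a0, a1, a2);   /* reg, reg, immediate */
    } else {
        out->out_rrr(s, type, a0, a1, a2);   /* reg, reg, reg */
    }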
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
target/sh4/translate.c | 6 +++---
tcg/optimize.c | 18 ++++++++----------
tcg/tcg-op.c | 4 ++--
tcg/tcg.c | 9 +++------
tcg/tci.c | 5 ++---
docs/devel/tcg-ops.rst | 2 +-
tcg/tci/tcg-target.c.inc | 2 +-
8 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(or, 1, 2, 0, TCG_OPF_INT)
DEF(orc, 1, 2, 0, TCG_OPF_INT)
+DEF(xor, 1, 2, 0, TCG_OPF_INT)

DEF(setcond_i32, 1, 2, 1, 0)
DEF(negsetcond_i32, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(rem_i32, 1, 2, 0, 0)
DEF(remu_i32, 1, 2, 0, 0)
DEF(div2_i32, 2, 3, 0, 0)
DEF(divu2_i32, 2, 3, 0, 0)
-DEF(xor_i32, 1, 2, 0, 0)
/* shifts/rotates */
DEF(shl_i32, 1, 2, 0, 0)
DEF(shr_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(rem_i64, 1, 2, 0, 0)
DEF(remu_i64, 1, 2, 0, 0)
DEF(div2_i64, 2, 3, 0, 0)
DEF(divu2_i64, 2, 3, 0, 0)
-DEF(xor_i64, 1, 2, 0, 0)
/* shifts/rotates */
DEF(shl_i64, 1, 2, 0, 0)
DEF(shr_i64, 1, 2, 0, 0)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
op_opc = INDEX_op_and;
goto do_reg_op;
case 0x200a: /* xor Rm,Rn */
- op_opc = INDEX_op_xor_i32;
+ op_opc = INDEX_op_xor;
goto do_reg_op;
case 0x200b: /* or Rm,Rn */
op_opc = INDEX_op_or;
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
goto fail;
}
op_dst = B11_8;
- op_opc = INDEX_op_xor_i32;
+ op_opc = INDEX_op_xor;
op_arg = tcg_constant_i32(-1);
break;

@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;

- case INDEX_op_xor_i32:
+ case INDEX_op_xor:
if (op_dst != st_src) {
goto fail;
}
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
case INDEX_op_or_vec:
return x | y;

- CASE_OP_32_64_VEC(xor):
+ case INDEX_op_xor:
+ case INDEX_op_xor_vec:
return x ^ y;

case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
break;
}
if (convert) {
- TCGOpcode xor_opc, neg_opc;
+ TCGOpcode neg_opc;

if (!inv && !neg) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
switch (ctx->type) {
case TCG_TYPE_I32:
neg_opc = INDEX_op_neg_i32;
- xor_opc = INDEX_op_xor_i32;
break;
case TCG_TYPE_I64:
neg_opc = INDEX_op_neg_i64;
- xor_opc = INDEX_op_xor_i64;
break;
default:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -1);
} else {
- op->opc = xor_opc;
+ op->opc = INDEX_op_xor;
op->args[2] = arg_new_constant(ctx, 1);
}
return -1;
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
- TCGOpcode xor_opc, neg_opc, shr_opc;
+ TCGOpcode neg_opc, shr_opc;
TCGOpcode uext_opc = 0, sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)

switch (ctx->type) {
case TCG_TYPE_I32:
- xor_opc = INDEX_op_xor_i32;
shr_opc = INDEX_op_shr_i32;
neg_opc = INDEX_op_neg_i32;
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
}
break;
case TCG_TYPE_I64:
- xor_opc = INDEX_op_xor_i64;
shr_opc = INDEX_op_shr_i64;
neg_opc = INDEX_op_neg_i64;
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
op2->args[1] = ret;
op2->args[2] = arg_new_constant(ctx, -1);
} else if (inv) {
- op2 = opt_insert_after(ctx, op, xor_opc, 3);
+ op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
op2->args[0] = ret;
op2->args[1] = ret;
op2->args[2] = arg_new_constant(ctx, 1);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(sub2):
done = fold_sub2(&ctx, op);
break;
- CASE_OP_32_64_VEC(xor):
+ case INDEX_op_xor:
+ case INDEX_op_xor_vec:
done = fold_xor(&ctx, op);
break;
case INDEX_op_set_label:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_xor, ret, arg1, arg2);
}

void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_xor, ret, arg1, arg2);
} else {
tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
- OUTOP(INDEX_op_xor_i32, TCGOutOpBinary, outop_xor),
- OUTOP(INDEX_op_xor_i64, TCGOutOpBinary, outop_xor),
+ OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
};

#undef OUTOP
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_and:
case INDEX_op_mov:
case INDEX_op_or:
+ case INDEX_op_xor:
return has_type;

case INDEX_op_setcond_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i32:
case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
- case INDEX_op_xor_i32:
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i64:
case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
- case INDEX_op_xor_i64:
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_andc:
case INDEX_op_or:
case INDEX_op_orc:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
+ case INDEX_op_xor:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] | regs[r2];
break;
- CASE_32_64(xor)
+ case INDEX_op_xor:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ^ regs[r2];
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_andc:
case INDEX_op_or:
case INDEX_op_orc:
+ case INDEX_op_xor:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
case INDEX_op_eqv_i32:
case INDEX_op_eqv_i64:
case INDEX_op_nand_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

- | *t0* = *t1* | *t2*

- * - xor_i32/i64 *t0*, *t1*, *t2*
+ * - xor *t0*, *t1*, *t2*

- | *t0* = *t1* ^ *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
static void tgen_xor(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_xor_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
uint64_t s_mask;
+ TempOptInfo *t1, *t2;

if (fold_const2_commutative(ctx, op) ||
fold_xi_to_x(ctx, op, -1) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
return true;
}

- s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
+ t2 = arg_info(op->args[2]);
+ if (ti_is_const(t2)) {
+ /* Fold eqv r,x,i to xor r,x,~i. */
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ op->opc = INDEX_op_xor;
+ break;
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ op->opc = INDEX_op_xor_vec;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
+ return fold_xor(ctx, op);
+ }
+
+ t1 = arg_info(op->args[1]);
+ s_mask = t1->s_mask & t2->s_mask;
return fold_masks_s(ctx, op, s_mask);
}

--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 2 --
tcg/arm/tcg-target-has.h | 1 -
tcg/i386/tcg-target-has.h | 2 --
tcg/loongarch64/tcg-target-has.h | 2 --
tcg/mips/tcg-target-has.h | 2 --
tcg/ppc/tcg-target-has.h | 2 --
tcg/riscv/tcg-target-con-set.h | 1 -
tcg/riscv/tcg-target-con-str.h | 1 -
tcg/riscv/tcg-target-has.h | 2 --
tcg/s390x/tcg-target-con-set.h | 1 -
tcg/s390x/tcg-target-has.h | 2 --
tcg/sparc64/tcg-target-has.h | 2 --
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 2 --
tcg/tcg-op.c | 4 ++--
tcg/tcg.c | 8 +++----
tcg/tci.c | 2 --
tcg/aarch64/tcg-target.c.inc | 26 +++++++++------------
tcg/arm/tcg-target.c.inc | 4 ++++
tcg/i386/tcg-target.c.inc | 4 ++++
tcg/loongarch64/tcg-target.c.inc | 4 ++++
tcg/mips/tcg-target.c.inc | 4 ++++
tcg/ppc/tcg-target.c.inc | 22 +++++++++---------
tcg/riscv/tcg-target.c.inc | 37 ++++++++++++------------------
tcg/s390x/tcg-target.c.inc | 39 +++++++++++++-------------------
tcg/sparc64/tcg-target.c.inc | 4 ++++
tcg/tci/tcg-target.c.inc | 14 +++++++++---
27 files changed, 89 insertions(+), 106 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 1
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_clz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_clz_i64 1
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_clz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_clz_i64 1
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
-C_O1_I2(r, r, rJ)
C_O1_I2(r, rz, rN)
C_O1_I2(r, rz, rz)
C_N1_I2(r, r, rM)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('v', ALL_VECTOR_REGS)
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_S12)
-CONST('J', TCG_CT_CONST_J12)
CONST('K', TCG_CT_CONST_S5)
CONST('L', TCG_CT_CONST_CMP_VI)
CONST('N', TCG_CT_CONST_N12)
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rK)
-C_O1_I2(r, r, rNK)
C_O1_I2(r, r, rNKR)
C_O1_I2(r, rZ, r)
C_O1_I2(v, v, r)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_clz_i32 0
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_clz_i64 1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap16_i32 0
#define TCG_TARGET_HAS_bswap32_i32 0
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_div_i32 1
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_clz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_clz_i64 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_eqv_i32) {
+ if (tcg_op_supported(INDEX_op_eqv_i32, TCG_TYPE_I32, 0)) {
tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
} else {
tcg_gen_xor_i32(ret, arg1, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_eqv_i64) {
+ } else if (tcg_op_supported(INDEX_op_eqv_i64, TCG_TYPE_I64, 0)) {
tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
} else {
tcg_gen_xor_i64(ret, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
+ OUTOP(INDEX_op_eqv_i32, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_eqv_i64, TCGOutOpBinary, outop_eqv),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap32_i32;
case INDEX_op_not_i32:
return TCG_TARGET_HAS_not_i32;
- case INDEX_op_eqv_i32:
- return TCG_TARGET_HAS_eqv_i32;
case INDEX_op_nand_i32:
return TCG_TARGET_HAS_nand_i32;
case INDEX_op_nor_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap64_i64;
case INDEX_op_not_i64:
return TCG_TARGET_HAS_not_i64;
- case INDEX_op_eqv_i64:
- return TCG_TARGET_HAS_eqv_i64;
case INDEX_op_nand_i64:
return TCG_TARGET_HAS_nand_i64;
case INDEX_op_nor_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_andc:
+ case INDEX_op_eqv_i32:
+ case INDEX_op_eqv_i64:
case INDEX_op_or:
case INDEX_op_orc:
case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] | ~regs[r2];
break;
-#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
CASE_32_64(eqv)
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] ^ regs[r2]);
break;
-#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
CASE_32_64(nand)
tci_args_rrr(insn, &r0, &r1, &r2);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3510, EON, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_eqv,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
break;

- case INDEX_op_eqv_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_eqv_i64:
- if (c2) {
- tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2);
- } else {
- tcg_out_insn(s, 3510, EON, ext, a0, a1, a2);
- }
- break;
-
case INDEX_op_not_i64:
case INDEX_op_not_i32:
tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_mulsh_i64:
return C_O1_I2(r, r, r);

- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- return C_O1_I2(r, r, rL);
-
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, EQV | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_eqv,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_eqv_i32:
- if (const_args[2]) {
- tcg_out_xori32(s, args[0], args[1], ~args[2]);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_eqv_i64:
- tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
- break;
case INDEX_op_nand_i32:
case INDEX_op_nand_i64:
tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_eqv_i32:
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_nor_i32:
case INDEX_op_muluh_i32:
case INDEX_op_mulsh_i32:
- case INDEX_op_eqv_i64:
case INDEX_op_nand_i64:
case INDEX_op_nor_i64:
case INDEX_op_div_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_S12 0x100
#define TCG_CT_CONST_N12 0x200
#define TCG_CT_CONST_M12 0x400
-#define TCG_CT_CONST_J12 0x800
-#define TCG_CT_CONST_S5 0x1000
-#define TCG_CT_CONST_CMP_VI 0x2000
+#define TCG_CT_CONST_S5 0x800
+#define TCG_CT_CONST_CMP_VI 0x1000

#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
return 1;
}
- /*
- * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
- * Used to map ANDN back to ANDI, etc.
- */
- if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
- return 1;
- }
/*
* Sign extended from 5 bits: [-0x10, 0x0f].
* Used for vector-immediate.
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_zbb_rrr,
+ .out_rrr = tgen_eqv,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- if (c2) {
- tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2);
- } else {
- tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
- }
- break;
-
case INDEX_op_not_i32:
case INDEX_op_not_i64:
tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, rI);

- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
- return C_O1_I2(r, r, rJ);
-
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return C_O1_I2(r, rz, rN);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_eqv,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_eqv_i32:
- a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tcg_out_insn(s, RIL, XILF, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, NXRK, a0, a1, a2);
- }
- break;
case INDEX_op_nand_i32:
tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_eqv_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[2]) {
- tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
- tgen_xori(s, a0, ~a2);
- } else {
- tcg_out_insn(s, RRFa, NXGRK, a0, a1, a2);
- }
- break;
case INDEX_op_nand_i64:
tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i64:
return C_O1_I2(r, r, ri);

- case INDEX_op_eqv_i32:
- return C_O1_I2(r, r, ri);
- case INDEX_op_eqv_i64:
- return C_O1_I2(r, r, rNK);
-
case INDEX_op_nand_i32:
case INDEX_op_nand_i64:
case INDEX_op_nor_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
case INDEX_op_nand_i32:
case INDEX_op_nand_i64:
case INDEX_op_nor_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
.out_rrr = tgen_andc,
};

+static void tgen_eqv(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_op_rrr(s, glue(INDEX_op_eqv_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_eqv,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,

CASE_32_64(sub)
CASE_32_64(mul)
- CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
CASE_32_64(shl)
--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 6 ++++--
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 6 ++----
tcg/tci.c | 5 ++---
docs/devel/tcg-ops.rst | 2 +-
tcg/tci/tcg-target.c.inc | 2 +-
7 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(or, 1, 2, 0, TCG_OPF_INT)
DEF(orc, 1, 2, 0, TCG_OPF_INT)
DEF(xor, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
DEF(not_i32, 1, 1, 0, 0)
DEF(neg_i32, 1, 1, 0, 0)
-DEF(eqv_i32, 1, 2, 0, 0)
DEF(nand_i32, 1, 2, 0, 0)
DEF(nor_i32, 1, 2, 0, 0)
DEF(clz_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
DEF(not_i64, 1, 1, 0, 0)
DEF(neg_i64, 1, 1, 0, 0)
-DEF(eqv_i64, 1, 2, 0, 0)
DEF(nand_i64, 1, 2, 0, 0)
DEF(nor_i64, 1, 2, 0, 0)
DEF(clz_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
case INDEX_op_orc_vec:
return x | ~y;

- CASE_OP_32_64_VEC(eqv):
+ case INDEX_op_eqv:
+ case INDEX_op_eqv_vec:
return ~(x ^ y);

CASE_OP_32_64_VEC(nand):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_dup2_vec:
done = fold_dup2(&ctx, op);
break;
- CASE_OP_32_64_VEC(eqv):
+ case INDEX_op_eqv:
+ case INDEX_op_eqv_vec:
done = fold_eqv(&ctx, op);
break;
CASE_OP_32_64(extract):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (tcg_op_supported(INDEX_op_eqv_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_eqv, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_eqv, ret, arg1, arg2);
} else {
tcg_gen_xor_i32(ret, arg1, arg2);
tcg_gen_not_i32(ret, ret);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (tcg_op_supported(INDEX_op_eqv_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2);
+ } else if (tcg_op_supported(INDEX_op_eqv, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_eqv, ret, arg1, arg2);
} else {
tcg_gen_xor_i64(ret, arg1, arg2);
tcg_gen_not_i64(ret, ret);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
- OUTOP(INDEX_op_eqv_i32, TCGOutOpBinary, outop_eqv),
- OUTOP(INDEX_op_eqv_i64, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_andc:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
+ case INDEX_op_eqv:
case INDEX_op_or:
case INDEX_op_orc:
case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] | ~regs[r2];
break;
- CASE_32_64(eqv)
+ case INDEX_op_eqv:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] ^ regs[r2]);
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_andc:
+ case INDEX_op_eqv:
case INDEX_op_or:
case INDEX_op_orc:
case INDEX_op_xor:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
- case INDEX_op_eqv_i32:
- case INDEX_op_eqv_i64:
case INDEX_op_nand_i32:
case INDEX_op_nand_i64:
case INDEX_op_nor_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

- | *t0* = *t1* & ~\ *t2*

- * - eqv_i32/i64 *t0*, *t1*, *t2*
+ * - eqv *t0*, *t1*, *t2*

- | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
static void tgen_eqv(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_eqv_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 2 --
tcg/arm/tcg-target-has.h | 1 -
tcg/i386/tcg-target-has.h | 2 --
tcg/loongarch64/tcg-target-has.h | 2 --
tcg/mips/tcg-target-has.h | 2 --
tcg/ppc/tcg-target-has.h | 2 --
tcg/riscv/tcg-target-has.h | 2 --
tcg/s390x/tcg-target-has.h | 2 --
tcg/sparc64/tcg-target-has.h | 2 --
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 2 --
tcg/tcg-op.c | 4 ++--
tcg/tcg.c | 8 ++++----
tcg/tci.c | 2 --
tcg/aarch64/tcg-target.c.inc | 4 ++++
tcg/arm/tcg-target.c.inc | 4 ++++
tcg/i386/tcg-target.c.inc | 4 ++++
tcg/loongarch64/tcg-target.c.inc | 4 ++++
tcg/mips/tcg-target.c.inc | 4 ++++
tcg/ppc/tcg-target.c.inc | 17 +++++++++++------
tcg/riscv/tcg-target.c.inc | 4 ++++
tcg/s390x/tcg-target.c.inc | 24 ++++++++++++++++--------
tcg/sparc64/tcg-target.c.inc | 4 ++++
tcg/tci/tcg-target.c.inc | 14 +++++++++++---
24 files changed, 72 insertions(+), 45 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 1
#define TCG_TARGET_HAS_ctz_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 1
#define TCG_TARGET_HAS_ctz_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_clz_i64 1
#define TCG_TARGET_HAS_ctz_i64 1
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_nor_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muluh_i32 1
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_nor_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_clz_i64 1
#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_clz_i32 0
#define TCG_TARGET_HAS_ctz_i32 0
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3)
#define TCG_TARGET_HAS_clz_i64 1
#define TCG_TARGET_HAS_ctz_i64 0
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap16_i32 0
#define TCG_TARGET_HAS_bswap32_i32 0
#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_clz_i32 0
#define TCG_TARGET_HAS_ctz_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
#define TCG_TARGET_HAS_ctz_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
#define TCG_TARGET_HAS_ctz_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_div_i32 1
#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_clz_i32 1
#define TCG_TARGET_HAS_ctz_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_clz_i64 1
#define TCG_TARGET_HAS_ctz_i64 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_nand_i32) {
+ if (tcg_op_supported(INDEX_op_nand_i32, TCG_TYPE_I32, 0)) {
tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
} else {
tcg_gen_and_i32(ret, arg1, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
- } else if (TCG_TARGET_HAS_nand_i64) {
+ } else if (tcg_op_supported(INDEX_op_nand_i64, TCG_TYPE_I64, 0)) {
tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
} else {
tcg_gen_and_i64(ret, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_nand_i32, TCGOutOpBinary, outop_nand),
+ OUTOP(INDEX_op_nand_i64, TCGOutOpBinary, outop_nand),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap32_i32;
case INDEX_op_not_i32:
return TCG_TARGET_HAS_not_i32;
- case INDEX_op_nand_i32:
- return TCG_TARGET_HAS_nand_i32;
case INDEX_op_nor_i32:
return TCG_TARGET_HAS_nor_i32;
case INDEX_op_clz_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap64_i64;
case INDEX_op_not_i64:
return TCG_TARGET_HAS_not_i64;
- case INDEX_op_nand_i64:
- return TCG_TARGET_HAS_nand_i64;
case INDEX_op_nor_i64:
return TCG_TARGET_HAS_nor_i64;
case INDEX_op_clz_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_and:
case INDEX_op_andc:
case INDEX_op_eqv:
+ case INDEX_op_nand_i32:
+ case INDEX_op_nand_i64:
case INDEX_op_or:
case INDEX_op_orc:
case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] ^ regs[r2]);
break;
-#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
CASE_32_64(nand)
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ~(regs[r1] & regs[r2]);
break;
-#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
CASE_32_64(nor)
tci_args_rrr(insn, &r0, &r1, &r2);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+static void tgen_nand(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, NAND | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_nand,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_nand_i32:
- case INDEX_op_nand_i64:
- tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
- break;
case INDEX_op_nor_i32:
case INDEX_op_nor_i64:
tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_div_i32:
case INDEX_op_divu_i32:
case INDEX_op_rem_i32:
case INDEX_op_remu_i32:
- case INDEX_op_nand_i32:
case INDEX_op_nor_i32:
case INDEX_op_muluh_i32:
case INDEX_op_mulsh_i32:
- case INDEX_op_nand_i64:
case INDEX_op_nor_i64:
case INDEX_op_div_i64:
case INDEX_op_divu_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+static void tgen_nand(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NNRK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RRFa, NNGRK, a0, a1, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_misc3_rrr,
+ .out_rrr = tgen_nand,
+};
+
static void tgen_or(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_nand_i32:
- tcg_out_insn(s, RRFa, NNRK, args[0], args[1], args[2]);
- break;
case INDEX_op_nor_i32:
tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_nand_i64:
- tcg_out_insn(s, RRFa, NNGRK, args[0], args[1], args[2]);
- break;
case INDEX_op_nor_i64:
493
tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
494
break;
495
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
496
case INDEX_op_sub_i64:
497
return C_O1_I2(r, r, ri);
498
499
- case INDEX_op_nand_i32:
500
- case INDEX_op_nand_i64:
501
case INDEX_op_nor_i32:
502
case INDEX_op_nor_i64:
503
return C_O1_I2(r, r, r);
504
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
505
index XXXXXXX..XXXXXXX 100644
506
--- a/tcg/sparc64/tcg-target.c.inc
507
+++ b/tcg/sparc64/tcg-target.c.inc
508
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
509
.base.static_constraint = C_NotImplemented,
510
};
511
512
+static const TCGOutOpBinary outop_nand = {
513
+ .base.static_constraint = C_NotImplemented,
514
+};
515
+
516
static void tgen_or(TCGContext *s, TCGType type,
517
TCGReg a0, TCGReg a1, TCGReg a2)
518
{
519
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
520
index XXXXXXX..XXXXXXX 100644
521
--- a/tcg/tci/tcg-target.c.inc
522
+++ b/tcg/tci/tcg-target.c.inc
523
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
524
case INDEX_op_sub_i64:
525
case INDEX_op_mul_i32:
526
case INDEX_op_mul_i64:
527
- case INDEX_op_nand_i32:
528
- case INDEX_op_nand_i64:
529
case INDEX_op_nor_i32:
530
case INDEX_op_nor_i64:
531
case INDEX_op_shl_i32:
532
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
533
.out_rrr = tgen_eqv,
534
};
535
536
+static void tgen_nand(TCGContext *s, TCGType type,
537
+ TCGReg a0, TCGReg a1, TCGReg a2)
538
+{
539
+ tcg_out_op_rrr(s, glue(INDEX_op_nand_i,TCG_TARGET_REG_BITS), a0, a1, a2);
540
+}
541
+
542
+static const TCGOutOpBinary outop_nand = {
543
+ .base.static_constraint = C_O1_I2(r, r, r),
544
+ .out_rrr = tgen_nand,
545
+};
546
+
547
static void tgen_or(TCGContext *s, TCGType type,
548
TCGReg a0, TCGReg a1, TCGReg a2)
549
{
550
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
551
552
CASE_32_64(sub)
553
CASE_32_64(mul)
554
- CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
555
CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
556
CASE_32_64(shl)
557
CASE_32_64(shr)
558
--
559
2.43.0
560
561
Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    | 3 +--
 tcg/optimize.c           | 6 ++++--
 tcg/tcg-op.c             | 8 ++++----
 tcg/tcg.c                | 6 ++----
 tcg/tci.c                | 5 ++---
 docs/devel/tcg-ops.rst   | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 7 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
 DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(nand, 1, 2, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(bswap16_i32, 1, 1, 1, 0)
 DEF(bswap32_i32, 1, 1, 1, 0)
 DEF(not_i32, 1, 1, 0, 0)
 DEF(neg_i32, 1, 1, 0, 0)
-DEF(nand_i32, 1, 2, 0, 0)
 DEF(nor_i32, 1, 2, 0, 0)
 DEF(clz_i32, 1, 2, 0, 0)
 DEF(ctz_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap32_i64, 1, 1, 1, 0)
 DEF(bswap64_i64, 1, 1, 1, 0)
 DEF(not_i64, 1, 1, 0, 0)
 DEF(neg_i64, 1, 1, 0, 0)
-DEF(nand_i64, 1, 2, 0, 0)
 DEF(nor_i64, 1, 2, 0, 0)
 DEF(clz_i64, 1, 2, 0, 0)
 DEF(ctz_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_eqv_vec:
         return ~(x ^ y);

-    CASE_OP_32_64_VEC(nand):
+    case INDEX_op_nand:
+    case INDEX_op_nand_vec:
         return ~(x & y);

     CASE_OP_32_64_VEC(nor):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(mulu2):
             done = fold_multiply2(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(nand):
+        case INDEX_op_nand:
+        case INDEX_op_nand_vec:
             done = fold_nand(&ctx, op);
             break;
         CASE_OP_32_64(neg):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

 void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_nand_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_nand, ret, arg1, arg2);
     } else {
         tcg_gen_and_i32(ret, arg1, arg2);
         tcg_gen_not_i32(ret, ret);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
         tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-    } else if (tcg_op_supported(INDEX_op_nand_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_nand, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_nand, ret, arg1, arg2);
     } else {
         tcg_gen_and_i64(ret, arg1, arg2);
         tcg_gen_not_i64(ret, ret);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
-    OUTOP(INDEX_op_nand_i32, TCGOutOpBinary, outop_nand),
-    OUTOP(INDEX_op_nand_i64, TCGOutOpBinary, outop_nand),
+    OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_eqv:
-    case INDEX_op_nand_i32:
-    case INDEX_op_nand_i64:
+    case INDEX_op_nand:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ~(regs[r1] ^ regs[r2]);
         break;
-    CASE_32_64(nand)
+    case INDEX_op_nand:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ~(regs[r1] & regs[r2]);
         break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_eqv:
+    case INDEX_op_nand:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
-    case INDEX_op_nand_i32:
-    case INDEX_op_nand_i64:
     case INDEX_op_nor_i32:
     case INDEX_op_nor_i64:
     case INDEX_op_div_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

     - | *t0* = ~(*t1* ^ *t2*), or equivalently, *t0* = *t1* ^ ~\ *t2*

-   * - nand_i32/i64 *t0*, *t1*, *t2*
+   * - nand *t0*, *t1*, *t2*

     - | *t0* = ~(*t1* & *t2*)

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
 static void tgen_nand(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_nand_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
 }

 static const TCGOutOpBinary outop_nand = {
--
2.43.0

Deleted patch

The instruction set does not implement nor with immediate.
There is no reason to pretend that we do.
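
For illustration, here is the dropped fallback with comments spelling
out why it was correct (a restatement of the hunk below, not new code;
c2/a0/a1/a2 are the backend's operand locals):

    if (c2) {                                      /* a2 is a constant */
        tcg_out_opc_ori(s, a0, a1, a2);            /* a0 = a1 | imm */
        tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);  /* a0 = ~(a0 | 0) */
    } else {
        tcg_out_opc_nor(s, a0, a1, a2);            /* a0 = ~(a1 | a2) */
    }

With the constraint tightened to C_O1_I2(r, r, r), the generic code
instead materializes the constant in a register and a single nor
instruction suffices.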

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,

     case INDEX_op_nor_i32:
     case INDEX_op_nor_i64:
-        if (c2) {
-            tcg_out_opc_ori(s, a0, a1, a2);
-            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
-        } else {
-            tcg_out_opc_nor(s, a0, a1, a2);
-        }
+        tcg_out_opc_nor(s, a0, a1, a2);
         break;

     case INDEX_op_extract_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_nor_i32:
     case INDEX_op_nor_i64:
-        /* LoongArch reg-imm bitops have their imms ZERO-extended */
-        return C_O1_I2(r, r, rU);
+        return C_O1_I2(r, r, r);

     case INDEX_op_clz_i32:
     case INDEX_op_clz_i64:
--
2.43.0

Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-has.h     |  2 --
 tcg/arm/tcg-target-has.h         |  1 -
 tcg/i386/tcg-target-has.h        |  2 --
 tcg/loongarch64/tcg-target-has.h |  2 --
 tcg/mips/tcg-target-has.h        |  2 --
 tcg/ppc/tcg-target-has.h         |  2 --
 tcg/riscv/tcg-target-has.h       |  2 --
 tcg/s390x/tcg-target-has.h       |  2 --
 tcg/sparc64/tcg-target-has.h     |  2 --
 tcg/tcg-has.h                    |  1 -
 tcg/tci/tcg-target-has.h         |  2 --
 tcg/tcg-op.c                     |  4 ++--
 tcg/tcg.c                        |  8 ++++----
 tcg/tci.c                        |  2 --
 tcg/aarch64/tcg-target.c.inc     |  4 ++++
 tcg/arm/tcg-target.c.inc         |  4 ++++
 tcg/i386/tcg-target.c.inc        |  4 ++++
 tcg/loongarch64/tcg-target.c.inc | 20 +++++++++++---------
 tcg/mips/tcg-target.c.inc        | 17 +++++++++++------
 tcg/ppc/tcg-target.c.inc         | 18 +++++++++++-------
 tcg/riscv/tcg-target.c.inc       |  4 ++++
 tcg/s390x/tcg-target.c.inc       | 28 ++++++++++++++++------------
 tcg/sparc64/tcg-target.c.inc     |  4 ++++
 tcg/tci/tcg-target.c.inc         | 14 +++++++++++---
 24 files changed, 86 insertions(+), 65 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_not_i32          1
 #define TCG_TARGET_HAS_rot_i32          1
-#define TCG_TARGET_HAS_nor_i32          0
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap64_i64      1
 #define TCG_TARGET_HAS_not_i64          1
 #define TCG_TARGET_HAS_rot_i64          1
-#define TCG_TARGET_HAS_nor_i64          0
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          1
 #define TCG_TARGET_HAS_ctpop_i64        0
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_not_i32          1
 #define TCG_TARGET_HAS_rot_i32          1
-#define TCG_TARGET_HAS_nor_i32          0
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          use_armv7_instructions
 #define TCG_TARGET_HAS_ctpop_i32        0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_not_i32          1
-#define TCG_TARGET_HAS_nor_i32          0
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        have_popcnt
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
 #define TCG_TARGET_HAS_not_i64          1
-#define TCG_TARGET_HAS_nor_i64          0
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          1
 #define TCG_TARGET_HAS_ctpop_i64        have_popcnt
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_not_i32          1
-#define TCG_TARGET_HAS_nor_i32          1
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
 #define TCG_TARGET_HAS_not_i64          1
-#define TCG_TARGET_HAS_nor_i64          1
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          1
 #define TCG_TARGET_HAS_ctpop_i64        0
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_div_i32          1
 #define TCG_TARGET_HAS_rem_i32          1
 #define TCG_TARGET_HAS_not_i32          1
-#define TCG_TARGET_HAS_nor_i32          1
 #define TCG_TARGET_HAS_mulu2_i32        (!use_mips32r6_instructions)
 #define TCG_TARGET_HAS_muls2_i32        (!use_mips32r6_instructions)
 #define TCG_TARGET_HAS_muluh_i32        1
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_div_i64          1
 #define TCG_TARGET_HAS_rem_i64          1
 #define TCG_TARGET_HAS_not_i64          1
-#define TCG_TARGET_HAS_nor_i64          1
 #define TCG_TARGET_HAS_add2_i64         0
 #define TCG_TARGET_HAS_sub2_i64         0
 #define TCG_TARGET_HAS_mulu2_i64        (!use_mips32r6_instructions)
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_not_i32          1
-#define TCG_TARGET_HAS_nor_i32          1
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          have_isa_3_00
 #define TCG_TARGET_HAS_ctpop_i32        have_isa_2_06
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
 #define TCG_TARGET_HAS_not_i64          1
-#define TCG_TARGET_HAS_nor_i64          1
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          have_isa_3_00
 #define TCG_TARGET_HAS_ctpop_i64        have_isa_2_06
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i32      (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap32_i32      (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_not_i32          1
-#define TCG_TARGET_HAS_nor_i32          0
 #define TCG_TARGET_HAS_clz_i32          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctz_i32          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctpop_i32        (cpuinfo & CPUINFO_ZBB)
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64      (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap64_i64      (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_not_i64          1
-#define TCG_TARGET_HAS_nor_i64          0
 #define TCG_TARGET_HAS_clz_i64          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctz_i64          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctpop_i64        (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_bswap16_i32    1
 #define TCG_TARGET_HAS_bswap32_i32    1
 #define TCG_TARGET_HAS_not_i32        HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nor_i32        HAVE_FACILITY(MISC_INSN_EXT3)
 #define TCG_TARGET_HAS_clz_i32        0
 #define TCG_TARGET_HAS_ctz_i32        0
 #define TCG_TARGET_HAS_ctpop_i32      1
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_bswap32_i64    1
 #define TCG_TARGET_HAS_bswap64_i64    1
 #define TCG_TARGET_HAS_not_i64        HAVE_FACILITY(MISC_INSN_EXT3)
-#define TCG_TARGET_HAS_nor_i64        HAVE_FACILITY(MISC_INSN_EXT3)
 #define TCG_TARGET_HAS_clz_i64        1
 #define TCG_TARGET_HAS_ctz_i64        0
 #define TCG_TARGET_HAS_ctpop_i64      1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_bswap16_i32      0
 #define TCG_TARGET_HAS_bswap32_i32      0
 #define TCG_TARGET_HAS_not_i32          1
-#define TCG_TARGET_HAS_nor_i32          0
 #define TCG_TARGET_HAS_clz_i32          0
 #define TCG_TARGET_HAS_ctz_i32          0
 #define TCG_TARGET_HAS_ctpop_i32        0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_bswap32_i64      0
 #define TCG_TARGET_HAS_bswap64_i64      0
 #define TCG_TARGET_HAS_not_i64          1
-#define TCG_TARGET_HAS_nor_i64          0
 #define TCG_TARGET_HAS_clz_i64          0
 #define TCG_TARGET_HAS_ctz_i64          0
 #define TCG_TARGET_HAS_ctpop_i64        0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap32_i64      0
 #define TCG_TARGET_HAS_bswap64_i64      0
 #define TCG_TARGET_HAS_not_i64          0
-#define TCG_TARGET_HAS_nor_i64          0
 #define TCG_TARGET_HAS_clz_i64          0
 #define TCG_TARGET_HAS_ctz_i64          0
 #define TCG_TARGET_HAS_ctpop_i64        0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_div_i32          1
 #define TCG_TARGET_HAS_rem_i32          1
 #define TCG_TARGET_HAS_extract2_i32     0
-#define TCG_TARGET_HAS_nor_i32          1
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_extract2_i64     0
 #define TCG_TARGET_HAS_div_i64          1
 #define TCG_TARGET_HAS_rem_i64          1
-#define TCG_TARGET_HAS_nor_i64          1
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          1
 #define TCG_TARGET_HAS_ctpop_i64        1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

 void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (TCG_TARGET_HAS_nor_i32) {
+    if (tcg_op_supported(INDEX_op_nor_i32, TCG_TYPE_I32, 0)) {
         tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
     } else {
         tcg_gen_or_i32(ret, arg1, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
         tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-    } else if (TCG_TARGET_HAS_nor_i64) {
+    } else if (tcg_op_supported(INDEX_op_nor_i64, TCG_TYPE_I64, 0)) {
         tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
     } else {
         tcg_gen_or_i64(ret, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
+    OUTOP(INDEX_op_nor_i32, TCGOutOpBinary, outop_nor),
+    OUTOP(INDEX_op_nor_i64, TCGOutOpBinary, outop_nor),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_bswap32_i32;
     case INDEX_op_not_i32:
         return TCG_TARGET_HAS_not_i32;
-    case INDEX_op_nor_i32:
-        return TCG_TARGET_HAS_nor_i32;
     case INDEX_op_clz_i32:
         return TCG_TARGET_HAS_clz_i32;
     case INDEX_op_ctz_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_bswap64_i64;
     case INDEX_op_not_i64:
         return TCG_TARGET_HAS_not_i64;
-    case INDEX_op_nor_i64:
-        return TCG_TARGET_HAS_nor_i64;
     case INDEX_op_clz_i64:
         return TCG_TARGET_HAS_clz_i64;
     case INDEX_op_ctz_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_andc:
     case INDEX_op_eqv:
     case INDEX_op_nand:
+    case INDEX_op_nor_i32:
+    case INDEX_op_nor_i64:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ~(regs[r1] & regs[r2]);
         break;
-#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
     CASE_32_64(nor)
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ~(regs[r1] | regs[r2]);
         break;
-#endif

         /* Arithmetic operations (32 bit). */

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_NotImplemented,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_NotImplemented,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_NotImplemented,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_nor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_nor(s, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_nor,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
         break;

-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
-        tcg_out_opc_nor(s, a0, a1, a2);
-        break;
-
     case INDEX_op_extract_i32:
         if (a2 == 0 && args[3] <= 12) {
             tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);

-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
-        return C_O1_I2(r, r, r);
-
     case INDEX_op_clz_i32:
     case INDEX_op_clz_i64:
     case INDEX_op_ctz_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_nor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_NOR, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_nor,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
             break;
         }
         goto do_binaryv;
-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
-        i1 = OPC_NOR;
-        goto do_binaryv;

     case INDEX_op_mul_i32:
         if (use_mips32_instructions) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_divu_i32:
     case INDEX_op_rem_i32:
     case INDEX_op_remu_i32:
-    case INDEX_op_nor_i32:
     case INDEX_op_setcond_i32:
     case INDEX_op_mul_i64:
     case INDEX_op_mulsh_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_divu_i64:
     case INDEX_op_rem_i64:
     case INDEX_op_remu_i64:
-    case INDEX_op_nor_i64:
     case INDEX_op_setcond_i64:
         return C_O1_I2(r, rz, rz);
     case INDEX_op_muls2_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .out_rrr = tgen_nand,
 };

+static void tgen_nor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out32(s, NOR | SAB(a1, a0, a2));
+}
+
+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_nor,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
-        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
-        break;
-
     case INDEX_op_clz_i32:
         tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                       args[2], const_args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_divu_i32:
     case INDEX_op_rem_i32:
     case INDEX_op_remu_i32:
-    case INDEX_op_nor_i32:
     case INDEX_op_muluh_i32:
     case INDEX_op_mulsh_i32:
-    case INDEX_op_nor_i64:
     case INDEX_op_div_i64:
     case INDEX_op_divu_i64:
     case INDEX_op_rem_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_NotImplemented,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .out_rrr = tgen_nand,
 };

+static void tgen_nor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_insn(s, RRFa, NORK, a0, a1, a2);
+    } else {
+        tcg_out_insn(s, RRFa, NOGRK, a0, a1, a2);
+    }
+}
+
+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_misc3_rrr,
+    .out_rrr = tgen_nor,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    case INDEX_op_nor_i32:
-        tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[2]);
-        break;
-
     case INDEX_op_neg_i32:
         tcg_out_insn(s, RR, LCR, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    case INDEX_op_nor_i64:
-        tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[2]);
-        break;
-
     case INDEX_op_neg_i64:
         tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_sub_i64:
         return C_O1_I2(r, r, ri);

-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
-        return C_O1_I2(r, r, r);
-
     case INDEX_op_mul_i32:
         return (HAVE_FACILITY(MISC_INSN_EXT2)
                 ? C_O1_I2(r, r, ri)
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .base.static_constraint = C_NotImplemented,
 };

+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_NotImplemented,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
     case INDEX_op_shl_i32:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
     .out_rrr = tgen_nand,
 };

+static void tgen_nor(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, glue(INDEX_op_nor_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_nor = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_nor,
+};
+
 static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,

     CASE_32_64(sub)
     CASE_32_64(mul)
-    CASE_32_64(nor)     /* Optional (TCG_TARGET_HAS_nor_*). */
     CASE_32_64(shl)
     CASE_32_64(shr)
     CASE_32_64(sar)
--
2.43.0

Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    | 3 +--
 tcg/optimize.c           | 6 ++++--
 tcg/tcg-op.c             | 8 ++++----
 tcg/tcg.c                | 6 ++----
 tcg/tci.c                | 5 ++---
 docs/devel/tcg-ops.rst   | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 7 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(nand, 1, 2, 0, TCG_OPF_INT)
+DEF(nor, 1, 2, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(bswap16_i32, 1, 1, 1, 0)
 DEF(bswap32_i32, 1, 1, 1, 0)
 DEF(not_i32, 1, 1, 0, 0)
 DEF(neg_i32, 1, 1, 0, 0)
-DEF(nor_i32, 1, 2, 0, 0)
 DEF(clz_i32, 1, 2, 0, 0)
 DEF(ctz_i32, 1, 2, 0, 0)
 DEF(ctpop_i32, 1, 1, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap32_i64, 1, 1, 1, 0)
 DEF(bswap64_i64, 1, 1, 1, 0)
 DEF(not_i64, 1, 1, 0, 0)
 DEF(neg_i64, 1, 1, 0, 0)
-DEF(nor_i64, 1, 2, 0, 0)
 DEF(clz_i64, 1, 2, 0, 0)
 DEF(ctz_i64, 1, 2, 0, 0)
 DEF(ctpop_i64, 1, 1, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_nand_vec:
         return ~(x & y);

-    CASE_OP_32_64_VEC(nor):
+    case INDEX_op_nor:
+    case INDEX_op_nor_vec:
         return ~(x | y);

     case INDEX_op_clz_i32:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(neg):
             done = fold_neg(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(nor):
+        case INDEX_op_nor:
+        case INDEX_op_nor_vec:
             done = fold_nor(&ctx, op);
             break;
         CASE_OP_32_64_VEC(not):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

 void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_nor_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_nor, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_nor, ret, arg1, arg2);
     } else {
         tcg_gen_or_i32(ret, arg1, arg2);
         tcg_gen_not_i32(ret, ret);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2));
         tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2));
-    } else if (tcg_op_supported(INDEX_op_nor_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_nor, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_nor, ret, arg1, arg2);
     } else {
         tcg_gen_or_i64(ret, arg1, arg2);
         tcg_gen_not_i64(ret, ret);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
-    OUTOP(INDEX_op_nor_i32, TCGOutOpBinary, outop_nor),
-    OUTOP(INDEX_op_nor_i64, TCGOutOpBinary, outop_nor),
+    OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_andc:
     case INDEX_op_eqv:
     case INDEX_op_nand:
-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
+    case INDEX_op_nor:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ~(regs[r1] & regs[r2]);
         break;
-    CASE_32_64(nor)
+    case INDEX_op_nor:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ~(regs[r1] | regs[r2]);
         break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_andc:
     case INDEX_op_eqv:
     case INDEX_op_nand:
+    case INDEX_op_nor:
     case INDEX_op_or:
     case INDEX_op_orc:
     case INDEX_op_xor:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
-    case INDEX_op_nor_i32:
-    case INDEX_op_nor_i64:
     case INDEX_op_div_i32:
     case INDEX_op_div_i64:
     case INDEX_op_rem_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

     - | *t0* = ~(*t1* & *t2*)

-   * - nor_i32/i64 *t0*, *t1*, *t2*
+   * - nor *t0*, *t1*, *t2*

     - | *t0* = ~(*t1* | *t2*)

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_nand = {
 static void tgen_nor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_nor_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
 }

 static const TCGOutOpBinary outop_nor = {
--
2.43.0

Deleted patch

In 7536b82d288 we lost the rI constraint that allowed the use of
RSB to perform reg = imm - reg. At the same time, drop support
for reg = reg - imm, which is now transformed generically to
addition, and need not be handled by the backend.
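
To restate the resulting mapping (an informal sketch, not part of the
patch; the TCG opcode spellings are illustrative):

    /*
     * With the C_O1_I2(r, rI, r) constraint:
     *
     *   sub t0, t1, t2   ->  SUB t0, t1, t2     reg - reg
     *   sub t0, imm, t2  ->  RSB t0, t2, #imm   imm - reg (reverse subtract)
     *   sub t0, t1, imm  ->  never reaches the backend; the optimizer
     *                        has already rewritten it as add t0, t1, -imm.
     */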

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target-con-set.h |  1 +
 tcg/arm/tcg-target.c.inc     | 11 ++++-------
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-con-set.h
+++ b/tcg/arm/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rIK)
 C_O1_I2(r, r, rIN)
 C_O1_I2(r, r, ri)
+C_O1_I2(r, rI, r)
 C_O1_I2(r, rZ, rZ)
 C_O1_I2(w, 0, w)
 C_O1_I2(w, w, w)
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         break;
     case INDEX_op_sub_i32:
         if (const_args[1]) {
-            if (const_args[2]) {
-                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
-            } else {
-                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
-                               args[0], args[2], args[1], 1);
-            }
+            tcg_out_dat_imm(s, COND_AL, ARITH_RSB,
+                            args[0], args[2], encode_imm_nofail(args[1]));
         } else {
             tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                             args[0], args[1], args[2], const_args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i32:
         return C_O0_I2(r, r);

-    case INDEX_op_sub_i32:
     case INDEX_op_setcond_i32:
     case INDEX_op_negsetcond_i32:
         return C_O1_I2(r, r, rIN);
+    case INDEX_op_sub_i32:
+        return C_O1_I2(r, rI, r);

     case INDEX_op_clz_i32:
     case INDEX_op_ctz_i32:
--
2.43.0

Deleted patch

Create a special subclass for sub, because two backends can
support "subtract from immediate". Drop all backend support
for an immediate as the second operand, as we transform sub
to add during the optimize pass.
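
A condensed restatement of the dispatch this adds to tcg_reg_alloc_op
(sketch only; the real hunk below uses new_args and a local out pointer):

    /* Only the first source operand may be constant: "reg - imm" was
       already folded to "add reg, -imm" by the optimizer. */
    tcg_debug_assert(!const_args[2]);
    if (const_args[1]) {
        outop_sub.out_rir(s, type, a0, a1, a2);   /* a0 = imm - a2 */
    } else {
        outop_sub.out_rrr(s, type, a0, a1, a2);   /* a0 = a1 - a2 */
    }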

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target-con-set.h    |  1 -
 tcg/ppc/tcg-target-con-set.h     |  3 +-
 tcg/riscv/tcg-target-con-set.h   |  1 -
 tcg/riscv/tcg-target-con-str.h   |  1 -
 tcg/tcg.c                        | 30 ++++++++++++++++--
 tcg/aarch64/tcg-target.c.inc     | 24 +++++++--------
 tcg/arm/tcg-target.c.inc         | 29 +++++++++++-------
 tcg/i386/tcg-target.c.inc        | 23 +++++++-------
 tcg/loongarch64/tcg-target.c.inc | 32 +++++++++-----------
 tcg/mips/tcg-target.c.inc        | 31 ++++++++-----------
 tcg/ppc/tcg-target.c.inc         | 52 +++++++++++---------------------
 tcg/riscv/tcg-target.c.inc       | 45 +++++++++------------------
 tcg/s390x/tcg-target.c.inc       | 41 +++++++++++--------------
 tcg/sparc64/tcg-target.c.inc     | 16 +++++++---
 tcg/tci/tcg-target.c.inc         | 14 +++++++--
 15 files changed, 169 insertions(+), 174 deletions(-)

diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
 C_O1_I2(r, r, rIK)
 C_O1_I2(r, r, rJ)
 C_O1_I2(r, r, rzW)
-C_O1_I2(r, rz, rN)
 C_O1_I2(r, rz, rz)
 C_O1_I4(r, rz, rz, rz, 0)
 C_O1_I4(r, rz, rz, rz, rz)
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I1(v, r)
 C_O1_I1(v, v)
 C_O1_I1(v, vr)
 C_O1_I2(r, 0, rZ)
-C_O1_I2(r, rI, ri)
-C_O1_I2(r, rI, rT)
+C_O1_I2(r, rI, r)
 C_O1_I2(r, r, r)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rC)
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
 C_O1_I2(r, r, r)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
-C_O1_I2(r, rz, rN)
 C_O1_I2(r, rz, rz)
 C_N1_I2(r, r, rM)
 C_O1_I4(r, r, rI, rM, rM)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('v', ALL_VECTOR_REGS)
 CONST('I', TCG_CT_CONST_S12)
 CONST('K', TCG_CT_CONST_S5)
 CONST('L', TCG_CT_CONST_CMP_VI)
-CONST('N', TCG_CT_CONST_N12)
 CONST('M', TCG_CT_CONST_M12)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBinary {
                       TCGReg a0, TCGReg a1, tcg_target_long a2);
 } TCGOutOpBinary;

+typedef struct TCGOutOpSubtract {
+    TCGOutOp base;
+    void (*out_rrr)(TCGContext *s, TCGType type,
+                    TCGReg a0, TCGReg a1, TCGReg a2);
+    void (*out_rir)(TCGContext *s, TCGType type,
+                    TCGReg a0, tcg_target_long a1, TCGReg a2);
+} TCGOutOpSubtract;
+
 #include "tcg-target.c.inc"

 #ifndef CONFIG_TCG_INTERPRETER
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
+    OUTOP(INDEX_op_sub_i32, TCGOutOpSubtract, outop_sub),
+    OUTOP(INDEX_op_sub_i64, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 };

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st8_i32:
     case INDEX_op_st16_i32:
     case INDEX_op_st_i32:
-    case INDEX_op_sub_i32:
     case INDEX_op_neg_i32:
     case INDEX_op_mul_i32:
     case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-    case INDEX_op_sub_i64:
     case INDEX_op_neg_i64:
     case INDEX_op_mul_i64:
     case INDEX_op_shl_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;

+    case INDEX_op_sub_i32:
+    case INDEX_op_sub_i64:
+        {
+            const TCGOutOpSubtract *out = &outop_sub;
+
+            /*
+             * Constants should never appear in the second source operand.
+             * These are folded to add with negative constant.
+             */
+            tcg_debug_assert(!const_args[2]);
+            if (const_args[1]) {
+                out->out_rir(s, type, new_args[0], new_args[1], new_args[2]);
+            } else {
+                out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
+            }
+        }
+        break;
+
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
     .out_rrr = tgen_orc,
 };

+static void tgen_sub(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3502, SUB, type, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_sub,
+};
+
 static void tgen_xor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;

-    case INDEX_op_sub_i32:
-    case INDEX_op_sub_i64:
-        if (c2) {
-            tgen_addi(s, ext, a0, a1, -a2);
-        } else {
-            tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_neg_i64:
     case INDEX_op_neg_i32:
         tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(rz, r);

-    case INDEX_op_sub_i32:
-    case INDEX_op_sub_i64:
-        return C_O1_I2(r, r, rA);
-
     case INDEX_op_setcond_i32:
     case INDEX_op_setcond_i64:
     case INDEX_op_negsetcond_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_sub(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subfi(TCGContext *s, TCGType type,
+                       TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+    tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+    .base.static_constraint = C_O1_I2(r, rI, r),
+    .out_rrr = tgen_sub,
+    .out_rir = tgen_subfi,
+};
+
 static void tgen_xor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
                         ARITH_MVN, args[0], 0, args[3], const_args[3]);
         break;
-    case INDEX_op_sub_i32:
-        if (const_args[1]) {
-            tcg_out_dat_imm(s, COND_AL, ARITH_RSB,
-                            args[0], args[2], encode_imm_nofail(args[1]));
-        } else {
-            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
-                            args[0], args[1], args[2], const_args[2]);
-        }
-        break;
     case INDEX_op_add2_i32:
         a0 = args[0], a1 = args[1], a2 = args[2];
         a3 = args[3], a4 = args[4], a5 = args[5];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_setcond_i32:
     case INDEX_op_negsetcond_i32:
         return C_O1_I2(r, r, rIN);
-    case INDEX_op_sub_i32:
-        return C_O1_I2(r, rI, r);

     case INDEX_op_clz_i32:
     case INDEX_op_ctz_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_sub(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tgen_arithr(s, ARITH_SUB + rexw, a0, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+    .base.static_constraint = C_O1_I2(r, 0, r),
+    .out_rrr = tgen_sub,
+};
+
 static void tgen_xor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    OP_32_64(sub):
-        c = ARITH_SUB;
-        if (const_a2) {
-            tgen_arithi(s, c + rexw, a0, a2, 0);
-        } else {
-            tgen_arithr(s, c + rexw, a0, a2);
-        }
-        break;
-
     OP_32_64(mul):
         if (const_a2) {
             int32_t val;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(re, r);

-    case INDEX_op_sub_i32:
-    case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
         return C_O1_I2(r, 0, re);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
     .out_rrr = tgen_orc,
 };

+static void tgen_sub(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_opc_sub_w(s, a0, a1, a2);
+    } else {
+        tcg_out_opc_sub_d(s, a0, a1, a2);
+    }
+}
+
+static const TCGOutOpSubtract outop_sub = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_sub,
+};
+
 static void tgen_xor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    case INDEX_op_sub_i32:
-        if (c2) {
-            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
-        } else {
-            tcg_out_opc_sub_w(s, a0, a1, a2);
-        }
-        break;
-    case INDEX_op_sub_i64:
-        if (c2) {
-            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
-        } else {
-            tcg_out_opc_sub_d(s, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_neg_i32:
         tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
         /* Must deposit into the same register as input */
         return C_O1_I2(r, 0, rz);

-    case INDEX_op_sub_i32:
     case INDEX_op_setcond_i32:
         return C_O1_I2(r, rz, ri);
-    case INDEX_op_sub_i64:
     case INDEX_op_setcond_i64:
         return C_O1_I2(r, rz, rJ);

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_sub(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SUBU : OPC_DSUBU;
+    tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_sub,
+};
+
 static void tgen_xor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_ldst(s, i1, a0, a1, a2);
         break;

-    do_binaryv:
-        tcg_out_opc_reg(s, i1, a0, a1, a2);
-        break;
-
-    case INDEX_op_sub_i32:
-        i1 = OPC_SUBU, i2 = OPC_ADDIU;
-        goto do_subtract;
-    case INDEX_op_sub_i64:
-        i1 = OPC_DSUBU, i2 = OPC_DADDIU;
-    do_subtract:
-        if (c2) {
-            tcg_out_opc_imm(s, i2, a0, a1, -a2);
-            break;
-        }
-        goto do_binaryv;
-
     case INDEX_op_mul_i32:
         if (use_mips32_instructions) {
             tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(rz, r);

-    case INDEX_op_sub_i32:
-    case INDEX_op_sub_i64:
-        return C_O1_I2(r, rz, rN);
     case INDEX_op_mul_i32:
     case INDEX_op_mulsh_i32:
     case INDEX_op_muluh_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
     .out_rrr = tgen_orc,
 };

+static void tgen_sub(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out32(s, SUBF | TAB(a0, a2, a1));
+}
+
+static void tgen_subfi(TCGContext *s, TCGType type,
+                       TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+    tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
+}
+
+static const TCGOutOpSubtract outop_sub = {
+    .base.static_constraint = C_O1_I2(r, rI, r),
+    .out_rrr = tgen_sub,
+    .out_rir = tgen_subfi,
+};
+
 static void tgen_xor(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
         break;

-    case INDEX_op_sub_i32:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[1]) {
-            if (const_args[2]) {
-                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
-            } else {
-                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
-            }
-        } else if (const_args[2]) {
-            tgen_addi(s, type, a0, a1, (int32_t)-a2);
-        } else {
-            tcg_out32(s, SUBF | TAB(a0, a2, a1));
-        }
-        break;
-
     case INDEX_op_clz_i32:
         tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                       args[2], const_args[2]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
         break;

-    case INDEX_op_sub_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        if (const_args[1]) {
-            if (const_args[2]) {
-                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
-            } else {
-                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
-            }
-        } else if (const_args[2]) {
-            tgen_addi(s, type, a0, a1, -a2);
-        } else {
-            tcg_out32(s, SUBF | TAB(a0, a2, a1));
-        }
-        break;
-
     case INDEX_op_shl_i64:
         if (const_args[2]) {
             /* Limit immediate shift count lest we create an illegal insn. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_muluh_i64:
         return C_O1_I2(r, r, r);

-    case INDEX_op_sub_i32:
-        return C_O1_I2(r, rI, ri);
-    case INDEX_op_sub_i64:
-        return C_O1_I2(r, rI, rT);
     case INDEX_op_clz_i32:
     case INDEX_op_ctz_i32:
     case INDEX_op_clz_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
496
+++ b/tcg/riscv/tcg-target.c.inc
497
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
498
}
499
500
#define TCG_CT_CONST_S12 0x100
501
-#define TCG_CT_CONST_N12 0x200
502
-#define TCG_CT_CONST_M12 0x400
503
-#define TCG_CT_CONST_S5 0x800
504
-#define TCG_CT_CONST_CMP_VI 0x1000
505
+#define TCG_CT_CONST_M12 0x200
506
+#define TCG_CT_CONST_S5 0x400
507
+#define TCG_CT_CONST_CMP_VI 0x800
508
509
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
510
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
511
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
512
if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
513
return 1;
514
}
515
- /*
516
- * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
517
- * Used for subtraction, where a constant must be handled by ADDI.
518
- */
519
- if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
520
- return 1;
521
- }
522
/*
523
* Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
524
* Used by addsub2 and movcond, which may need the negative value,
525
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
526
.out_rrr = tgen_orc,
527
};
528
529
+static void tgen_sub(TCGContext *s, TCGType type,
530
+ TCGReg a0, TCGReg a1, TCGReg a2)
531
+{
532
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SUBW : OPC_SUB;
533
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
534
+}
535
+
536
+static const TCGOutOpSubtract outop_sub = {
537
+ .base.static_constraint = C_O1_I2(r, r, r),
538
+ .out_rrr = tgen_sub,
539
+};
540
+
541
static void tgen_xor(TCGContext *s, TCGType type,
542
TCGReg a0, TCGReg a1, TCGReg a2)
543
{
544
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
545
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
546
break;
547
548
- case INDEX_op_sub_i32:
549
- if (c2) {
550
- tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
551
- } else {
552
- tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
553
- }
554
- break;
555
- case INDEX_op_sub_i64:
556
- if (c2) {
557
- tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
558
- } else {
559
- tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
560
- }
561
- break;
562
-
563
case INDEX_op_not_i32:
564
case INDEX_op_not_i64:
565
tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
566
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
567
case INDEX_op_negsetcond_i64:
568
return C_O1_I2(r, r, rI);
569
570
- case INDEX_op_sub_i32:
571
- case INDEX_op_sub_i64:
572
- return C_O1_I2(r, rz, rN);
573
-
574
case INDEX_op_mul_i32:
575
case INDEX_op_mulsh_i32:
576
case INDEX_op_muluh_i32:
577
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
578
index XXXXXXX..XXXXXXX 100644
579
--- a/tcg/s390x/tcg-target.c.inc
580
+++ b/tcg/s390x/tcg-target.c.inc
581
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
582
.out_rrr = tgen_orc,
583
};
584
585
+static void tgen_sub(TCGContext *s, TCGType type,
586
+ TCGReg a0, TCGReg a1, TCGReg a2)
587
+{
588
+ if (type != TCG_TYPE_I32) {
589
+ tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
590
+ } else if (a0 == a1) {
591
+ tcg_out_insn(s, RR, SR, a0, a2);
592
+ } else {
593
+ tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
594
+ }
595
+}
596
+
597
+static const TCGOutOpSubtract outop_sub = {
598
+ .base.static_constraint = C_O1_I2(r, r, r),
599
+ .out_rrr = tgen_sub,
600
+};
601
+
602
static void tgen_xor(TCGContext *s, TCGType type,
603
TCGReg a0, TCGReg a1, TCGReg a2)
604
{
605
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
606
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
607
break;
608
609
- case INDEX_op_sub_i32:
610
- a0 = args[0], a1 = args[1], a2 = args[2];
611
- if (const_args[2]) {
612
- tgen_addi(s, type, a0, a1, (int32_t)-a2);
613
- } else if (a0 == a1) {
614
- tcg_out_insn(s, RR, SR, a0, a2);
615
- } else {
616
- tcg_out_insn(s, RRFa, SRK, a0, a1, a2);
617
- }
618
- break;
619
-
620
case INDEX_op_neg_i32:
621
tcg_out_insn(s, RR, LCR, args[0], args[1]);
622
break;
623
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
624
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
625
break;
626
627
- case INDEX_op_sub_i64:
628
- a0 = args[0], a1 = args[1], a2 = args[2];
629
- if (const_args[2]) {
630
- tgen_addi(s, type, a0, a1, -a2);
631
- } else {
632
- tcg_out_insn(s, RRFa, SGRK, a0, a1, a2);
633
- }
634
- break;
635
-
636
case INDEX_op_neg_i64:
637
tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
638
break;
639
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
640
case INDEX_op_clz_i64:
641
return C_O1_I2(r, r, rI);
642
643
- case INDEX_op_sub_i32:
644
- case INDEX_op_sub_i64:
645
- return C_O1_I2(r, r, ri);
646
-
647
case INDEX_op_mul_i32:
648
return (HAVE_FACILITY(MISC_INSN_EXT2)
649
? C_O1_I2(r, r, ri)
650
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
651
index XXXXXXX..XXXXXXX 100644
652
--- a/tcg/sparc64/tcg-target.c.inc
653
+++ b/tcg/sparc64/tcg-target.c.inc
654
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
655
.out_rrr = tgen_orc,
656
};
657
658
+static void tgen_sub(TCGContext *s, TCGType type,
659
+ TCGReg a0, TCGReg a1, TCGReg a2)
660
+{
661
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
662
+}
663
+
664
+static const TCGOutOpSubtract outop_sub = {
665
+ .base.static_constraint = C_O1_I2(r, r, r),
666
+ .out_rrr = tgen_sub,
667
+};
668
+
669
static void tgen_xor(TCGContext *s, TCGType type,
670
TCGReg a0, TCGReg a1, TCGReg a2)
671
{
672
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
673
case INDEX_op_st32_i64:
674
tcg_out_ldst(s, a0, a1, a2, STW);
675
break;
676
- OP_32_64(sub):
677
- c = ARITH_SUB;
678
- goto gen_arith;
679
case INDEX_op_shl_i32:
680
c = SHIFT_SLL;
681
do_shift32:
682
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
683
case INDEX_op_div_i64:
684
case INDEX_op_divu_i32:
685
case INDEX_op_divu_i64:
686
- case INDEX_op_sub_i32:
687
- case INDEX_op_sub_i64:
688
case INDEX_op_shl_i32:
689
case INDEX_op_shl_i64:
690
case INDEX_op_shr_i32:
691
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
692
index XXXXXXX..XXXXXXX 100644
693
--- a/tcg/tci/tcg-target.c.inc
694
+++ b/tcg/tci/tcg-target.c.inc
695
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
696
case INDEX_op_rem_i64:
697
case INDEX_op_remu_i32:
698
case INDEX_op_remu_i64:
699
- case INDEX_op_sub_i32:
700
- case INDEX_op_sub_i64:
701
case INDEX_op_mul_i32:
702
case INDEX_op_mul_i64:
703
case INDEX_op_shl_i32:
704
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
705
.out_rrr = tgen_orc,
706
};
707
708
+static void tgen_sub(TCGContext *s, TCGType type,
709
+ TCGReg a0, TCGReg a1, TCGReg a2)
710
+{
711
+ tcg_out_op_rrr(s, glue(INDEX_op_sub_i,TCG_TARGET_REG_BITS), a0, a1, a2);
712
+}
713
+
714
+static const TCGOutOpSubtract outop_sub = {
715
+ .base.static_constraint = C_O1_I2(r, r, r),
716
+ .out_rrr = tgen_sub,
717
+};
718
+
719
static void tgen_xor(TCGContext *s, TCGType type,
720
TCGReg a0, TCGReg a1, TCGReg a2)
721
{
722
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
723
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
724
break;
725
726
- CASE_32_64(sub)
727
CASE_32_64(mul)
728
CASE_32_64(shl)
729
CASE_32_64(shr)
730
--
731
2.43.0
diff view generated by jsdifflib
Deleted patch
1
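
[Aside for readers following the conversion: a minimal standalone model of
the TCGOutOp dispatch pattern this series introduces -- a per-opcode table
of base structures, downcast to the shape that carries the emission hooks.
The structure names mirror the patch above; the int "registers", the
constraint placeholder and main() are invented here for illustration and
are not the QEMU code.]

    /*
     * Minimal model of the TCGOutOp dispatch pattern (simplified;
     * TCGContext, constraints and register allocation are omitted).
     */
    #include <stdio.h>

    typedef enum { INDEX_op_sub, NB_OPS } TCGOpcode;

    typedef struct TCGOutOp {
        int static_constraint;          /* placeholder for C_O1_I2(...) */
    } TCGOutOp;

    typedef struct TCGOutOpSubtract {
        TCGOutOp base;                  /* must be first, as in tcg.c */
        void (*out_rrr)(int a0, int a1, int a2);
    } TCGOutOpSubtract;

    static void tgen_sub(int a0, int a1, int a2)
    {
        printf("sub r%d, r%d, r%d\n", a0, a1, a2);
    }

    static const TCGOutOpSubtract outop_sub = {
        .base.static_constraint = 1,
        .out_rrr = tgen_sub,
    };

    /* One entry per opcode, as in all_outop[]. */
    static const TCGOutOp * const all_outop[NB_OPS] = {
        [INDEX_op_sub] = &outop_sub.base,
    };

    int main(void)
    {
        /*
         * QEMU downcasts with container_of(..., base); since base is
         * the first member, a plain cast is equivalent in this model.
         */
        const TCGOutOpSubtract *out =
            (const TCGOutOpSubtract *)all_outop[INDEX_op_sub];
        out->out_rrr(0, 1, 2);
        return 0;
    }
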
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           |  4 ++--
 tcg/tcg-op.c             |  4 ++--
 tcg/tcg.c                | 10 +++-------
 tcg/tci.c                |  5 ++---
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(nand, 1, 2, 0, TCG_OPF_INT)
 DEF(nor, 1, 2, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
+DEF(sub, 1, 2, 0, TCG_OPF_INT)
 DEF(xor, 1, 2, 0, TCG_OPF_INT)
 
 DEF(setcond_i32, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* arith */
-DEF(sub_i32, 1, 2, 0, 0)
 DEF(mul_i32, 1, 2, 0, 0)
 DEF(div_i32, 1, 2, 0, 0)
 DEF(divu_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* arith */
-DEF(sub_i64, 1, 2, 0, 0)
 DEF(mul_i64, 1, 2, 0, 0)
 DEF(div_i64, 1, 2, 0, 0)
 DEF(divu_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_add:
         return x + y;
 
-    CASE_OP_32_64(sub):
+    case INDEX_op_sub:
         return x - y;
 
     CASE_OP_32_64(mul):
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(sextract):
             done = fold_sextract(&ctx, op);
             break;
-        CASE_OP_32_64(sub):
+        case INDEX_op_sub:
             done = fold_sub(&ctx, op);
             break;
         case INDEX_op_sub_vec:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 
 void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
+    tcg_gen_op3_i32(INDEX_op_sub, ret, arg1, arg2);
 }
 
 void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_sub, ret, arg1, arg2);
     } else {
         tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
                          TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
-    OUTOP(INDEX_op_sub_i32, TCGOutOpSubtract, outop_sub),
-    OUTOP(INDEX_op_sub_i64, TCGOutOpSubtract, outop_sub),
+    OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 };
 
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
             opc_new = INDEX_op_add;
             goto do_addsub2;
         case INDEX_op_sub2_i32:
-            opc_new = INDEX_op_sub_i32;
-            goto do_addsub2;
         case INDEX_op_sub2_i64:
-            opc_new = INDEX_op_sub_i64;
+            opc_new = INDEX_op_sub;
         do_addsub2:
             nb_iargs = 4;
             nb_oargs = 2;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;
 
-    case INDEX_op_sub_i32:
-    case INDEX_op_sub_i64:
+    case INDEX_op_sub:
         {
             const TCGOutOpSubtract *out = &outop_sub;
 
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] + regs[r2];
             break;
-        CASE_32_64(sub)
+        case INDEX_op_sub:
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] - regs[r2];
             break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_nor:
     case INDEX_op_or:
     case INDEX_op_orc:
+    case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_sub_i32:
-    case INDEX_op_sub_i64:
     case INDEX_op_mul_i32:
     case INDEX_op_mul_i64:
     case INDEX_op_div_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Arithmetic
 
       - | *t0* = *t1* + *t2*
 
-   * - sub_i32/i64 *t0*, *t1*, *t2*
+   * - sub *t0*, *t1*, *t2*
 
      - | *t0* = *t1* - *t2*
 
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
 static void tgen_sub(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
-    tcg_out_op_rrr(s, glue(INDEX_op_sub_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
 }
 
 static const TCGOutOpSubtract outop_sub = {
--
2.43.0
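
[Aside: the merge above means a single opcode now serves both widths, with
the operand width carried by the TCGType argument rather than the opcode
name. A toy model of that split -- do_sub and the two-value TCGType enum
are invented here for illustration; QEMU's TCGType carries more cases, and
for a 32-bit op only the low 32 bits of the result are defined.]

    /* Toy model: one opcode, width selected by a type tag. */
    #include <stdint.h>
    #include <stdio.h>

    typedef enum { TCG_TYPE_I32, TCG_TYPE_I64 } TCGType;

    static uint64_t do_sub(TCGType type, uint64_t x, uint64_t y)
    {
        uint64_t r = x - y;
        /* A 32-bit op only defines the low 32 bits of the result. */
        return type == TCG_TYPE_I32 ? (uint32_t)r : r;
    }

    int main(void)
    {
        printf("%llx\n", (unsigned long long)do_sub(TCG_TYPE_I32, 0, 1));
        printf("%llx\n", (unsigned long long)do_sub(TCG_TYPE_I64, 0, 1));
        return 0;
    }
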
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 21 +++++++++++++++++++--
 tcg/aarch64/tcg-target.c.inc     | 18 +++++++++++-------
 tcg/arm/tcg-target.c.inc         | 14 ++++++++++----
 tcg/i386/tcg-target.c.inc        | 16 +++++++++++-----
 tcg/loongarch64/tcg-target.c.inc | 19 ++++++++++---------
 tcg/mips/tcg-target.c.inc        | 18 ++++++++++--------
 tcg/ppc/tcg-target.c.inc         | 17 ++++++++++-------
 tcg/riscv/tcg-target.c.inc       | 19 ++++++++++---------
 tcg/s390x/tcg-target.c.inc       | 22 ++++++++++++++--------
 tcg/sparc64/tcg-target.c.inc     | 15 ++++++++++-----
 tcg/tci/tcg-target.c.inc         | 13 ++++++++++---
 11 files changed, 125 insertions(+), 67 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBinary {
                     TCGReg a0, TCGReg a1, tcg_target_long a2);
 } TCGOutOpBinary;
 
+typedef struct TCGOutOpUnary {
+    TCGOutOp base;
+    void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
+} TCGOutOpUnary;
+
 typedef struct TCGOutOpSubtract {
     TCGOutOp base;
     void (*out_rrr)(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
+    OUTOP(INDEX_op_neg_i32, TCGOutOpUnary, outop_neg),
+    OUTOP(INDEX_op_neg_i64, TCGOutOpUnary, outop_neg),
     OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st8_i32:
     case INDEX_op_st16_i32:
     case INDEX_op_st_i32:
-    case INDEX_op_neg_i32:
     case INDEX_op_mul_i32:
     case INDEX_op_shl_i32:
     case INDEX_op_shr_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-    case INDEX_op_neg_i64:
     case INDEX_op_mul_i64:
     case INDEX_op_shl_i64:
     case INDEX_op_shr_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;
 
+    case INDEX_op_neg_i32:
+    case INDEX_op_neg_i64:
+        {
+            const TCGOutOpUnary *out =
+                container_of(all_outop[op->opc], TCGOutOpUnary, base);
+
+            /* Constants should have been folded. */
+            tcg_debug_assert(!const_args[1]);
+            out->out_rr(s, type, new_args[0], new_args[1]);
+        }
+        break;
+
     default:
         if (def->flags & TCG_OPF_VECTOR) {
             tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_sub(s, type, a0, TCG_REG_XZR, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;
 
-    case INDEX_op_neg_i64:
-    case INDEX_op_neg_i32:
-        tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
-        break;
-
     case INDEX_op_not_i64:
     case INDEX_op_not_i32:
         tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld32u_i64:
     case INDEX_op_ld32s_i64:
     case INDEX_op_ld_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
     case INDEX_op_bswap16_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_subfi(s, type, a0, 0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         tcg_out_mov_reg(s, COND_AL, args[0], a0);
         break;
-    case INDEX_op_neg_i32:
-        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
-        break;
     case INDEX_op_not_i32:
         tcg_out_dat_reg(s, COND_AL,
                         ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld16u_i32:
     case INDEX_op_ld16s_i32:
     case INDEX_op_ld_i32:
-    case INDEX_op_neg_i32:
     case INDEX_op_not_i32:
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap32_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, 0),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    OP_32_64(neg):
-        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
-        break;
     OP_32_64(not):
         tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_bswap32_i32:
     case INDEX_op_bswap32_i64:
     case INDEX_op_bswap64_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
     case INDEX_op_extrh_i64_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;
 
-    case INDEX_op_neg_i32:
-        tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
-        break;
-    case INDEX_op_neg_i64:
-        tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
-        break;
-
     case INDEX_op_mul_i32:
         tcg_out_opc_mul_w(s, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_extrh_i64_i32:
     case INDEX_op_ext_i32_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
     case INDEX_op_extract_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
         break;
 
-    case INDEX_op_neg_i32:
-        i1 = OPC_SUBU;
-        goto do_unary;
-    case INDEX_op_neg_i64:
-        i1 = OPC_DSUBU;
-        goto do_unary;
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
         i1 = OPC_NOR;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld16u_i32:
     case INDEX_op_ld16s_i32:
     case INDEX_op_ld_i32:
-    case INDEX_op_neg_i32:
     case INDEX_op_not_i32:
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap32_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld32s_i64:
     case INDEX_op_ld32u_i64:
     case INDEX_op_ld_i64:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i64:
     case INDEX_op_bswap16_i64:
     case INDEX_op_bswap32_i64:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tcg_out32(s, NEG | RT(a0) | RA(a1));
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_brcond2(s, args, const_args);
         break;
 
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
-        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
-        break;
-
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
         tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld16s_i32:
     case INDEX_op_ld_i32:
     case INDEX_op_ctpop_i32:
-    case INDEX_op_neg_i32:
     case INDEX_op_not_i32:
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap32_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld32s_i64:
     case INDEX_op_ld_i64:
     case INDEX_op_ctpop_i64:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
         break;
 
-    case INDEX_op_neg_i32:
-        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
-        break;
-    case INDEX_op_neg_i64:
-        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
-        break;
-
     case INDEX_op_mul_i32:
         tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld16s_i32:
     case INDEX_op_ld_i32:
     case INDEX_op_not_i32:
-    case INDEX_op_neg_i32:
     case INDEX_op_ld8u_i64:
     case INDEX_op_ld8s_i64:
     case INDEX_op_ld16u_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld32u_i64:
     case INDEX_op_ld_i64:
     case INDEX_op_not_i64:
-    case INDEX_op_neg_i64:
     case INDEX_op_extu_i32_i64:
     case INDEX_op_extrl_i64_i32:
     case INDEX_op_extrh_i64_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori_3,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_insn(s, RR, LCR, a0, a1);
+    } else {
+        tcg_out_insn(s, RRE, LCGR, a0, a1);
+    }
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 # define OP_32_64(x) \
         case glue(glue(INDEX_op_,x),_i32): \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
         break;
 
-    case INDEX_op_neg_i32:
-        tcg_out_insn(s, RR, LCR, args[0], args[1]);
-        break;
     case INDEX_op_not_i32:
         tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
         break;
 
-    case INDEX_op_neg_i64:
-        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
-        break;
     case INDEX_op_not_i64:
         tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_bswap32_i32:
     case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
     case INDEX_op_ext_i32_i64:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rri = tgen_xori,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_sub(s, type, a0, TCG_REG_G0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         c = ARITH_UMUL;
         goto gen_arith;
 
-    OP_32_64(neg):
-        c = ARITH_SUB;
-        goto gen_arith1;
     OP_32_64(not):
         c = ARITH_ORN;
         goto gen_arith1;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld32u_i64:
     case INDEX_op_ld32s_i64:
     case INDEX_op_ld_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
     case INDEX_op_ext_i32_i64:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
     case INDEX_op_bswap16_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
     .out_rrr = tgen_xor,
 };
 
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tcg_out_op_rr(s, glue(INDEX_op_neg_i,TCG_TARGET_REG_BITS), a0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_neg,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
         break;
 
-    CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
     CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
     CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
     case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
--
2.43.0
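
[Aside: as the backend hunks above show, most RISC-style targets have no
dedicated negate instruction, so tgen_neg is emitted as a subtract from the
hard-wired zero register (XZR, TCG_REG_ZERO, %g0). A toy model of that
identity -- the helper names here are invented, not the QEMU code.]

    /* Toy model of neg as subtract-from-zero. */
    #include <stdio.h>

    static long tgen_sub(long a1, long a2) { return a1 - a2; }

    static long tgen_neg(long a1)
    {
        return tgen_sub(0, a1);     /* a0 = zero-register - a1 */
    }

    int main(void)
    {
        printf("%ld\n", tgen_neg(42));      /* prints -42 */
        return 0;
    }
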
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 30 ++++++------------------------
 tcg/tcg-op.c             |  4 ++--
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                | 11 +++++------
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 18 insertions(+), 40 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(nand, 1, 2, 0, TCG_OPF_INT)
+DEF(neg, 1, 1, 0, TCG_OPF_INT)
 DEF(nor, 1, 2, 0, TCG_OPF_INT)
 DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)
 DEF(bswap16_i32, 1, 1, 1, 0)
 DEF(bswap32_i32, 1, 1, 1, 0)
 DEF(not_i32, 1, 1, 0, 0)
-DEF(neg_i32, 1, 1, 0, 0)
 DEF(clz_i32, 1, 2, 0, 0)
 DEF(ctz_i32, 1, 2, 0, 0)
 DEF(ctpop_i32, 1, 1, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(bswap16_i64, 1, 1, 1, 0)
 DEF(bswap32_i64, 1, 1, 1, 0)
 DEF(bswap64_i64, 1, 1, 1, 0)
 DEF(not_i64, 1, 1, 0, 0)
-DEF(neg_i64, 1, 1, 0, 0)
 DEF(clz_i64, 1, 2, 0, 0)
 DEF(ctz_i64, 1, 2, 0, 0)
 DEF(ctpop_i64, 1, 1, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     CASE_OP_32_64_VEC(not):
         return ~x;
 
-    CASE_OP_32_64(neg):
+    case INDEX_op_neg:
         return -x;
 
     case INDEX_op_andc:
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
         break;
     }
     if (convert) {
-        TCGOpcode neg_opc;
-
         if (!inv && !neg) {
             return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
         }
 
-        switch (ctx->type) {
-        case TCG_TYPE_I32:
-            neg_opc = INDEX_op_neg_i32;
-            break;
-        case TCG_TYPE_I64:
-            neg_opc = INDEX_op_neg_i64;
-            break;
-        default:
-            g_assert_not_reached();
-        }
-
         if (!inv) {
-            op->opc = neg_opc;
+            op->opc = INDEX_op_neg;
         } else if (neg) {
             op->opc = INDEX_op_add;
             op->args[2] = arg_new_constant(ctx, -1);
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
 
 static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
 {
-    TCGOpcode neg_opc, shr_opc;
+    TCGOpcode shr_opc;
     TCGOpcode uext_opc = 0, sext_opc = 0;
     TCGCond cond = op->args[3];
     TCGArg ret, src1, src2;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         shr_opc = INDEX_op_shr_i32;
-        neg_opc = INDEX_op_neg_i32;
         if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
             uext_opc = INDEX_op_extract_i32;
         }
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         break;
     case TCG_TYPE_I64:
         shr_opc = INDEX_op_shr_i64;
-        neg_opc = INDEX_op_neg_i64;
         if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
             uext_opc = INDEX_op_extract_i64;
         }
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         op2->args[1] = ret;
         op2->args[2] = arg_new_constant(ctx, 1);
     } else if (neg) {
-        op2 = opt_insert_after(ctx, op, neg_opc, 2);
+        op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2);
         op2->args[0] = ret;
         op2->args[1] = ret;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
 
     switch (ctx->type) {
     case TCG_TYPE_I32:
-        neg_op = INDEX_op_neg_i32;
-        have_neg = true;
-        break;
     case TCG_TYPE_I64:
-        neg_op = INDEX_op_neg_i64;
+        neg_op = INDEX_op_neg;
         have_neg = true;
         break;
     case TCG_TYPE_V64:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_nand_vec:
             done = fold_nand(&ctx, op);
             break;
-        CASE_OP_32_64(neg):
+        case INDEX_op_neg:
             done = fold_neg(&ctx, op);
             break;
         case INDEX_op_nor:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 
 void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
-    tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
+    tcg_gen_op2_i32(INDEX_op_neg, ret, arg);
 }
 
 void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
 void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
+        tcg_gen_op2_i64(INDEX_op_neg, ret, arg);
     } else {
         TCGv_i32 zero = tcg_constant_i32(0);
         tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret),
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
-    OUTOP(INDEX_op_neg_i32, TCGOutOpUnary, outop_neg),
-    OUTOP(INDEX_op_neg_i64, TCGOutOpUnary, outop_neg),
+    OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
     OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         }
         break;
 
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
+    case INDEX_op_neg:
         {
             const TCGOutOpUnary *out =
                 container_of(all_outop[op->opc], TCGOutOpUnary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = ~(regs[r1] | regs[r2]);
             break;
+        case INDEX_op_neg:
+            tci_args_rr(insn, &r0, &r1);
+            regs[r0] = -regs[r1];
+            break;
 
             /* Arithmetic operations (32 bit). */
 
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             regs[r0] = ~regs[r1];
             break;
 #endif
-        CASE_32_64(neg)
-            tci_args_rr(insn, &r0, &r1);
-            regs[r0] = -regs[r1];
-            break;
 #if TCG_TARGET_REG_BITS == 64
             /* Load/store operations (64 bit). */
 
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
         break;
 
     case INDEX_op_mov:
+    case INDEX_op_neg:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
     case INDEX_op_bswap16_i32:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_bswap64_i64:
     case INDEX_op_not_i32:
     case INDEX_op_not_i64:
-    case INDEX_op_neg_i32:
-    case INDEX_op_neg_i64:
     case INDEX_op_ctpop_i32:
     case INDEX_op_ctpop_i64:
         tci_args_rr(insn, &r0, &r1);
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Arithmetic
 
       - | *t0* = *t1* - *t2*
 
-   * - neg_i32/i64 *t0*, *t1*
+   * - neg *t0*, *t1*
 
      - | *t0* = -*t1* (two's complement)
 
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
 
 static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
 {
-    tcg_out_op_rr(s, glue(INDEX_op_neg_i,TCG_TARGET_REG_BITS), a0, a1);
+    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
 }
 
 static const TCGOutOpUnary outop_neg = {
--
2.43.0
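
[Aside: the next patch replaces the compile-time TCG_TARGET_HAS_not_*
defines with the runtime predicate tcg_op_supported(op, type, flags), and
the tcg-op.c hunks keep the existing fallback of emitting ~x as x ^ -1
when the backend lacks a native not. A toy model of that query-plus-
fallback shape -- op_supported_not and gen_not are invented names, not the
QEMU API.]

    /* Toy model: a runtime support query with the xor -1 fallback. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool op_supported_not(void)
    {
        return false;               /* pretend the backend lacks NOT */
    }

    static uint64_t gen_not(uint64_t arg)
    {
        if (op_supported_not()) {
            return ~arg;            /* would emit the native not */
        }
        return arg ^ (uint64_t)-1;  /* fallback: xori arg, -1 */
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)gen_not(0));
        return 0;
    }
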
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-has.h     |  2 --
 tcg/arm/tcg-target-has.h         |  1 -
 tcg/i386/tcg-target-has.h        |  2 --
 tcg/loongarch64/tcg-target-has.h |  2 --
 tcg/mips/tcg-target-has.h        |  2 --
 tcg/ppc/tcg-target-has.h         |  2 --
 tcg/riscv/tcg-target-has.h       |  2 --
 tcg/s390x/tcg-target-has.h       |  2 --
 tcg/sparc64/tcg-target-has.h     |  2 --
 tcg/tcg-has.h                    |  1 -
 tcg/tci/tcg-target-has.h         |  2 --
 tcg/optimize.c                   |  4 ++--
 tcg/tcg-op.c                     | 10 ++++++----
 tcg/tcg.c                        |  8 ++++----
 tcg/tci.c                        |  2 --
 tcg/aarch64/tcg-target.c.inc     | 17 ++++++++++-------
 tcg/arm/tcg-target.c.inc         | 15 ++++++++++-----
 tcg/i386/tcg-target.c.inc        | 17 +++++++++++------
 tcg/loongarch64/tcg-target.c.inc | 17 ++++++++++-------
 tcg/mips/tcg-target.c.inc        | 20 ++++++++++----------
 tcg/ppc/tcg-target.c.inc         | 17 ++++++++++-------
 tcg/riscv/tcg-target.c.inc       | 17 ++++++++++-------
 tcg/s390x/tcg-target.c.inc       | 25 ++++++++++++++++---------
 tcg/sparc64/tcg-target.c.inc     | 20 ++++++++++----------
 tcg/tci/tcg-target.c.inc         | 13 ++++++++++---
 25 files changed, 119 insertions(+), 103 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rem_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_rot_i32 1
 #define TCG_TARGET_HAS_clz_i32 1
 #define TCG_TARGET_HAS_ctz_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
 #define TCG_TARGET_HAS_clz_i64 1
 #define TCG_TARGET_HAS_ctz_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 /* optional instructions */
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_rot_i32 1
 #define TCG_TARGET_HAS_clz_i32 1
 #define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rot_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_clz_i32 1
 #define TCG_TARGET_HAS_ctz_i32 1
 #define TCG_TARGET_HAS_ctpop_i32 have_popcnt
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_clz_i64 1
 #define TCG_TARGET_HAS_ctz_i64 1
 #define TCG_TARGET_HAS_ctpop_i64 have_popcnt
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_mulsh_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_clz_i32 1
 #define TCG_TARGET_HAS_ctz_i32 1
 #define TCG_TARGET_HAS_ctpop_i32 0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_clz_i64 1
 #define TCG_TARGET_HAS_ctz_i64 1
 #define TCG_TARGET_HAS_ctpop_i64 0
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 /* optional instructions */
 #define TCG_TARGET_HAS_div_i32 1
 #define TCG_TARGET_HAS_rem_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
 #define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
 #define TCG_TARGET_HAS_muluh_i32 1
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_extr_i64_i32 1
 #define TCG_TARGET_HAS_div_i64 1
 #define TCG_TARGET_HAS_rem_i64 1
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_add2_i64 0
 #define TCG_TARGET_HAS_sub2_i64 0
 #define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_rot_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_clz_i32 1
 #define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
 #define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_clz_i64 1
 #define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
 #define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_mulsh_i32 0
 #define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_rot_i32 1
 #define TCG_TARGET_HAS_bswap16_i32 1
 #define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
 #define TCG_TARGET_HAS_clz_i32 0
 #define TCG_TARGET_HAS_ctz_i32 0
 #define TCG_TARGET_HAS_ctpop_i32 1
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_bswap16_i64 1
 #define TCG_TARGET_HAS_bswap32_i64 1
 #define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
 #define TCG_TARGET_HAS_clz_i64 1
 #define TCG_TARGET_HAS_ctz_i64 0
 #define TCG_TARGET_HAS_ctpop_i64 1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_rot_i32 0
 #define TCG_TARGET_HAS_bswap16_i32 0
 #define TCG_TARGET_HAS_bswap32_i32 0
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_clz_i32 0
 #define TCG_TARGET_HAS_ctz_i32 0
 #define TCG_TARGET_HAS_ctpop_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_bswap16_i64 0
 #define TCG_TARGET_HAS_bswap32_i64 0
 #define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_clz_i64 0
 #define TCG_TARGET_HAS_ctz_i64 0
 #define TCG_TARGET_HAS_ctpop_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64 0
 #define TCG_TARGET_HAS_bswap32_i64 0
 #define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_not_i64 0
 #define TCG_TARGET_HAS_clz_i64 0
 #define TCG_TARGET_HAS_ctz_i64 0
 #define TCG_TARGET_HAS_ctpop_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_clz_i32 1
 #define TCG_TARGET_HAS_ctz_i32 1
 #define TCG_TARGET_HAS_ctpop_i32 1
-#define TCG_TARGET_HAS_not_i32 1
 #define TCG_TARGET_HAS_rot_i32 1
 #define TCG_TARGET_HAS_negsetcond_i32 0
 #define TCG_TARGET_HAS_muls2_i32 1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_clz_i64 1
 #define TCG_TARGET_HAS_ctz_i64 1
 #define TCG_TARGET_HAS_ctpop_i64 1
-#define TCG_TARGET_HAS_not_i64 1
 #define TCG_TARGET_HAS_rot_i64 1
 #define TCG_TARGET_HAS_negsetcond_i64 0
 #define TCG_TARGET_HAS_muls2_i64 1
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         not_op = INDEX_op_not_i32;
-        have_not = TCG_TARGET_HAS_not_i32;
+        have_not = tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0);
         break;
     case TCG_TYPE_I64:
         not_op = INDEX_op_not_i64;
-        have_not = TCG_TARGET_HAS_not_i64;
+        have_not = tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0);
         break;
     case TCG_TYPE_V64:
     case TCG_TYPE_V128:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     /* Some cases can be optimized here. */
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
-    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) {
+    } else if (arg2 == -1 &&
+               tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0)) {
         /* Don't recurse with tcg_gen_not_i32. */
         tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
     } else {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 
 void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
-    if (TCG_TARGET_HAS_not_i32) {
+    if (tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0)) {
         tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
     } else {
         tcg_gen_xori_i32(ret, arg, -1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     /* Some cases can be optimized here. */
     if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
-    } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) {
+    } else if (arg2 == -1 &&
+               tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0)) {
         /* Don't recurse with tcg_gen_not_i64. */
         tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
     } else {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
         tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
-    } else if (TCG_TARGET_HAS_not_i64) {
+    } else if (tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0)) {
         tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
     } else {
         tcg_gen_xori_i64(ret, arg, -1);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
     OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
     OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
+    OUTOP(INDEX_op_not_i32, TCGOutOpUnary, outop_not),
+    OUTOP(INDEX_op_not_i64, TCGOutOpUnary, outop_not),
     OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_bswap16_i32;
     case INDEX_op_bswap32_i32:
         return TCG_TARGET_HAS_bswap32_i32;
-    case INDEX_op_not_i32:
-        return TCG_TARGET_HAS_not_i32;
     case INDEX_op_clz_i32:
         return TCG_TARGET_HAS_clz_i32;
     case INDEX_op_ctz_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
         return TCG_TARGET_HAS_bswap32_i64;
     case INDEX_op_bswap64_i64:
         return TCG_TARGET_HAS_bswap64_i64;
-    case INDEX_op_not_i64:
-        return TCG_TARGET_HAS_not_i64;
     case INDEX_op_clz_i64:
         return TCG_TARGET_HAS_clz_i64;
     case INDEX_op_ctz_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         break;
 
     case INDEX_op_neg:
+    case INDEX_op_not_i32:
+    case INDEX_op_not_i64:
         {
             const TCGOutOpUnary *out =
                 container_of(all_outop[op->opc], TCGOutOpUnary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             regs[r0] = bswap32(regs[r1]);
             break;
 #endif
-#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
         CASE_32_64(not)
             tci_args_rr(insn, &r0, &r1);
             regs[r0] = ~regs[r1];
             break;
-#endif
 #if TCG_TARGET_REG_BITS == 64
             /* Load/store operations (64 bit). */
 
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
     .out_rr = tgen_neg,
 };
 
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tgen_orc(s, type, a0, TCG_REG_XZR, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_not,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;
 
-    case INDEX_op_not_i64:
-    case INDEX_op_not_i32:
-        tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
-        break;
-
     case INDEX_op_mul_i64:
     case INDEX_op_mul_i32:
         tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld32u_i64:
     case INDEX_op_ld32s_i64:
     case INDEX_op_ld_i64:
-    case INDEX_op_not_i32:
-    case INDEX_op_not_i64:
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap32_i32:
     case INDEX_op_bswap16_i64:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
     .out_rr = tgen_neg,
 };
 
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_MVN, a0, 0, a1, SHIFT_IMM_LSL(0));
+}
+
+static const TCGOutOpUnary outop_not = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_not,
+};
+
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         tcg_out_mov_reg(s, COND_AL, args[0], a0);
         break;
-    case INDEX_op_not_i32:
-        tcg_out_dat_reg(s, COND_AL,
-                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
-        break;
     case INDEX_op_mul_i32:
         tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
         break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_ld16u_i32:
     case INDEX_op_ld16s_i32:
     case INDEX_op_ld_i32:
-    case INDEX_op_not_i32:
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap32_i32:
     case INDEX_op_extract_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
     .out_rr = tgen_neg,
 };
 
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
+}
+
+static const TCGOutOpUnary outop_not = {
449
+ .base.static_constraint = C_O1_I1(r, 0),
450
+ .out_rr = tgen_not,
451
+};
452
+
453
454
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
455
const TCGArg args[TCG_MAX_OP_ARGS],
456
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
457
}
458
break;
459
460
- OP_32_64(not):
461
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
462
- break;
463
-
464
case INDEX_op_qemu_ld_i32:
465
tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
466
break;
467
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
468
case INDEX_op_bswap32_i32:
469
case INDEX_op_bswap32_i64:
470
case INDEX_op_bswap64_i64:
471
- case INDEX_op_not_i32:
472
- case INDEX_op_not_i64:
473
case INDEX_op_extrh_i64_i32:
474
return C_O1_I1(r, 0);
475
476
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
477
index XXXXXXX..XXXXXXX 100644
478
--- a/tcg/loongarch64/tcg-target.c.inc
479
+++ b/tcg/loongarch64/tcg-target.c.inc
480
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
481
.out_rr = tgen_neg,
482
};
483
484
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
485
+{
486
+ tgen_nor(s, type, a0, a1, TCG_REG_ZERO);
487
+}
488
+
489
+static const TCGOutOpUnary outop_not = {
490
+ .base.static_constraint = C_O1_I1(r, r),
491
+ .out_rr = tgen_not,
492
+};
493
+
494
495
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
496
const TCGArg args[TCG_MAX_OP_ARGS],
497
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
498
tcg_out_opc_srai_d(s, a0, a1, 32);
499
break;
500
501
- case INDEX_op_not_i32:
502
- case INDEX_op_not_i64:
503
- tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
504
- break;
505
-
506
case INDEX_op_extract_i32:
507
if (a2 == 0 && args[3] <= 12) {
508
tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
509
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
510
case INDEX_op_extrl_i64_i32:
511
case INDEX_op_extrh_i64_i32:
512
case INDEX_op_ext_i32_i64:
513
- case INDEX_op_not_i32:
514
- case INDEX_op_not_i64:
515
case INDEX_op_extract_i32:
516
case INDEX_op_extract_i64:
517
case INDEX_op_sextract_i32:
518
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
519
index XXXXXXX..XXXXXXX 100644
520
--- a/tcg/mips/tcg-target.c.inc
521
+++ b/tcg/mips/tcg-target.c.inc
522
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
523
.out_rr = tgen_neg,
524
};
525
526
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
527
+{
528
+ tgen_nor(s, type, a0, TCG_REG_ZERO, a1);
529
+}
530
+
531
+static const TCGOutOpUnary outop_not = {
532
+ .base.static_constraint = C_O1_I1(r, r),
533
+ .out_rr = tgen_not,
534
+};
535
+
536
537
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
538
const TCGArg args[TCG_MAX_OP_ARGS],
539
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
540
tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
541
break;
542
543
- case INDEX_op_not_i32:
544
- case INDEX_op_not_i64:
545
- i1 = OPC_NOR;
546
- goto do_unary;
547
- do_unary:
548
- tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
549
- break;
550
-
551
case INDEX_op_bswap16_i32:
552
case INDEX_op_bswap16_i64:
553
tcg_out_bswap16(s, a0, a1, a2);
554
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
555
case INDEX_op_ld16u_i32:
556
case INDEX_op_ld16s_i32:
557
case INDEX_op_ld_i32:
558
- case INDEX_op_not_i32:
559
case INDEX_op_bswap16_i32:
560
case INDEX_op_bswap32_i32:
561
case INDEX_op_extract_i32:
562
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
563
case INDEX_op_ld32s_i64:
564
case INDEX_op_ld32u_i64:
565
case INDEX_op_ld_i64:
566
- case INDEX_op_not_i64:
567
case INDEX_op_bswap16_i64:
568
case INDEX_op_bswap32_i64:
569
case INDEX_op_bswap64_i64:
570
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
571
index XXXXXXX..XXXXXXX 100644
572
--- a/tcg/ppc/tcg-target.c.inc
573
+++ b/tcg/ppc/tcg-target.c.inc
574
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
575
.out_rr = tgen_neg,
576
};
577
578
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
579
+{
580
+ tgen_nor(s, type, a0, a1, a1);
581
+}
582
+
583
+static const TCGOutOpUnary outop_not = {
584
+ .base.static_constraint = C_O1_I1(r, r),
585
+ .out_rr = tgen_not,
586
+};
587
+
588
589
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
590
const TCGArg args[TCG_MAX_OP_ARGS],
591
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
592
tcg_out_brcond2(s, args, const_args);
593
break;
594
595
- case INDEX_op_not_i32:
596
- case INDEX_op_not_i64:
597
- tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
598
- break;
599
-
600
case INDEX_op_shl_i64:
601
if (const_args[2]) {
602
/* Limit immediate shift count lest we create an illegal insn. */
603
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
604
case INDEX_op_ld16s_i32:
605
case INDEX_op_ld_i32:
606
case INDEX_op_ctpop_i32:
607
- case INDEX_op_not_i32:
608
case INDEX_op_bswap16_i32:
609
case INDEX_op_bswap32_i32:
610
case INDEX_op_extract_i32:
611
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
612
case INDEX_op_ld32s_i64:
613
case INDEX_op_ld_i64:
614
case INDEX_op_ctpop_i64:
615
- case INDEX_op_not_i64:
616
case INDEX_op_ext_i32_i64:
617
case INDEX_op_extu_i32_i64:
618
case INDEX_op_bswap16_i64:
619
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
620
index XXXXXXX..XXXXXXX 100644
621
--- a/tcg/riscv/tcg-target.c.inc
622
+++ b/tcg/riscv/tcg-target.c.inc
623
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
624
.out_rr = tgen_neg,
625
};
626
627
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
628
+{
629
+ tgen_xori(s, type, a0, a1, -1);
630
+}
631
+
632
+static const TCGOutOpUnary outop_not = {
633
+ .base.static_constraint = C_O1_I1(r, r),
634
+ .out_rr = tgen_not,
635
+};
636
+
637
638
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
639
const TCGArg args[TCG_MAX_OP_ARGS],
640
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
641
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
642
break;
643
644
- case INDEX_op_not_i32:
645
- case INDEX_op_not_i64:
646
- tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
647
- break;
648
-
649
case INDEX_op_mul_i32:
650
tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
651
break;
652
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
653
case INDEX_op_ld16u_i32:
654
case INDEX_op_ld16s_i32:
655
case INDEX_op_ld_i32:
656
- case INDEX_op_not_i32:
657
case INDEX_op_ld8u_i64:
658
case INDEX_op_ld8s_i64:
659
case INDEX_op_ld16u_i64:
660
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
661
case INDEX_op_ld32s_i64:
662
case INDEX_op_ld32u_i64:
663
case INDEX_op_ld_i64:
664
- case INDEX_op_not_i64:
665
case INDEX_op_extu_i32_i64:
666
case INDEX_op_extrl_i64_i32:
667
case INDEX_op_extrh_i64_i32:
668
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
669
index XXXXXXX..XXXXXXX 100644
670
--- a/tcg/s390x/tcg-target.c.inc
671
+++ b/tcg/s390x/tcg-target.c.inc
672
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
673
.out_rr = tgen_neg,
674
};
675
676
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
677
+{
678
+ tgen_nor(s, type, a0, a1, a1);
679
+}
680
+
681
+static TCGConstraintSetIndex cset_not(TCGType type, unsigned flags)
682
+{
683
+ return HAVE_FACILITY(MISC_INSN_EXT3) ? C_O1_I1(r, r) : C_NotImplemented;
684
+}
685
+
686
+static const TCGOutOpUnary outop_not = {
687
+ .base.static_constraint = C_Dynamic,
688
+ .base.dynamic_constraint = cset_not,
689
+ .out_rr = tgen_not,
690
+};
691
+
692
693
# define OP_32_64(x) \
694
case glue(glue(INDEX_op_,x),_i32): \
695
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
696
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
697
break;
698
699
- case INDEX_op_not_i32:
700
- tcg_out_insn(s, RRFa, NORK, args[0], args[1], args[1]);
701
- break;
702
-
703
case INDEX_op_mul_i32:
704
a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
705
if (const_args[2]) {
706
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
707
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
708
break;
709
710
- case INDEX_op_not_i64:
711
- tcg_out_insn(s, RRFa, NOGRK, args[0], args[1], args[1]);
712
- break;
713
case INDEX_op_bswap64_i64:
714
tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
715
break;
716
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
717
case INDEX_op_bswap32_i32:
718
case INDEX_op_bswap32_i64:
719
case INDEX_op_bswap64_i64:
720
- case INDEX_op_not_i32:
721
- case INDEX_op_not_i64:
722
case INDEX_op_ext_i32_i64:
723
case INDEX_op_extu_i32_i64:
724
case INDEX_op_extract_i32:
725
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
726
index XXXXXXX..XXXXXXX 100644
727
--- a/tcg/sparc64/tcg-target.c.inc
728
+++ b/tcg/sparc64/tcg-target.c.inc
729
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
730
.out_rr = tgen_neg,
731
};
732
733
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
734
+{
735
+ tgen_orc(s, type, a0, TCG_REG_G0, a1);
736
+}
737
+
738
+static const TCGOutOpUnary outop_not = {
739
+ .base.static_constraint = C_O1_I1(r, r),
740
+ .out_rr = tgen_not,
741
+};
742
+
743
744
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
745
const TCGArg args[TCG_MAX_OP_ARGS],
746
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
747
c = ARITH_UMUL;
748
goto gen_arith;
749
750
- OP_32_64(not):
751
- c = ARITH_ORN;
752
- goto gen_arith1;
753
-
754
case INDEX_op_div_i32:
755
tcg_out_div32(s, a0, a1, a2, c2, 0);
756
break;
757
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
758
tcg_out_arithc(s, a0, a1, a2, c2, c);
759
break;
760
761
- gen_arith1:
762
- tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
763
- break;
764
-
765
case INDEX_op_mb:
766
tcg_out_mb(s, a0);
767
break;
768
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
769
case INDEX_op_ld32u_i64:
770
case INDEX_op_ld32s_i64:
771
case INDEX_op_ld_i64:
772
- case INDEX_op_not_i32:
773
- case INDEX_op_not_i64:
774
case INDEX_op_ext_i32_i64:
775
case INDEX_op_extu_i32_i64:
776
case INDEX_op_extract_i64:
777
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
778
index XXXXXXX..XXXXXXX 100644
779
--- a/tcg/tci/tcg-target.c.inc
780
+++ b/tcg/tci/tcg-target.c.inc
781
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
782
case INDEX_op_ld32u_i64:
783
case INDEX_op_ld32s_i64:
784
case INDEX_op_ld_i64:
785
- case INDEX_op_not_i32:
786
- case INDEX_op_not_i64:
787
case INDEX_op_ext_i32_i64:
788
case INDEX_op_extu_i32_i64:
789
case INDEX_op_bswap16_i32:
790
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
791
.out_rr = tgen_neg,
792
};
793
794
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
795
+{
796
+ tcg_out_op_rr(s, glue(INDEX_op_not_i,TCG_TARGET_REG_BITS), a0, a1);
797
+}
798
+
799
+static const TCGOutOpUnary outop_not = {
800
+ .base.static_constraint = C_O1_I1(r, r),
801
+ .out_rr = tgen_not,
802
+};
803
+
804
805
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
806
const TCGArg args[TCG_MAX_OP_ARGS],
807
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
808
tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
809
break;
810
811
- CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
812
CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */
813
case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
814
case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
815
--
816
2.43.0
817
818
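As a reading aid, not additional patch content: the backend-side shape
that each hunk above introduces, assembled schematically from this
patch's own hunks.  The emitter body is whatever instruction a given
backend uses for "not":

  static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
  {
      /* e.g. orc with the zero register (aarch64, sparc64), nor with
         zero or with itself (loongarch64, mips, ppc, s390x), or
         xori -1 (riscv) -- see the per-backend hunks above. */
  }

  static const TCGOutOpUnary outop_not = {
      .base.static_constraint = C_O1_I1(r, r),  /* one output, one input */
      .out_rr = tgen_not,
  };

i386 instead uses C_O1_I1(r, 0), constraining the input to alias the
output of the one-operand NOT, and s390x supplies a dynamic constraint
because NORK/NOGRK require the MISC_INSN_EXT3 facility.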
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 13 ++++++-------
6
tcg/tcg-op.c | 16 ++++++++--------
7
tcg/tcg.c | 6 ++----
8
tcg/tci.c | 11 +++++------
9
docs/devel/tcg-ops.rst | 2 +-
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 24 insertions(+), 29 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(eqv, 1, 2, 0, TCG_OPF_INT)
18
DEF(nand, 1, 2, 0, TCG_OPF_INT)
19
DEF(neg, 1, 1, 0, TCG_OPF_INT)
20
DEF(nor, 1, 2, 0, TCG_OPF_INT)
21
+DEF(not, 1, 1, 0, TCG_OPF_INT)
22
DEF(or, 1, 2, 0, TCG_OPF_INT)
23
DEF(orc, 1, 2, 0, TCG_OPF_INT)
24
DEF(sub, 1, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)
26
27
DEF(bswap16_i32, 1, 1, 1, 0)
28
DEF(bswap32_i32, 1, 1, 1, 0)
29
-DEF(not_i32, 1, 1, 0, 0)
30
DEF(clz_i32, 1, 2, 0, 0)
31
DEF(ctz_i32, 1, 2, 0, 0)
32
DEF(ctpop_i32, 1, 1, 0, 0)
33
@@ -XXX,XX +XXX,XX @@ DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
34
DEF(bswap16_i64, 1, 1, 1, 0)
35
DEF(bswap32_i64, 1, 1, 1, 0)
36
DEF(bswap64_i64, 1, 1, 1, 0)
37
-DEF(not_i64, 1, 1, 0, 0)
38
DEF(clz_i64, 1, 2, 0, 0)
39
DEF(ctz_i64, 1, 2, 0, 0)
40
DEF(ctpop_i64, 1, 1, 0, 0)
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
46
case INDEX_op_rotl_i64:
47
return rol64(x, y & 63);
48
49
- CASE_OP_32_64_VEC(not):
50
+ case INDEX_op_not:
51
+ case INDEX_op_not_vec:
52
return ~x;
53
54
case INDEX_op_neg:
55
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
56
57
switch (ctx->type) {
58
case TCG_TYPE_I32:
59
- not_op = INDEX_op_not_i32;
60
- have_not = tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0);
61
- break;
62
case TCG_TYPE_I64:
63
- not_op = INDEX_op_not_i64;
64
- have_not = tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0);
65
+ not_op = INDEX_op_not;
66
+ have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
67
break;
68
case TCG_TYPE_V64:
69
case TCG_TYPE_V128:
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
case INDEX_op_nor_vec:
72
done = fold_nor(&ctx, op);
73
break;
74
- CASE_OP_32_64_VEC(not):
75
+ case INDEX_op_not:
76
+ case INDEX_op_not_vec:
77
done = fold_not(&ctx, op);
78
break;
79
case INDEX_op_or:
80
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/tcg/tcg-op.c
83
+++ b/tcg/tcg-op.c
84
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
85
if (arg2 == 0) {
86
tcg_gen_mov_i32(ret, arg1);
87
} else if (arg2 == -1 &&
88
- tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0)) {
89
+ tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) {
90
/* Don't recurse with tcg_gen_not_i32. */
91
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
92
+ tcg_gen_op2_i32(INDEX_op_not, ret, arg1);
93
} else {
94
tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2));
95
}
96
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
97
98
void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
99
{
100
- if (tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0)) {
101
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
102
+ if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) {
103
+ tcg_gen_op2_i32(INDEX_op_not, ret, arg);
104
} else {
105
tcg_gen_xori_i32(ret, arg, -1);
106
}
107
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
108
if (arg2 == 0) {
109
tcg_gen_mov_i64(ret, arg1);
110
} else if (arg2 == -1 &&
111
- tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0)) {
112
+ tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) {
113
/* Don't recurse with tcg_gen_not_i64. */
114
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
115
+ tcg_gen_op2_i64(INDEX_op_not, ret, arg1);
116
} else {
117
tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2));
118
}
119
@@ -XXX,XX +XXX,XX @@ void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
120
if (TCG_TARGET_REG_BITS == 32) {
121
tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
122
tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
123
- } else if (tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0)) {
124
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
125
+ } else if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) {
126
+ tcg_gen_op2_i64(INDEX_op_not, ret, arg);
127
} else {
128
tcg_gen_xori_i64(ret, arg, -1);
129
}
130
diff --git a/tcg/tcg.c b/tcg/tcg.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/tcg/tcg.c
133
+++ b/tcg/tcg.c
134
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
135
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
136
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
137
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
138
- OUTOP(INDEX_op_not_i32, TCGOutOpUnary, outop_not),
139
- OUTOP(INDEX_op_not_i64, TCGOutOpUnary, outop_not),
140
+ OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
141
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
142
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
143
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
144
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
145
break;
146
147
case INDEX_op_neg:
148
- case INDEX_op_not_i32:
149
- case INDEX_op_not_i64:
150
+ case INDEX_op_not:
151
{
152
const TCGOutOpUnary *out =
153
container_of(all_outop[op->opc], TCGOutOpUnary, base);
154
diff --git a/tcg/tci.c b/tcg/tci.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/tcg/tci.c
157
+++ b/tcg/tci.c
158
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
159
tci_args_rr(insn, &r0, &r1);
160
regs[r0] = -regs[r1];
161
break;
162
+ case INDEX_op_not:
163
+ tci_args_rr(insn, &r0, &r1);
164
+ regs[r0] = ~regs[r1];
165
+ break;
166
167
/* Arithmetic operations (32 bit). */
168
169
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
170
regs[r0] = bswap32(regs[r1]);
171
break;
172
#endif
173
- CASE_32_64(not)
174
- tci_args_rr(insn, &r0, &r1);
175
- regs[r0] = ~regs[r1];
176
- break;
177
#if TCG_TARGET_REG_BITS == 64
178
/* Load/store operations (64 bit). */
179
180
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
181
182
case INDEX_op_mov:
183
case INDEX_op_neg:
184
+ case INDEX_op_not:
185
case INDEX_op_ext_i32_i64:
186
case INDEX_op_extu_i32_i64:
187
case INDEX_op_bswap16_i32:
188
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
189
case INDEX_op_bswap32_i32:
190
case INDEX_op_bswap32_i64:
191
case INDEX_op_bswap64_i64:
192
- case INDEX_op_not_i32:
193
- case INDEX_op_not_i64:
194
case INDEX_op_ctpop_i32:
195
case INDEX_op_ctpop_i64:
196
tci_args_rr(insn, &r0, &r1);
197
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
198
index XXXXXXX..XXXXXXX 100644
199
--- a/docs/devel/tcg-ops.rst
200
+++ b/docs/devel/tcg-ops.rst
201
@@ -XXX,XX +XXX,XX @@ Logical
202
203
- | *t0* = *t1* ^ *t2*
204
205
- * - not_i32/i64 *t0*, *t1*
206
+ * - not *t0*, *t1*
207
208
- | *t0* = ~\ *t1*
209
210
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
211
index XXXXXXX..XXXXXXX 100644
212
--- a/tcg/tci/tcg-target.c.inc
213
+++ b/tcg/tci/tcg-target.c.inc
214
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_neg = {
215
216
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
217
{
218
- tcg_out_op_rr(s, glue(INDEX_op_not_i,TCG_TARGET_REG_BITS), a0, a1);
219
+ tcg_out_op_rr(s, INDEX_op_not, a0, a1);
220
}
221
222
static const TCGOutOpUnary outop_not = {
223
--
224
2.43.0
225
226
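A hedged usage sketch, not taken from the patch (d32/s32 and d64/s64
stand for pre-existing temporaries): frontend code is unchanged by the
merge, because the width-specific helpers now expand to the single
opcode, distinguished only by the TCGType recorded on the op:

  tcg_gen_not_i32(d32, s32);   /* emits INDEX_op_not with TCG_TYPE_I32 */
  tcg_gen_not_i64(d64, s64);   /* emits INDEX_op_not with TCG_TYPE_I64,
                                  or two i32 nots on a 32-bit host */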
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 4 ++--
6
tcg/tcg-op.c | 12 ++++++------
7
tcg/tcg.c | 14 ++++++--------
8
tcg/tci.c | 5 ++---
9
docs/devel/tcg-ops.rst | 2 +-
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 19 insertions(+), 23 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
18
DEF(and, 1, 2, 0, TCG_OPF_INT)
19
DEF(andc, 1, 2, 0, TCG_OPF_INT)
20
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
21
+DEF(mul, 1, 2, 0, TCG_OPF_INT)
22
DEF(nand, 1, 2, 0, TCG_OPF_INT)
23
DEF(neg, 1, 1, 0, TCG_OPF_INT)
24
DEF(nor, 1, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
26
DEF(st16_i32, 0, 2, 1, 0)
27
DEF(st_i32, 0, 2, 1, 0)
28
/* arith */
29
-DEF(mul_i32, 1, 2, 0, 0)
30
DEF(div_i32, 1, 2, 0, 0)
31
DEF(divu_i32, 1, 2, 0, 0)
32
DEF(rem_i32, 1, 2, 0, 0)
33
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
34
DEF(st32_i64, 0, 2, 1, 0)
35
DEF(st_i64, 0, 2, 1, 0)
36
/* arith */
37
-DEF(mul_i64, 1, 2, 0, 0)
38
DEF(div_i64, 1, 2, 0, 0)
39
DEF(divu_i64, 1, 2, 0, 0)
40
DEF(rem_i64, 1, 2, 0, 0)
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
46
case INDEX_op_sub:
47
return x - y;
48
49
- CASE_OP_32_64(mul):
50
+ case INDEX_op_mul:
51
return x * y;
52
53
case INDEX_op_and:
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
CASE_OP_32_64(movcond):
56
done = fold_movcond(&ctx, op);
57
break;
58
- CASE_OP_32_64(mul):
59
+ case INDEX_op_mul:
60
done = fold_mul(&ctx, op);
61
break;
62
CASE_OP_32_64(mulsh):
63
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/tcg-op.c
66
+++ b/tcg/tcg-op.c
67
@@ -XXX,XX +XXX,XX @@ void tcg_gen_negsetcondi_i32(TCGCond cond, TCGv_i32 ret,
68
69
void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
70
{
71
- tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
72
+ tcg_gen_op3_i32(INDEX_op_mul, ret, arg1, arg2);
73
}
74
75
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
76
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
77
tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
78
} else if (TCG_TARGET_HAS_muluh_i32) {
79
TCGv_i32 t = tcg_temp_ebb_new_i32();
80
- tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
81
+ tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
82
tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
83
tcg_gen_mov_i32(rl, t);
84
tcg_temp_free_i32(t);
85
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
86
tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
87
} else if (TCG_TARGET_HAS_mulsh_i32) {
88
TCGv_i32 t = tcg_temp_ebb_new_i32();
89
- tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
90
+ tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
91
tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
92
tcg_gen_mov_i32(rl, t);
93
tcg_temp_free_i32(t);
94
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
95
TCGv_i32 t1;
96
97
if (TCG_TARGET_REG_BITS == 64) {
98
- tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
99
+ tcg_gen_op3_i64(INDEX_op_mul, ret, arg1, arg2);
100
return;
101
}
102
103
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
104
tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
105
} else if (TCG_TARGET_HAS_muluh_i64) {
106
TCGv_i64 t = tcg_temp_ebb_new_i64();
107
- tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
108
+ tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
109
tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
110
tcg_gen_mov_i64(rl, t);
111
tcg_temp_free_i64(t);
112
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
113
tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
114
} else if (TCG_TARGET_HAS_mulsh_i64) {
115
TCGv_i64 t = tcg_temp_ebb_new_i64();
116
- tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
117
+ tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
118
tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
119
tcg_gen_mov_i64(rl, t);
120
tcg_temp_free_i64(t);
121
diff --git a/tcg/tcg.c b/tcg/tcg.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/tcg/tcg.c
124
+++ b/tcg/tcg.c
125
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
126
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
127
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
128
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
129
- OUTOP(INDEX_op_mul_i32, TCGOutOpBinary, outop_mul),
130
- OUTOP(INDEX_op_mul_i64, TCGOutOpBinary, outop_mul),
131
+ OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
132
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
133
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
134
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
135
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
136
goto do_not_remove;
137
138
case INDEX_op_mulu2_i32:
139
- opc_new = INDEX_op_mul_i32;
140
+ opc_new = INDEX_op_mul;
141
opc_new2 = INDEX_op_muluh_i32;
142
have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
143
goto do_mul2;
144
case INDEX_op_muls2_i32:
145
- opc_new = INDEX_op_mul_i32;
146
+ opc_new = INDEX_op_mul;
147
opc_new2 = INDEX_op_mulsh_i32;
148
have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
149
goto do_mul2;
150
case INDEX_op_mulu2_i64:
151
- opc_new = INDEX_op_mul_i64;
152
+ opc_new = INDEX_op_mul;
153
opc_new2 = INDEX_op_muluh_i64;
154
have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
155
goto do_mul2;
156
case INDEX_op_muls2_i64:
157
- opc_new = INDEX_op_mul_i64;
158
+ opc_new = INDEX_op_mul;
159
opc_new2 = INDEX_op_mulsh_i64;
160
have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
161
goto do_mul2;
162
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
163
case INDEX_op_and:
164
case INDEX_op_andc:
165
case INDEX_op_eqv:
166
- case INDEX_op_mul_i32:
167
- case INDEX_op_mul_i64:
168
+ case INDEX_op_mul:
169
case INDEX_op_nand:
170
case INDEX_op_nor:
171
case INDEX_op_or:
172
diff --git a/tcg/tci.c b/tcg/tci.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/tcg/tci.c
175
+++ b/tcg/tci.c
176
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
177
tci_args_rrr(insn, &r0, &r1, &r2);
178
regs[r0] = regs[r1] - regs[r2];
179
break;
180
- CASE_32_64(mul)
181
+ case INDEX_op_mul:
182
tci_args_rrr(insn, &r0, &r1, &r2);
183
regs[r0] = regs[r1] * regs[r2];
184
break;
185
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
186
case INDEX_op_and:
187
case INDEX_op_andc:
188
case INDEX_op_eqv:
189
+ case INDEX_op_mul:
190
case INDEX_op_nand:
191
case INDEX_op_nor:
192
case INDEX_op_or:
193
case INDEX_op_orc:
194
case INDEX_op_sub:
195
case INDEX_op_xor:
196
- case INDEX_op_mul_i32:
197
- case INDEX_op_mul_i64:
198
case INDEX_op_div_i32:
199
case INDEX_op_div_i64:
200
case INDEX_op_rem_i32:
201
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
202
index XXXXXXX..XXXXXXX 100644
203
--- a/docs/devel/tcg-ops.rst
204
+++ b/docs/devel/tcg-ops.rst
205
@@ -XXX,XX +XXX,XX @@ Arithmetic
206
207
- | *t0* = -*t1* (two's complement)
208
209
- * - mul_i32/i64 *t0*, *t1*, *t2*
210
+ * - mul *t0*, *t1*, *t2*
211
212
- | *t0* = *t1* * *t2*
213
214
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
215
index XXXXXXX..XXXXXXX 100644
216
--- a/tcg/tci/tcg-target.c.inc
217
+++ b/tcg/tci/tcg-target.c.inc
218
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
219
static void tgen_mul(TCGContext *s, TCGType type,
220
TCGReg a0, TCGReg a1, TCGReg a2)
221
{
222
- tcg_out_op_rrr(s, glue(INDEX_op_mul_i,TCG_TARGET_REG_BITS), a0, a1, a2);
223
+ tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
224
}
225
226
static const TCGOutOpBinary outop_mul = {
227
--
228
2.43.0
229
230
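For context on the mulu2 fallback touched above: when a backend lacks
mulu2 but has muluh, the widening multiply is decomposed into the
merged low-part opcode plus the still width-specific high-part opcode.
The i32 path, verbatim from the tcg_gen_mulu2_i32() hunk (rl, rh,
arg1, arg2 as in that function):

  TCGv_i32 t = tcg_temp_ebb_new_i32();
  tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);         /* low half */
  tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);  /* high half */
  tcg_gen_mov_i32(rl, t);   /* written last, in case rl aliases an input */
  tcg_temp_free_i32(t);

INDEX_op_muluh_i32 still exists at this point in the series; it is
converted in the following patch.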
Deleted patch
1
Remove unreachable mul[su]h_i32 leftovers from commit aeb6326ec5e
2
("tcg/riscv: Require TCG_TARGET_REG_BITS == 64").
3
1
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/aarch64/tcg-target-has.h | 2 --
8
tcg/arm/tcg-target-has.h | 1 -
9
tcg/i386/tcg-target-has.h | 2 --
10
tcg/loongarch64/tcg-target-has.h | 2 --
11
tcg/mips/tcg-target-has.h | 2 --
12
tcg/ppc/tcg-target-has.h | 2 --
13
tcg/riscv/tcg-target-has.h | 2 --
14
tcg/s390x/tcg-target-has.h | 2 --
15
tcg/sparc64/tcg-target-has.h | 2 --
16
tcg/tcg-has.h | 1 -
17
tcg/tci/tcg-target-has.h | 2 --
18
tcg/tcg-op.c | 7 ++++---
19
tcg/tcg.c | 16 ++++++---------
20
tcg/aarch64/tcg-target.c.inc | 21 ++++++++++++++++----
21
tcg/arm/tcg-target.c.inc | 4 ++++
22
tcg/i386/tcg-target.c.inc | 4 ++++
23
tcg/loongarch64/tcg-target.c.inc | 24 +++++++++++++---------
24
tcg/mips/tcg-target.c.inc | 34 +++++++++++++++++---------------
25
tcg/ppc/tcg-target.c.inc | 20 +++++++++++--------
26
tcg/riscv/tcg-target.c.inc | 24 +++++++++++++++-------
27
tcg/s390x/tcg-target.c.inc | 4 ++++
28
tcg/sparc64/tcg-target.c.inc | 23 ++++++++++++++++-----
29
tcg/tci/tcg-target.c.inc | 4 ++++
30
23 files changed, 123 insertions(+), 82 deletions(-)
31
32
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/aarch64/tcg-target-has.h
35
+++ b/tcg/aarch64/tcg-target-has.h
36
@@ -XXX,XX +XXX,XX @@
37
#define TCG_TARGET_HAS_sub2_i32 1
38
#define TCG_TARGET_HAS_mulu2_i32 0
39
#define TCG_TARGET_HAS_muls2_i32 0
40
-#define TCG_TARGET_HAS_muluh_i32 0
41
#define TCG_TARGET_HAS_mulsh_i32 0
42
#define TCG_TARGET_HAS_extr_i64_i32 0
43
#define TCG_TARGET_HAS_qemu_st8_i32 0
44
@@ -XXX,XX +XXX,XX @@
45
#define TCG_TARGET_HAS_sub2_i64 1
46
#define TCG_TARGET_HAS_mulu2_i64 0
47
#define TCG_TARGET_HAS_muls2_i64 0
48
-#define TCG_TARGET_HAS_muluh_i64 1
49
#define TCG_TARGET_HAS_mulsh_i64 1
50
51
/*
52
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
53
index XXXXXXX..XXXXXXX 100644
54
--- a/tcg/arm/tcg-target-has.h
55
+++ b/tcg/arm/tcg-target-has.h
56
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
57
#define TCG_TARGET_HAS_negsetcond_i32 1
58
#define TCG_TARGET_HAS_mulu2_i32 1
59
#define TCG_TARGET_HAS_muls2_i32 1
60
-#define TCG_TARGET_HAS_muluh_i32 0
61
#define TCG_TARGET_HAS_mulsh_i32 0
62
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
63
#define TCG_TARGET_HAS_rem_i32 0
64
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
65
index XXXXXXX..XXXXXXX 100644
66
--- a/tcg/i386/tcg-target-has.h
67
+++ b/tcg/i386/tcg-target-has.h
68
@@ -XXX,XX +XXX,XX @@
69
#define TCG_TARGET_HAS_sub2_i32 1
70
#define TCG_TARGET_HAS_mulu2_i32 1
71
#define TCG_TARGET_HAS_muls2_i32 1
72
-#define TCG_TARGET_HAS_muluh_i32 0
73
#define TCG_TARGET_HAS_mulsh_i32 0
74
75
#if TCG_TARGET_REG_BITS == 64
76
@@ -XXX,XX +XXX,XX @@
77
#define TCG_TARGET_HAS_sub2_i64 1
78
#define TCG_TARGET_HAS_mulu2_i64 1
79
#define TCG_TARGET_HAS_muls2_i64 1
80
-#define TCG_TARGET_HAS_muluh_i64 0
81
#define TCG_TARGET_HAS_mulsh_i64 0
82
#define TCG_TARGET_HAS_qemu_st8_i32 0
83
#else
84
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
85
index XXXXXXX..XXXXXXX 100644
86
--- a/tcg/loongarch64/tcg-target-has.h
87
+++ b/tcg/loongarch64/tcg-target-has.h
88
@@ -XXX,XX +XXX,XX @@
89
#define TCG_TARGET_HAS_sub2_i32 0
90
#define TCG_TARGET_HAS_mulu2_i32 0
91
#define TCG_TARGET_HAS_muls2_i32 0
92
-#define TCG_TARGET_HAS_muluh_i32 1
93
#define TCG_TARGET_HAS_mulsh_i32 1
94
#define TCG_TARGET_HAS_bswap16_i32 1
95
#define TCG_TARGET_HAS_bswap32_i32 1
96
@@ -XXX,XX +XXX,XX @@
97
#define TCG_TARGET_HAS_sub2_i64 0
98
#define TCG_TARGET_HAS_mulu2_i64 0
99
#define TCG_TARGET_HAS_muls2_i64 0
100
-#define TCG_TARGET_HAS_muluh_i64 1
101
#define TCG_TARGET_HAS_mulsh_i64 1
102
103
#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
104
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
105
index XXXXXXX..XXXXXXX 100644
106
--- a/tcg/mips/tcg-target-has.h
107
+++ b/tcg/mips/tcg-target-has.h
108
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
109
#define TCG_TARGET_HAS_rem_i32 1
110
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
111
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
112
-#define TCG_TARGET_HAS_muluh_i32 1
113
#define TCG_TARGET_HAS_mulsh_i32 1
114
#define TCG_TARGET_HAS_bswap16_i32 1
115
#define TCG_TARGET_HAS_bswap32_i32 1
116
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
117
#define TCG_TARGET_HAS_sub2_i64 0
118
#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
119
#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
120
-#define TCG_TARGET_HAS_muluh_i64 1
121
#define TCG_TARGET_HAS_mulsh_i64 1
122
#define TCG_TARGET_HAS_ext32s_i64 1
123
#define TCG_TARGET_HAS_ext32u_i64 1
124
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
125
index XXXXXXX..XXXXXXX 100644
126
--- a/tcg/ppc/tcg-target-has.h
127
+++ b/tcg/ppc/tcg-target-has.h
128
@@ -XXX,XX +XXX,XX @@
129
#define TCG_TARGET_HAS_negsetcond_i32 1
130
#define TCG_TARGET_HAS_mulu2_i32 0
131
#define TCG_TARGET_HAS_muls2_i32 0
132
-#define TCG_TARGET_HAS_muluh_i32 1
133
#define TCG_TARGET_HAS_mulsh_i32 1
134
#define TCG_TARGET_HAS_qemu_st8_i32 0
135
136
@@ -XXX,XX +XXX,XX @@
137
#define TCG_TARGET_HAS_sub2_i64 1
138
#define TCG_TARGET_HAS_mulu2_i64 0
139
#define TCG_TARGET_HAS_muls2_i64 0
140
-#define TCG_TARGET_HAS_muluh_i64 1
141
#define TCG_TARGET_HAS_mulsh_i64 1
142
#endif
143
144
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
145
index XXXXXXX..XXXXXXX 100644
146
--- a/tcg/riscv/tcg-target-has.h
147
+++ b/tcg/riscv/tcg-target-has.h
148
@@ -XXX,XX +XXX,XX @@
149
#define TCG_TARGET_HAS_sub2_i32 1
150
#define TCG_TARGET_HAS_mulu2_i32 0
151
#define TCG_TARGET_HAS_muls2_i32 0
152
-#define TCG_TARGET_HAS_muluh_i32 0
153
#define TCG_TARGET_HAS_mulsh_i32 0
154
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
155
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
156
@@ -XXX,XX +XXX,XX @@
157
#define TCG_TARGET_HAS_sub2_i64 1
158
#define TCG_TARGET_HAS_mulu2_i64 0
159
#define TCG_TARGET_HAS_muls2_i64 0
160
-#define TCG_TARGET_HAS_muluh_i64 1
161
#define TCG_TARGET_HAS_mulsh_i64 1
162
163
#define TCG_TARGET_HAS_qemu_ldst_i128 0
164
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
165
index XXXXXXX..XXXXXXX 100644
166
--- a/tcg/s390x/tcg-target-has.h
167
+++ b/tcg/s390x/tcg-target-has.h
168
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
169
#define TCG_TARGET_HAS_sub2_i32 1
170
#define TCG_TARGET_HAS_mulu2_i32 0
171
#define TCG_TARGET_HAS_muls2_i32 0
172
-#define TCG_TARGET_HAS_muluh_i32 0
173
#define TCG_TARGET_HAS_mulsh_i32 0
174
#define TCG_TARGET_HAS_extr_i64_i32 0
175
#define TCG_TARGET_HAS_qemu_st8_i32 0
176
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
177
#define TCG_TARGET_HAS_sub2_i64 1
178
#define TCG_TARGET_HAS_mulu2_i64 1
179
#define TCG_TARGET_HAS_muls2_i64 HAVE_FACILITY(MISC_INSN_EXT2)
180
-#define TCG_TARGET_HAS_muluh_i64 0
181
#define TCG_TARGET_HAS_mulsh_i64 0
182
183
#define TCG_TARGET_HAS_qemu_ldst_i128 1
184
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
185
index XXXXXXX..XXXXXXX 100644
186
--- a/tcg/sparc64/tcg-target-has.h
187
+++ b/tcg/sparc64/tcg-target-has.h
188
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
189
#define TCG_TARGET_HAS_sub2_i32 1
190
#define TCG_TARGET_HAS_mulu2_i32 1
191
#define TCG_TARGET_HAS_muls2_i32 1
192
-#define TCG_TARGET_HAS_muluh_i32 0
193
#define TCG_TARGET_HAS_mulsh_i32 0
194
#define TCG_TARGET_HAS_qemu_st8_i32 0
195
196
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
197
#define TCG_TARGET_HAS_sub2_i64 1
198
#define TCG_TARGET_HAS_mulu2_i64 0
199
#define TCG_TARGET_HAS_muls2_i64 0
200
-#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
201
#define TCG_TARGET_HAS_mulsh_i64 0
202
203
#define TCG_TARGET_HAS_qemu_ldst_i128 0
204
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
205
index XXXXXXX..XXXXXXX 100644
206
--- a/tcg/tcg-has.h
207
+++ b/tcg/tcg-has.h
208
@@ -XXX,XX +XXX,XX @@
209
#define TCG_TARGET_HAS_sub2_i64 0
210
#define TCG_TARGET_HAS_mulu2_i64 0
211
#define TCG_TARGET_HAS_muls2_i64 0
212
-#define TCG_TARGET_HAS_muluh_i64 0
213
#define TCG_TARGET_HAS_mulsh_i64 0
214
/* Turn some undef macros into true macros. */
215
#define TCG_TARGET_HAS_add2_i32 1
216
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
217
index XXXXXXX..XXXXXXX 100644
218
--- a/tcg/tci/tcg-target-has.h
219
+++ b/tcg/tci/tcg-target-has.h
220
@@ -XXX,XX +XXX,XX @@
221
#define TCG_TARGET_HAS_rot_i32 1
222
#define TCG_TARGET_HAS_negsetcond_i32 0
223
#define TCG_TARGET_HAS_muls2_i32 1
224
-#define TCG_TARGET_HAS_muluh_i32 0
225
#define TCG_TARGET_HAS_mulsh_i32 0
226
#define TCG_TARGET_HAS_qemu_st8_i32 0
227
228
@@ -XXX,XX +XXX,XX @@
229
#define TCG_TARGET_HAS_add2_i64 1
230
#define TCG_TARGET_HAS_sub2_i64 1
231
#define TCG_TARGET_HAS_mulu2_i64 1
232
-#define TCG_TARGET_HAS_muluh_i64 0
233
#define TCG_TARGET_HAS_mulsh_i64 0
234
#else
235
#define TCG_TARGET_HAS_mulu2_i32 1
236
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
237
index XXXXXXX..XXXXXXX 100644
238
--- a/tcg/tcg-op.c
239
+++ b/tcg/tcg-op.c
240
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
241
{
242
if (TCG_TARGET_HAS_mulu2_i32) {
243
tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
244
- } else if (TCG_TARGET_HAS_muluh_i32) {
245
+ } else if (tcg_op_supported(INDEX_op_muluh_i32, TCG_TYPE_I32, 0)) {
246
TCGv_i32 t = tcg_temp_ebb_new_i32();
247
tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
248
tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
249
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
250
{
251
if (TCG_TARGET_HAS_mulu2_i64) {
252
tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
253
- } else if (TCG_TARGET_HAS_muluh_i64) {
254
+ } else if (tcg_op_supported(INDEX_op_muluh_i64, TCG_TYPE_I64, 0)) {
255
TCGv_i64 t = tcg_temp_ebb_new_i64();
256
tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
257
tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
258
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
259
tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
260
tcg_gen_mov_i64(rl, t);
261
tcg_temp_free_i64(t);
262
- } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
263
+ } else if (TCG_TARGET_HAS_mulu2_i64 ||
264
+ tcg_op_supported(INDEX_op_muluh_i64, TCG_TYPE_I64, 0)) {
265
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
266
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
267
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
268
diff --git a/tcg/tcg.c b/tcg/tcg.c
269
index XXXXXXX..XXXXXXX 100644
270
--- a/tcg/tcg.c
271
+++ b/tcg/tcg.c
272
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
273
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
274
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
275
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
276
+ OUTOP(INDEX_op_muluh_i32, TCGOutOpBinary, outop_muluh),
277
+ OUTOP(INDEX_op_muluh_i64, TCGOutOpBinary, outop_muluh),
278
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
279
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
280
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
281
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
282
return TCG_TARGET_HAS_mulu2_i32;
283
case INDEX_op_muls2_i32:
284
return TCG_TARGET_HAS_muls2_i32;
285
- case INDEX_op_muluh_i32:
286
- return TCG_TARGET_HAS_muluh_i32;
287
case INDEX_op_mulsh_i32:
288
return TCG_TARGET_HAS_mulsh_i32;
289
case INDEX_op_bswap16_i32:
290
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
291
return TCG_TARGET_HAS_mulu2_i64;
292
case INDEX_op_muls2_i64:
293
return TCG_TARGET_HAS_muls2_i64;
294
- case INDEX_op_muluh_i64:
295
- return TCG_TARGET_HAS_muluh_i64;
296
case INDEX_op_mulsh_i64:
297
return TCG_TARGET_HAS_mulsh_i64;
298
299
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
300
QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
301
int nb_iargs, nb_oargs;
302
TCGOpcode opc_new, opc_new2;
303
- bool have_opc_new2;
304
TCGLifeData arg_life = 0;
305
TCGTemp *ts;
306
TCGOpcode opc = op->opc;
307
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
308
case INDEX_op_mulu2_i32:
309
opc_new = INDEX_op_mul;
310
opc_new2 = INDEX_op_muluh_i32;
311
- have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
312
goto do_mul2;
313
case INDEX_op_muls2_i32:
314
opc_new = INDEX_op_mul;
315
opc_new2 = INDEX_op_mulsh_i32;
316
- have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
317
goto do_mul2;
318
case INDEX_op_mulu2_i64:
319
opc_new = INDEX_op_mul;
320
opc_new2 = INDEX_op_muluh_i64;
321
- have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
322
goto do_mul2;
323
case INDEX_op_muls2_i64:
324
opc_new = INDEX_op_mul;
325
opc_new2 = INDEX_op_mulsh_i64;
326
- have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
327
goto do_mul2;
328
do_mul2:
329
nb_iargs = 2;
330
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
331
op->opc = opc = opc_new;
332
op->args[1] = op->args[2];
333
op->args[2] = op->args[3];
334
- } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
335
+ } else if (arg_temp(op->args[0])->state == TS_DEAD &&
336
+ tcg_op_supported(opc_new2, TCGOP_TYPE(op), 0)) {
337
/* The low part of the operation is dead; generate the high. */
338
op->opc = opc = opc_new2;
339
op->args[0] = op->args[1];
340
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
341
case INDEX_op_andc:
342
case INDEX_op_eqv:
343
case INDEX_op_mul:
344
+ case INDEX_op_muluh_i32:
345
+ case INDEX_op_muluh_i64:
346
case INDEX_op_nand:
347
case INDEX_op_nor:
348
case INDEX_op_or:
349
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
350
index XXXXXXX..XXXXXXX 100644
351
--- a/tcg/aarch64/tcg-target.c.inc
352
+++ b/tcg/aarch64/tcg-target.c.inc
353
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
354
.out_rrr = tgen_mul,
355
};
356
357
+static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
358
+{
359
+ return type == TCG_TYPE_I64 ? C_O1_I2(r, r, r) : C_NotImplemented;
360
+}
361
+
362
+static void tgen_muluh(TCGContext *s, TCGType type,
363
+ TCGReg a0, TCGReg a1, TCGReg a2)
364
+{
365
+ tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
366
+}
367
+
368
+static const TCGOutOpBinary outop_muluh = {
369
+ .base.static_constraint = C_Dynamic,
370
+ .base.dynamic_constraint = cset_mulh,
371
+ .out_rrr = tgen_muluh,
372
+};
373
+
374
static const TCGOutOpBinary outop_nand = {
375
.base.static_constraint = C_NotImplemented,
376
};
377
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
378
args[5], const_args[4], const_args[5], true);
379
break;
380
381
- case INDEX_op_muluh_i64:
382
- tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
383
- break;
384
case INDEX_op_mulsh_i64:
385
tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
386
break;
387
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
388
case INDEX_op_rem_i64:
389
case INDEX_op_remu_i32:
390
case INDEX_op_remu_i64:
391
- case INDEX_op_muluh_i64:
392
case INDEX_op_mulsh_i64:
393
return C_O1_I2(r, r, r);
394
395
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
396
index XXXXXXX..XXXXXXX 100644
397
--- a/tcg/arm/tcg-target.c.inc
398
+++ b/tcg/arm/tcg-target.c.inc
399
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
400
.out_rrr = tgen_mul,
401
};
402
403
+static const TCGOutOpBinary outop_muluh = {
404
+ .base.static_constraint = C_NotImplemented,
405
+};
406
+
407
static const TCGOutOpBinary outop_nand = {
408
.base.static_constraint = C_NotImplemented,
409
};
410
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
411
index XXXXXXX..XXXXXXX 100644
412
--- a/tcg/i386/tcg-target.c.inc
413
+++ b/tcg/i386/tcg-target.c.inc
414
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
415
.out_rri = tgen_muli,
416
};
417
418
+static const TCGOutOpBinary outop_muluh = {
419
+ .base.static_constraint = C_NotImplemented,
420
+};
421
+
422
static const TCGOutOpBinary outop_nand = {
423
.base.static_constraint = C_NotImplemented,
424
};
425
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
426
index XXXXXXX..XXXXXXX 100644
427
--- a/tcg/loongarch64/tcg-target.c.inc
428
+++ b/tcg/loongarch64/tcg-target.c.inc
429
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
430
.out_rrr = tgen_mul,
431
};
432
433
+static void tgen_muluh(TCGContext *s, TCGType type,
434
+ TCGReg a0, TCGReg a1, TCGReg a2)
435
+{
436
+ if (type == TCG_TYPE_I32) {
437
+ tcg_out_opc_mulh_wu(s, a0, a1, a2);
438
+ } else {
439
+ tcg_out_opc_mulh_du(s, a0, a1, a2);
440
+ }
441
+}
442
+
443
+static const TCGOutOpBinary outop_muluh = {
444
+ .base.static_constraint = C_O1_I2(r, r, r),
445
+ .out_rrr = tgen_muluh,
446
+};
447
+
448
static const TCGOutOpBinary outop_nand = {
449
.base.static_constraint = C_NotImplemented,
450
};
451
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
452
tcg_out_opc_mulh_d(s, a0, a1, a2);
453
break;
454
455
- case INDEX_op_muluh_i32:
456
- tcg_out_opc_mulh_wu(s, a0, a1, a2);
457
- break;
458
- case INDEX_op_muluh_i64:
459
- tcg_out_opc_mulh_du(s, a0, a1, a2);
460
- break;
461
-
462
case INDEX_op_div_i32:
463
tcg_out_opc_div_w(s, a0, a1, a2);
464
break;
465
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
466
467
case INDEX_op_mulsh_i32:
468
case INDEX_op_mulsh_i64:
469
- case INDEX_op_muluh_i32:
470
- case INDEX_op_muluh_i64:
471
case INDEX_op_div_i32:
472
case INDEX_op_div_i64:
473
case INDEX_op_divu_i32:
474
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
475
index XXXXXXX..XXXXXXX 100644
476
--- a/tcg/mips/tcg-target.c.inc
477
+++ b/tcg/mips/tcg-target.c.inc
478
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
479
.out_rrr = tgen_mul,
480
};
481
482
+static void tgen_muluh(TCGContext *s, TCGType type,
483
+ TCGReg a0, TCGReg a1, TCGReg a2)
484
+{
485
+ if (use_mips32r6_instructions) {
486
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MUHU : OPC_DMUHU;
487
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
488
+ } else {
489
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULTU : OPC_DMULTU;
490
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
491
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
492
+ }
493
+}
494
+
495
+static const TCGOutOpBinary outop_muluh = {
496
+ .base.static_constraint = C_O1_I2(r, r, r),
497
+ .out_rrr = tgen_muluh,
498
+};
499
+
500
static const TCGOutOpBinary outop_nand = {
501
.base.static_constraint = C_NotImplemented,
502
};
503
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
504
}
505
i1 = OPC_MULT, i2 = OPC_MFHI;
506
goto do_hilo1;
507
- case INDEX_op_muluh_i32:
508
- if (use_mips32r6_instructions) {
509
- tcg_out_opc_reg(s, OPC_MUHU, a0, a1, a2);
510
- break;
511
- }
512
- i1 = OPC_MULTU, i2 = OPC_MFHI;
513
- goto do_hilo1;
514
case INDEX_op_div_i32:
515
if (use_mips32r6_instructions) {
516
tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
517
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
518
}
519
i1 = OPC_DMULT, i2 = OPC_MFHI;
520
goto do_hilo1;
521
- case INDEX_op_muluh_i64:
522
- if (use_mips32r6_instructions) {
523
- tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2);
524
- break;
525
- }
526
- i1 = OPC_DMULTU, i2 = OPC_MFHI;
527
- goto do_hilo1;
528
case INDEX_op_div_i64:
529
if (use_mips32r6_instructions) {
530
tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
531
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
532
return C_O0_I2(rz, r);
533
534
case INDEX_op_mulsh_i32:
535
- case INDEX_op_muluh_i32:
536
case INDEX_op_div_i32:
537
case INDEX_op_divu_i32:
538
case INDEX_op_rem_i32:
539
case INDEX_op_remu_i32:
540
case INDEX_op_setcond_i32:
541
case INDEX_op_mulsh_i64:
542
- case INDEX_op_muluh_i64:
543
case INDEX_op_div_i64:
544
case INDEX_op_divu_i64:
545
case INDEX_op_rem_i64:
546
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
547
index XXXXXXX..XXXXXXX 100644
548
--- a/tcg/ppc/tcg-target.c.inc
549
+++ b/tcg/ppc/tcg-target.c.inc
550
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
551
.out_rri = tgen_muli,
552
};
553
554
+static void tgen_muluh(TCGContext *s, TCGType type,
555
+ TCGReg a0, TCGReg a1, TCGReg a2)
556
+{
557
+ uint32_t insn = type == TCG_TYPE_I32 ? MULHWU : MULHDU;
558
+ tcg_out32(s, insn | TAB(a0, a1, a2));
559
+}
560
+
561
+static const TCGOutOpBinary outop_muluh = {
562
+ .base.static_constraint = C_O1_I2(r, r, r),
563
+ .out_rrr = tgen_muluh,
564
+};
565
+
566
static void tgen_nand(TCGContext *s, TCGType type,
567
TCGReg a0, TCGReg a1, TCGReg a2)
568
{
569
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
570
}
571
break;
572
573
- case INDEX_op_muluh_i32:
574
- tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
575
- break;
576
case INDEX_op_mulsh_i32:
577
tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
578
break;
579
- case INDEX_op_muluh_i64:
580
-        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
-        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
-    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_mulsh_i64:
-    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_clz_i32:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
    .out_rrr = tgen_mul,
};

+static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
+{
+    return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r);
+}
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_muluh = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_mulh,
+    .out_rrr = tgen_muluh,
+};
+
static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

-    case INDEX_op_muluh_i32:
-    case INDEX_op_muluh_i64:
-        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
-        break;
-
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
        return C_O1_I2(r, r, rI);

    case INDEX_op_mulsh_i32:
-    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_mulsh_i64:
-    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
    .out_rri = tgen_muli,
};

+static const TCGOutOpBinary outop_muluh = {
+    .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
    .out_rri = tgen_muli,
};

+static void tgen_muluh(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI);
+}
+
+static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags)
+{
+    return (type == TCG_TYPE_I64 && use_vis3_instructions
+            ? C_O1_I2(r, r, r) : C_NotImplemented);
+}
+
+static const TCGOutOpBinary outop_muluh = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_muluh,
+    .out_rrr = tgen_muluh,
+};
+
static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
-    case INDEX_op_muluh_i64:
-        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
-        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rz, rJ);
-    case INDEX_op_muluh_i64:
-        return C_O1_I2(r, r, r);

    default:
        return C_NotImplemented;
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
    .out_rrr = tgen_mul,
};

+static const TCGOutOpBinary outop_muluh = {
+    .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
--
2.43.0

Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h  |  3 +--
 tcg/optimize.c         | 19 +++++++++++--------
 tcg/tcg-op.c           | 10 +++++-----
 tcg/tcg.c              | 13 ++++---------
 docs/devel/tcg-ops.rst |  2 +-
 5 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
+DEF(muluh, 1, 2, 0, TCG_OPF_INT)
DEF(nand, 1, 2, 0, TCG_OPF_INT)
DEF(neg, 1, 1, 0, TCG_OPF_INT)
DEF(nor, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(add2_i32, 2, 4, 0, 0)
DEF(sub2_i32, 2, 4, 0, 0)
DEF(mulu2_i32, 2, 2, 0, 0)
DEF(muls2_i32, 2, 2, 0, 0)
-DEF(muluh_i32, 1, 2, 0, 0)
DEF(mulsh_i32, 1, 2, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(add2_i64, 2, 4, 0, 0)
DEF(sub2_i64, 2, 4, 0, 0)
DEF(mulu2_i64, 2, 2, 0, 0)
DEF(muls2_i64, 2, 2, 0, 0)
-DEF(muluh_i64, 1, 2, 0, 0)
DEF(mulsh_i64, 1, 2, 0, 0)

#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

-static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
+static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
+                                      uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

-    case INDEX_op_muluh_i32:
-        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
+    case INDEX_op_muluh:
+        if (type == TCG_TYPE_I32) {
+            return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
+        }
+        mulu64(&l64, &h64, x, y);
+        return h64;
+
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

-    case INDEX_op_muluh_i64:
-        mulu64(&l64, &h64, x, y);
-        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
-    uint64_t res = do_constant_folding_2(op, x, y);
+    uint64_t res = do_constant_folding_2(op, type, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
-        CASE_OP_32_64(muluh):
+        case INDEX_op_muluh:
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i32) {
        tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_muluh_i32, TCG_TYPE_I32, 0)) {
+    } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) {
        TCGv_i32 t = tcg_temp_ebb_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
-        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_muluh, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 64) {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_mulu2_i64) {
        tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_muluh_i64, TCG_TYPE_I64, 0)) {
+    } else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
        TCGv_i64 t = tcg_temp_ebb_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
-        tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_muluh, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else if (TCG_TARGET_HAS_mulu2_i64 ||
-               tcg_op_supported(INDEX_op_muluh_i64, TCG_TYPE_I64, 0)) {
+               tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
        TCGv_i64 t2 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
    OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
-    OUTOP(INDEX_op_muluh_i32, TCGOutOpBinary, outop_muluh),
-    OUTOP(INDEX_op_muluh_i64, TCGOutOpBinary, outop_muluh),
+    OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
    OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
    OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
    OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
            }
            goto do_not_remove;

-        case INDEX_op_mulu2_i32:
-            opc_new = INDEX_op_mul;
-            opc_new2 = INDEX_op_muluh_i32;
-            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul;
            opc_new2 = INDEX_op_mulsh_i32;
            goto do_mul2;
+        case INDEX_op_mulu2_i32:
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul;
-            opc_new2 = INDEX_op_muluh_i64;
+            opc_new2 = INDEX_op_muluh;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_andc:
    case INDEX_op_eqv:
    case INDEX_op_mul:
-    case INDEX_op_muluh_i32:
-    case INDEX_op_muluh_i64:
+    case INDEX_op_muluh:
    case INDEX_op_nand:
    case INDEX_op_nor:
    case INDEX_op_or:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Multiword arithmetic support

* - mulsh_i32/i64 *t0*, *t1*, *t2*

- muluh_i32/i64 *t0*, *t1*, *t2*
+ muluh *t0*, *t1*, *t2*

- | Provide the high part of a signed or unsigned multiply, respectively.
|
--
2.43.0

Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h  |  3 +--
 tcg/optimize.c         | 10 +++++-----
 tcg/tcg-op.c           |  8 ++++----
 tcg/tcg.c              | 14 ++++----------
 docs/devel/tcg-ops.rst |  2 +-
 5 files changed, 15 insertions(+), 22 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
+DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
DEF(muluh, 1, 2, 0, TCG_OPF_INT)
DEF(nand, 1, 2, 0, TCG_OPF_INT)
DEF(neg, 1, 1, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(add2_i32, 2, 4, 0, 0)
DEF(sub2_i32, 2, 4, 0, 0)
DEF(mulu2_i32, 2, 2, 0, 0)
DEF(muls2_i32, 2, 2, 0, 0)
-DEF(mulsh_i32, 1, 2, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)

@@ -XXX,XX +XXX,XX @@ DEF(add2_i64, 2, 4, 0, 0)
DEF(sub2_i64, 2, 4, 0, 0)
DEF(mulu2_i64, 2, 2, 0, 0)
DEF(muls2_i64, 2, 2, 0, 0)
-DEF(mulsh_i64, 1, 2, 0, 0)

#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
        mulu64(&l64, &h64, x, y);
        return h64;

-    case INDEX_op_mulsh_i32:
-        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
-
-    case INDEX_op_mulsh_i64:
+    case INDEX_op_mulsh:
+        if (type == TCG_TYPE_I32) {
+            return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
+        }
        muls64(&l64, &h64, x, y);
        return h64;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        case INDEX_op_mul:
            done = fold_mul(&ctx, op);
            break;
-        CASE_OP_32_64(mulsh):
+        case INDEX_op_mulsh:
        case INDEX_op_muluh:
            done = fold_mul_highpart(&ctx, op);
            break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_muls2_i32) {
        tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_mulsh_i32, TCG_TYPE_I32, 0)) {
+    } else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I32, 0)) {
        TCGv_i32 t = tcg_temp_ebb_new_i32();
        tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
-        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_mulsh, rh, arg1, arg2);
        tcg_gen_mov_i32(rl, t);
        tcg_temp_free_i32(t);
    } else if (TCG_TARGET_REG_BITS == 32) {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_muls2_i64) {
        tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_mulsh_i64, TCG_TYPE_I64, 0)) {
+    } else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I64, 0)) {
        TCGv_i64 t = tcg_temp_ebb_new_i64();
        tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
-        tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_mulsh, rh, arg1, arg2);
        tcg_gen_mov_i64(rl, t);
        tcg_temp_free_i64(t);
    } else if (TCG_TARGET_HAS_mulu2_i64 ||
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
    OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
-    OUTOP(INDEX_op_mulsh_i32, TCGOutOpBinary, outop_mulsh),
-    OUTOP(INDEX_op_mulsh_i64, TCGOutOpBinary, outop_mulsh),
+    OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
    OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
    OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
    OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
            goto do_not_remove;

        case INDEX_op_muls2_i32:
+        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul;
-            opc_new2 = INDEX_op_mulsh_i32;
+            opc_new2 = INDEX_op_mulsh;
            goto do_mul2;
        case INDEX_op_mulu2_i32:
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul;
            opc_new2 = INDEX_op_muluh;
-            goto do_mul2;
-        case INDEX_op_muls2_i64:
-            opc_new = INDEX_op_mul;
-            opc_new2 = INDEX_op_mulsh_i64;
-            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_andc:
    case INDEX_op_eqv:
    case INDEX_op_mul:
-    case INDEX_op_mulsh_i32:
-    case INDEX_op_mulsh_i64:
+    case INDEX_op_mulsh:
    case INDEX_op_muluh:
    case INDEX_op_nand:
    case INDEX_op_nor:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Multiword arithmetic support

- | Similar to mulu2, except the two inputs *t1* and *t2* are signed.

- * - mulsh_i32/i64 *t0*, *t1*, *t2*
+ * - mulsh *t0*, *t1*, *t2*

muluh *t0*, *t1*, *t2*

--
2.43.0

Deleted patch

For TCI, we're losing type information in the interpreter.
Introduce a tci-specific opcode to handle the difference.
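
As a rough illustration of the problem (a hypothetical example, not code
from this patch): TCI registers are untyped uint64_t, so once the i32 and
i64 opcodes merge into one "divs", the interpreter alone cannot recover
which operand width was intended.

    /* Hypothetical sketch; the values and variable names are invented. */
    uint64_t regs[2] = { 0x100000000ull, 2 };

    int64_t q64 = (int64_t)regs[0] / (int64_t)regs[1];   /* 0x80000000 */
    int32_t q32 = (int32_t)regs[0] / (int32_t)regs[1];   /* 0, low bits only */

Hence the INDEX_op_tci_divs32 opcode below keeps the 32-bit variant
distinguishable in the TCI instruction stream.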
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op.c                     |  8 +++---
 tcg/tcg.c                        |  6 +++--
 tcg/tci.c                        |  3 ++-
 tcg/aarch64/tcg-target.c.inc     | 17 ++++++-----
 tcg/arm/tcg-target.c.inc         | 28 +++++++++++++--------
 tcg/i386/tcg-target.c.inc        |  4 +++
 tcg/loongarch64/tcg-target.c.inc | 24 +++++++++-------
 tcg/mips/tcg-target.c.inc        | 37 ++++++++++++++++------------
 tcg/ppc/tcg-target.c.inc         | 21 +++++++++-------
 tcg/riscv/tcg-target.c.inc       | 21 +++++++++-------
 tcg/s390x/tcg-target.c.inc       |  4 +++
 tcg/sparc64/tcg-target.c.inc     | 42 ++++++++++++++++++++++++------
 tcg/tci/tcg-target-opc.h.inc     |  1 +
 tcg/tci/tcg-target.c.inc         | 17 ++++++++++---
 14 files changed, 156 insertions(+), 77 deletions(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
-    if (TCG_TARGET_HAS_div_i32) {
+    if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
-    } else if (TCG_TARGET_HAS_div_i32) {
+    } else if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
-    if (TCG_TARGET_HAS_div_i64) {
+    if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
-    } else if (TCG_TARGET_HAS_div_i64) {
+    } else if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
+    OUTOP(INDEX_op_div_i32, TCGOutOpBinary, outop_divs),
+    OUTOP(INDEX_op_div_i64, TCGOutOpBinary, outop_divs),
    OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
    OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
    OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)

    case INDEX_op_negsetcond_i32:
        return TCG_TARGET_HAS_negsetcond_i32;
-    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)

    case INDEX_op_negsetcond_i64:
        return TCG_TARGET_HAS_negsetcond_i64;
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_add:
    case INDEX_op_and:
    case INDEX_op_andc:
+    case INDEX_op_div_i32:
+    case INDEX_op_div_i64:
    case INDEX_op_eqv:
    case INDEX_op_mul:
    case INDEX_op_mulsh:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

        /* Arithmetic operations (32 bit). */

-        case INDEX_op_div_i32:
+        case INDEX_op_tci_divs32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
+    case INDEX_op_tci_divs32:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3508, SDIV, type, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_divs,
+};
+
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
        tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
        break;

-    case INDEX_op_div_i64:
-    case INDEX_op_div_i32:
-        tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
-        break;
    case INDEX_op_divu_i64:
    case INDEX_op_divu_i32:
        tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rC);

-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

-static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
-                         TCGReg rd, TCGReg rn, TCGReg rm)
-{
-    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
-}
-
static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
+{
+    return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    /* sdiv */
+    tcg_out32(s, 0x0710f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_idiv,
+    .out_rrr = tgen_divs,
+};
+
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-    case INDEX_op_div_i32:
-        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
-        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ctz_i32:
        return C_O1_I2(r, r, rIK);

-    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return C_O1_I2(r, r, r);

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_opc_div_w(s, a0, a1, a2);
+    } else {
+        tcg_out_opc_div_d(s, a0, a1, a2);
+    }
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_divs,
+};
+
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-    case INDEX_op_div_i32:
-        tcg_out_opc_div_w(s, a0, a1, a2);
-        break;
-    case INDEX_op_div_i64:
-        tcg_out_opc_div_d(s, a0, a1, a2);
-        break;
-
    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rz, rJ);

-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_NotImplemented,
};

+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (use_mips32r6_instructions) {
+        if (type == TCG_TYPE_I32) {
+            tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
+        } else {
+            tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
+        }
+    } else {
+        MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIV : OPC_DDIV;
+        tcg_out_opc_reg(s, insn, 0, a1, a2);
+        tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+    }
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_divs,
+};
+
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

-    case INDEX_op_div_i32:
-        if (use_mips32r6_instructions) {
-            tcg_out_opc_reg(s, OPC_DIV_R6, a0, a1, a2);
-            break;
-        }
-        i1 = OPC_DIV, i2 = OPC_MFLO;
-        goto do_hilo1;
    case INDEX_op_divu_i32:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        i1 = OPC_DIVU, i2 = OPC_MFHI;
        goto do_hilo1;
-    case INDEX_op_div_i64:
-        if (use_mips32r6_instructions) {
-            tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
-            break;
-        }
-        i1 = OPC_DDIV, i2 = OPC_MFLO;
-        goto do_hilo1;
    case INDEX_op_divu_i64:
        if (use_mips32r6_instructions) {
            tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_st_i64:
        return C_O0_I2(rz, r);

-    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_eqv(TCGContext *s, TCGType type,
    tcg_out32(s, EQV | SAB(a1, a0, a2));
}

+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    uint32_t insn = type == TCG_TYPE_I32 ? DIVW : DIVD;
+    tcg_out32(s, insn | TAB(a0, a1, a2));
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_divs,
+};
+
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;

-    case INDEX_op_div_i32:
-        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
-        break;
-
    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-    case INDEX_op_div_i64:
-        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
-        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

-    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVW : OPC_DIV;
+    tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_divs,
+};
+
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

-    case INDEX_op_div_i32:
-        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
-        break;
-    case INDEX_op_div_i64:
-        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
-        break;
-
    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_negsetcond_i64:
        return C_O1_I2(r, r, rI);

-    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static void tgen_divs_rJ(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
+{
+    uint32_t insn;
+
+    if (type == TCG_TYPE_I32) {
+        /* Load Y with the sign extension of a1 to 64-bits. */
+        tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA);
+        tcg_out_sety(s, TCG_REG_T1);
+        insn = ARITH_SDIV;
+    } else {
+        insn = ARITH_SDIVX;
+    }
+    tcg_out_arithc(s, a0, a1, a2, c2, insn);
+}
+
+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tgen_divs_rJ(s, type, a0, a1, a2, false);
+}
+
+static void tgen_divsi(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tgen_divs_rJ(s, type, a0, a1, a2, true);
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, rJ),
+    .out_rrr = tgen_divs,
+    .out_rri = tgen_divsi,
+};
+
static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        c = SHIFT_SRA;
        goto do_shift32;

-    case INDEX_op_div_i32:
-        tcg_out_div32(s, a0, a1, a2, c2, 0);
-        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
-    case INDEX_op_div_i64:
-        c = ARITH_SDIVX;
-        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(rz, r);

-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_shl_i32:
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-opc.h.inc
+++ b/tcg/tci/tcg-target-opc.h.inc
@@ -XXX,XX +XXX,XX @@
/* These opcodes for use between the tci generator and interpreter. */
DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
    .out_rrr = tgen_andc,
};

+static void tgen_divs(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    TCGOpcode opc = (type == TCG_TYPE_I32
+                     ? INDEX_op_tci_divs32
+                     : INDEX_op_div_i64);
+    tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_divs = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_divs,
+};
+
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
    CASE_32_64(sar)
    CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
-    CASE_32_64(div)  /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(rem)  /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
--
2.43.0
Deleted patch

Rename to INDEX_op_divs to emphasize signed inputs,
mirroring INDEX_op_divu_*.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 12 +++++++-----
 tcg/tcg-op.c             | 16 ++++++++--------
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                |  5 ++---
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(divs, 1, 2, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
-DEF(div_i32, 1, 2, 0, 0)
DEF(divu_i32, 1, 2, 0, 0)
DEF(rem_i32, 1, 2, 0, 0)
DEF(remu_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* arith */
-DEF(div_i64, 1, 2, 0, 0)
DEF(divu_i64, 1, 2, 0, 0)
DEF(rem_i64, 1, 2, 0, 0)
DEF(remu_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
        muls64(&l64, &h64, x, y);
        return h64;

-    case INDEX_op_div_i32:
+    case INDEX_op_divs:
        /* Avoid crashing on divide by zero, otherwise undefined. */
-        return (int32_t)x / ((int32_t)y ? : 1);
+        if (type == TCG_TYPE_I32) {
+            return (int32_t)x / ((int32_t)y ? : 1);
+        }
+        return (int64_t)x / ((int64_t)y ? : 1);
+
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
-    case INDEX_op_div_i64:
-        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
-        CASE_OP_32_64(div):
+        case INDEX_op_divs:
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
-    if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_divs, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i32) {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
        tcg_gen_sari_i32(t0, arg1, 31);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    if (TCG_TARGET_HAS_rem_i32) {
        tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
+    } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
-        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);
        tcg_gen_mul_i32(t0, t0, arg2);
        tcg_gen_sub_i32(ret, arg1, t0);
        tcg_temp_free_i32(t0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)

void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
-    if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_divs, ret, arg1, arg2);
    } else if (TCG_TARGET_HAS_div2_i64) {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        tcg_gen_sari_i64(t0, arg1, 63);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    if (TCG_TARGET_HAS_rem_i64) {
        tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
+    } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
-        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2);
        tcg_gen_mul_i64(t0, t0, arg2);
        tcg_gen_sub_i64(ret, arg1, t0);
        tcg_temp_free_i64(t0);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
-    OUTOP(INDEX_op_div_i32, TCGOutOpBinary, outop_divs),
-    OUTOP(INDEX_op_div_i64, TCGOutOpBinary, outop_divs),
+    OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
    OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
    OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
    OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
    case INDEX_op_add:
    case INDEX_op_and:
    case INDEX_op_andc:
-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
+    case INDEX_op_divs:
    case INDEX_op_eqv:
    case INDEX_op_mul:
    case INDEX_op_mulsh:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

        /* Arithmetic operations (64 bit). */

-        case INDEX_op_div_i64:
+        case INDEX_op_divs:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
    case INDEX_op_add:
    case INDEX_op_and:
    case INDEX_op_andc:
+    case INDEX_op_divs:
    case INDEX_op_eqv:
    case INDEX_op_mul:
    case INDEX_op_nand:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
    case INDEX_op_orc:
    case INDEX_op_sub:
    case INDEX_op_xor:
-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Arithmetic

- | *t0* = *t1* * *t2*

- * - div_i32/i64 *t0*, *t1*, *t2*
+ * - divs *t0*, *t1*, *t2*

- | *t0* = *t1* / *t2* (signed)
| Undefined behavior if division by zero or overflow.
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_divs(TCGContext *s, TCGType type,
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
-                     : INDEX_op_div_i64);
+                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

--
2.43.0

Deleted patch

For TCI, we're losing type information in the interpreter.
Introduce a tci-specific opcode to handle the difference.
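
The same width ambiguity applies to the unsigned case (a hypothetical
sketch, not code from this patch): dividing the full 64-bit register
values gives a different answer than dividing only their low 32 bits.

    /* Hypothetical sketch; the values and variable names are invented. */
    uint64_t a = 0x1fffffffeull, b = 2;

    uint64_t q64 = a / b;                       /* 0xffffffff */
    uint32_t q32 = (uint32_t)a / (uint32_t)b;   /* 0x7fffffff */

Hence INDEX_op_tci_divu32 below, mirroring tci_divs32.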
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/aarch64/tcg-target-has.h | 2 --
8
tcg/arm/tcg-target-has.h | 1 -
9
tcg/loongarch64/tcg-target-has.h | 2 --
10
tcg/mips/tcg-target-has.h | 2 --
11
tcg/ppc/tcg-target-has.h | 2 --
12
tcg/riscv/tcg-target-has.h | 2 --
13
tcg/sparc64/tcg-target-has.h | 2 --
14
tcg/tcg-has.h | 15 ++++----
15
tcg/tci/tcg-target-has.h | 2 --
16
tcg/tcg-op.c | 8 ++---
17
tcg/tcg.c | 8 ++---
18
tcg/tci.c | 3 +-
19
tcg/aarch64/tcg-target.c.inc | 18 ++++++----
20
tcg/arm/tcg-target.c.inc | 26 +++++++-------
21
tcg/i386/tcg-target.c.inc | 4 +++
22
tcg/loongarch64/tcg-target.c.inc | 24 ++++++++-----
23
tcg/mips/tcg-target.c.inc | 37 ++++++++++---------
24
tcg/ppc/tcg-target.c.inc | 21 ++++++-----
25
tcg/riscv/tcg-target.c.inc | 21 ++++++-----
26
tcg/s390x/tcg-target.c.inc | 4 +++
27
tcg/sparc64/tcg-target.c.inc | 61 +++++++++++++++++---------------
28
tcg/tci/tcg-target-opc.h.inc | 1 +
29
tcg/tci/tcg-target.c.inc | 17 +++++++--
30
23 files changed, 157 insertions(+), 126 deletions(-)
31
32
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/aarch64/tcg-target-has.h
35
+++ b/tcg/aarch64/tcg-target-has.h
36
@@ -XXX,XX +XXX,XX @@
37
#define have_lse2 (cpuinfo & CPUINFO_LSE2)
38
39
/* optional instructions */
40
-#define TCG_TARGET_HAS_div_i32 1
41
#define TCG_TARGET_HAS_rem_i32 1
42
#define TCG_TARGET_HAS_bswap16_i32 1
43
#define TCG_TARGET_HAS_bswap32_i32 1
44
@@ -XXX,XX +XXX,XX @@
45
#define TCG_TARGET_HAS_extr_i64_i32 0
46
#define TCG_TARGET_HAS_qemu_st8_i32 0
47
48
-#define TCG_TARGET_HAS_div_i64 1
49
#define TCG_TARGET_HAS_rem_i64 1
50
#define TCG_TARGET_HAS_bswap16_i64 1
51
#define TCG_TARGET_HAS_bswap32_i64 1
52
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
53
index XXXXXXX..XXXXXXX 100644
54
--- a/tcg/arm/tcg-target-has.h
55
+++ b/tcg/arm/tcg-target-has.h
56
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
57
#define TCG_TARGET_HAS_negsetcond_i32 1
58
#define TCG_TARGET_HAS_mulu2_i32 1
59
#define TCG_TARGET_HAS_muls2_i32 1
60
-#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
61
#define TCG_TARGET_HAS_rem_i32 0
62
#define TCG_TARGET_HAS_qemu_st8_i32 0
63
64
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
65
index XXXXXXX..XXXXXXX 100644
66
--- a/tcg/loongarch64/tcg-target-has.h
67
+++ b/tcg/loongarch64/tcg-target-has.h
68
@@ -XXX,XX +XXX,XX @@
69
70
/* optional instructions */
71
#define TCG_TARGET_HAS_negsetcond_i32 0
72
-#define TCG_TARGET_HAS_div_i32 1
73
#define TCG_TARGET_HAS_rem_i32 1
74
#define TCG_TARGET_HAS_div2_i32 0
75
#define TCG_TARGET_HAS_rot_i32 1
76
@@ -XXX,XX +XXX,XX @@
77
78
/* 64-bit operations */
79
#define TCG_TARGET_HAS_negsetcond_i64 0
80
-#define TCG_TARGET_HAS_div_i64 1
81
#define TCG_TARGET_HAS_rem_i64 1
82
#define TCG_TARGET_HAS_div2_i64 0
83
#define TCG_TARGET_HAS_rot_i64 1
84
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
85
index XXXXXXX..XXXXXXX 100644
86
--- a/tcg/mips/tcg-target-has.h
87
+++ b/tcg/mips/tcg-target-has.h
88
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
89
#endif
90
91
/* optional instructions */
92
-#define TCG_TARGET_HAS_div_i32 1
93
#define TCG_TARGET_HAS_rem_i32 1
94
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
95
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
96
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
97
#define TCG_TARGET_HAS_add2_i32 0
98
#define TCG_TARGET_HAS_sub2_i32 0
99
#define TCG_TARGET_HAS_extr_i64_i32 1
100
-#define TCG_TARGET_HAS_div_i64 1
101
#define TCG_TARGET_HAS_rem_i64 1
102
#define TCG_TARGET_HAS_add2_i64 0
103
#define TCG_TARGET_HAS_sub2_i64 0
104
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
105
index XXXXXXX..XXXXXXX 100644
106
--- a/tcg/ppc/tcg-target-has.h
107
+++ b/tcg/ppc/tcg-target-has.h
108
@@ -XXX,XX +XXX,XX @@
109
#define have_vsx (cpuinfo & CPUINFO_VSX)
110
111
/* optional instructions */
112
-#define TCG_TARGET_HAS_div_i32 1
113
#define TCG_TARGET_HAS_rem_i32 have_isa_3_00
114
#define TCG_TARGET_HAS_rot_i32 1
115
#define TCG_TARGET_HAS_bswap16_i32 1
116
@@ -XXX,XX +XXX,XX @@
117
#define TCG_TARGET_HAS_add2_i32 0
118
#define TCG_TARGET_HAS_sub2_i32 0
119
#define TCG_TARGET_HAS_extr_i64_i32 0
120
-#define TCG_TARGET_HAS_div_i64 1
121
#define TCG_TARGET_HAS_rem_i64 have_isa_3_00
122
#define TCG_TARGET_HAS_rot_i64 1
123
#define TCG_TARGET_HAS_bswap16_i64 1
124
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
125
index XXXXXXX..XXXXXXX 100644
126
--- a/tcg/riscv/tcg-target-has.h
127
+++ b/tcg/riscv/tcg-target-has.h
128
@@ -XXX,XX +XXX,XX @@
129
130
/* optional instructions */
131
#define TCG_TARGET_HAS_negsetcond_i32 1
132
-#define TCG_TARGET_HAS_div_i32 1
133
#define TCG_TARGET_HAS_rem_i32 1
134
#define TCG_TARGET_HAS_div2_i32 0
135
#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
136
@@ -XXX,XX +XXX,XX @@
137
#define TCG_TARGET_HAS_qemu_st8_i32 0
138
139
#define TCG_TARGET_HAS_negsetcond_i64 1
140
-#define TCG_TARGET_HAS_div_i64 1
141
#define TCG_TARGET_HAS_rem_i64 1
142
#define TCG_TARGET_HAS_div2_i64 0
143
#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
144
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
145
index XXXXXXX..XXXXXXX 100644
146
--- a/tcg/sparc64/tcg-target-has.h
147
+++ b/tcg/sparc64/tcg-target-has.h
148
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
149
#endif
150
151
/* optional instructions */
152
-#define TCG_TARGET_HAS_div_i32        1
153
#define TCG_TARGET_HAS_rem_i32        0
154
#define TCG_TARGET_HAS_rot_i32 0
155
#define TCG_TARGET_HAS_bswap16_i32 0
156
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
157
#define TCG_TARGET_HAS_qemu_st8_i32 0
158
159
#define TCG_TARGET_HAS_extr_i64_i32 0
160
-#define TCG_TARGET_HAS_div_i64 1
161
#define TCG_TARGET_HAS_rem_i64 0
162
#define TCG_TARGET_HAS_rot_i64 0
163
#define TCG_TARGET_HAS_bswap16_i64 0
164
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
165
index XXXXXXX..XXXXXXX 100644
166
--- a/tcg/tcg-has.h
167
+++ b/tcg/tcg-has.h
168
@@ -XXX,XX +XXX,XX @@
169
#define TCG_TARGET_HAS_sub2_i32 1
170
#endif
171
172
-/* Only one of DIV or DIV2 should be defined. */
173
-#if defined(TCG_TARGET_HAS_div_i32)
174
+#ifndef TCG_TARGET_HAS_div2_i32
175
#define TCG_TARGET_HAS_div2_i32 0
176
-#elif defined(TCG_TARGET_HAS_div2_i32)
177
-#define TCG_TARGET_HAS_div_i32 0
178
+#endif
179
+#ifndef TCG_TARGET_HAS_div2_i64
180
+#define TCG_TARGET_HAS_div2_i64 0
181
+#endif
182
+#ifndef TCG_TARGET_HAS_rem_i32
183
#define TCG_TARGET_HAS_rem_i32 0
184
#endif
185
-#if defined(TCG_TARGET_HAS_div_i64)
186
-#define TCG_TARGET_HAS_div2_i64 0
187
-#elif defined(TCG_TARGET_HAS_div2_i64)
188
-#define TCG_TARGET_HAS_div_i64 0
189
+#ifndef TCG_TARGET_HAS_rem_i64
190
#define TCG_TARGET_HAS_rem_i64 0
191
#endif
192
193
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
194
index XXXXXXX..XXXXXXX 100644
195
--- a/tcg/tci/tcg-target-has.h
196
+++ b/tcg/tci/tcg-target-has.h
197
@@ -XXX,XX +XXX,XX @@
198
199
#define TCG_TARGET_HAS_bswap16_i32 1
200
#define TCG_TARGET_HAS_bswap32_i32 1
201
-#define TCG_TARGET_HAS_div_i32 1
202
#define TCG_TARGET_HAS_rem_i32 1
203
#define TCG_TARGET_HAS_extract2_i32 0
204
#define TCG_TARGET_HAS_clz_i32 1
205
@@ -XXX,XX +XXX,XX @@
206
#define TCG_TARGET_HAS_bswap32_i64 1
207
#define TCG_TARGET_HAS_bswap64_i64 1
208
#define TCG_TARGET_HAS_extract2_i64 0
209
-#define TCG_TARGET_HAS_div_i64 1
210
#define TCG_TARGET_HAS_rem_i64 1
211
#define TCG_TARGET_HAS_clz_i64 1
212
#define TCG_TARGET_HAS_ctz_i64 1
213
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
214
index XXXXXXX..XXXXXXX 100644
215
--- a/tcg/tcg-op.c
216
+++ b/tcg/tcg-op.c
217
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
218
219
void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
220
{
221
- if (TCG_TARGET_HAS_div_i32) {
222
+ if (tcg_op_supported(INDEX_op_divu_i32, TCG_TYPE_I32, 0)) {
223
tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
224
} else if (TCG_TARGET_HAS_div2_i32) {
225
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
226
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
227
{
228
if (TCG_TARGET_HAS_rem_i32) {
229
tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
230
- } else if (TCG_TARGET_HAS_div_i32) {
231
+ } else if (tcg_op_supported(INDEX_op_divu_i32, TCG_TYPE_I32, 0)) {
232
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
233
tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
234
tcg_gen_mul_i32(t0, t0, arg2);
235
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
236
237
void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
238
{
239
- if (TCG_TARGET_HAS_div_i64) {
240
+ if (tcg_op_supported(INDEX_op_divu_i64, TCG_TYPE_I64, 0)) {
241
tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
242
} else if (TCG_TARGET_HAS_div2_i64) {
243
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
244
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
245
{
246
if (TCG_TARGET_HAS_rem_i64) {
247
tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
248
- } else if (TCG_TARGET_HAS_div_i64) {
249
+ } else if (tcg_op_supported(INDEX_op_divu_i64, TCG_TYPE_I64, 0)) {
250
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
251
tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
252
tcg_gen_mul_i64(t0, t0, arg2);
253
diff --git a/tcg/tcg.c b/tcg/tcg.c
254
index XXXXXXX..XXXXXXX 100644
255
--- a/tcg/tcg.c
256
+++ b/tcg/tcg.c
257
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
258
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
259
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
260
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
261
+ OUTOP(INDEX_op_divu_i32, TCGOutOpBinary, outop_divu),
262
+ OUTOP(INDEX_op_divu_i64, TCGOutOpBinary, outop_divu),
263
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
264
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
265
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
266
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
267
268
case INDEX_op_negsetcond_i32:
269
return TCG_TARGET_HAS_negsetcond_i32;
270
- case INDEX_op_divu_i32:
271
- return TCG_TARGET_HAS_div_i32;
272
case INDEX_op_rem_i32:
273
case INDEX_op_remu_i32:
274
return TCG_TARGET_HAS_rem_i32;
275
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
276
277
case INDEX_op_negsetcond_i64:
278
return TCG_TARGET_HAS_negsetcond_i64;
279
- case INDEX_op_divu_i64:
280
- return TCG_TARGET_HAS_div_i64;
281
case INDEX_op_rem_i64:
282
case INDEX_op_remu_i64:
283
return TCG_TARGET_HAS_rem_i64;
284
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
285
case INDEX_op_and:
286
case INDEX_op_andc:
287
case INDEX_op_divs:
288
+ case INDEX_op_divu_i32:
289
+ case INDEX_op_divu_i64:
290
case INDEX_op_eqv:
291
case INDEX_op_mul:
292
case INDEX_op_mulsh:
293
diff --git a/tcg/tci.c b/tcg/tci.c
294
index XXXXXXX..XXXXXXX 100644
295
--- a/tcg/tci.c
296
+++ b/tcg/tci.c
297
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
298
tci_args_rrr(insn, &r0, &r1, &r2);
299
regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
300
break;
301
- case INDEX_op_divu_i32:
302
+ case INDEX_op_tci_divu32:
303
tci_args_rrr(insn, &r0, &r1, &r2);
304
regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
305
break;
306
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
307
case INDEX_op_ctz_i32:
308
case INDEX_op_ctz_i64:
309
case INDEX_op_tci_divs32:
310
+ case INDEX_op_tci_divu32:
311
tci_args_rrr(insn, &r0, &r1, &r2);
312
info->fprintf_func(info->stream, "%-12s %s, %s, %s",
313
op_name, str_r(r0), str_r(r1), str_r(r2));
314
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
315
index XXXXXXX..XXXXXXX 100644
316
--- a/tcg/aarch64/tcg-target.c.inc
317
+++ b/tcg/aarch64/tcg-target.c.inc
318
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
319
.out_rrr = tgen_divs,
320
};
321
322
+static void tgen_divu(TCGContext *s, TCGType type,
323
+ TCGReg a0, TCGReg a1, TCGReg a2)
324
+{
325
+ tcg_out_insn(s, 3508, UDIV, type, a0, a1, a2);
326
+}
327
+
328
+static const TCGOutOpBinary outop_divu = {
329
+ .base.static_constraint = C_O1_I2(r, r, r),
330
+ .out_rrr = tgen_divu,
331
+};
332
+
333
static void tgen_eqv(TCGContext *s, TCGType type,
334
TCGReg a0, TCGReg a1, TCGReg a2)
335
{
336
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
337
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
338
break;
339
340
- case INDEX_op_divu_i64:
341
- case INDEX_op_divu_i32:
342
- tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
343
- break;
344
-
345
case INDEX_op_rem_i64:
346
case INDEX_op_rem_i32:
347
tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP0, a1, a2);
348
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
349
case INDEX_op_negsetcond_i64:
350
return C_O1_I2(r, r, rC);
351
352
- case INDEX_op_divu_i32:
353
- case INDEX_op_divu_i64:
354
case INDEX_op_rem_i32:
355
case INDEX_op_rem_i64:
356
case INDEX_op_remu_i32:
357
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
358
index XXXXXXX..XXXXXXX 100644
359
--- a/tcg/arm/tcg-target.c.inc
360
+++ b/tcg/arm/tcg-target.c.inc
361
@@ -XXX,XX +XXX,XX @@ static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
362
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
363
}
364
365
-static void tcg_out_udiv(TCGContext *s, ARMCond cond,
366
- TCGReg rd, TCGReg rn, TCGReg rm)
367
-{
368
- tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
369
-}
370
-
371
static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
372
{
373
/* sxtb */
374
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
375
.out_rrr = tgen_divs,
376
};
377
378
+static void tgen_divu(TCGContext *s, TCGType type,
379
+ TCGReg a0, TCGReg a1, TCGReg a2)
380
+{
381
+ /* udiv */
382
+ tcg_out32(s, 0x0730f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
383
+}
384
+
385
+static const TCGOutOpBinary outop_divu = {
386
+ .base.static_constraint = C_Dynamic,
387
+ .base.dynamic_constraint = cset_idiv,
388
+ .out_rrr = tgen_divu,
389
+};
390
+
391
static const TCGOutOpBinary outop_eqv = {
392
.base.static_constraint = C_NotImplemented,
393
};
394
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
395
}
396
break;
397
398
- case INDEX_op_divu_i32:
399
- tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
400
- break;
401
-
402
case INDEX_op_mb:
403
tcg_out_mb(s, args[0]);
404
break;
405
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
406
case INDEX_op_ctz_i32:
407
return C_O1_I2(r, r, rIK);
408
409
- case INDEX_op_divu_i32:
410
- return C_O1_I2(r, r, r);
411
-
412
case INDEX_op_mulu2_i32:
413
case INDEX_op_muls2_i32:
414
return C_O2_I2(r, r, r, r);
415
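Note the Arm entry above is the first divu conversion that cannot use a
static constraint: sdiv/udiv are optional ARMv7 instructions, so the
constraint must be reported at runtime. cset_idiv itself is introduced
with the divs conversion earlier in the series; its assumed shape,
keyed off the backend's existing use_idiv_instructions flag, is roughly:

/* Sketch only: report a constraint set when the host has idiv/udiv. */
static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
{
    return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
}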
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
416
index XXXXXXX..XXXXXXX 100644
417
--- a/tcg/i386/tcg-target.c.inc
418
+++ b/tcg/i386/tcg-target.c.inc
419
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
420
.base.static_constraint = C_NotImplemented,
421
};
422
423
+static const TCGOutOpBinary outop_divu = {
424
+ .base.static_constraint = C_NotImplemented,
425
+};
426
+
427
static const TCGOutOpBinary outop_eqv = {
428
.base.static_constraint = C_NotImplemented,
429
};
430
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
431
index XXXXXXX..XXXXXXX 100644
432
--- a/tcg/loongarch64/tcg-target.c.inc
433
+++ b/tcg/loongarch64/tcg-target.c.inc
434
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
435
.out_rrr = tgen_divs,
436
};
437
438
+static void tgen_divu(TCGContext *s, TCGType type,
439
+ TCGReg a0, TCGReg a1, TCGReg a2)
440
+{
441
+ if (type == TCG_TYPE_I32) {
442
+ tcg_out_opc_div_wu(s, a0, a1, a2);
443
+ } else {
444
+ tcg_out_opc_div_du(s, a0, a1, a2);
445
+ }
446
+}
447
+
448
+static const TCGOutOpBinary outop_divu = {
449
+ .base.static_constraint = C_O1_I2(r, r, r),
450
+ .out_rrr = tgen_divu,
451
+};
452
+
453
static const TCGOutOpBinary outop_eqv = {
454
.base.static_constraint = C_NotImplemented,
455
};
456
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
457
}
458
break;
459
460
- case INDEX_op_divu_i32:
461
- tcg_out_opc_div_wu(s, a0, a1, a2);
462
- break;
463
- case INDEX_op_divu_i64:
464
- tcg_out_opc_div_du(s, a0, a1, a2);
465
- break;
466
-
467
case INDEX_op_rem_i32:
468
tcg_out_opc_mod_w(s, a0, a1, a2);
469
break;
470
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
471
case INDEX_op_setcond_i64:
472
return C_O1_I2(r, rz, rJ);
473
474
- case INDEX_op_divu_i32:
475
- case INDEX_op_divu_i64:
476
case INDEX_op_rem_i32:
477
case INDEX_op_rem_i64:
478
case INDEX_op_remu_i32:
479
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
480
index XXXXXXX..XXXXXXX 100644
481
--- a/tcg/mips/tcg-target.c.inc
482
+++ b/tcg/mips/tcg-target.c.inc
483
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
484
.out_rrr = tgen_divs,
485
};
486
487
+static void tgen_divu(TCGContext *s, TCGType type,
488
+ TCGReg a0, TCGReg a1, TCGReg a2)
489
+{
490
+ if (use_mips32r6_instructions) {
491
+ if (type == TCG_TYPE_I32) {
492
+ tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
493
+ } else {
494
+ tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
495
+ }
496
+ } else {
497
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIVU : OPC_DDIVU;
498
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
499
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
500
+ }
501
+}
502
+
503
+static const TCGOutOpBinary outop_divu = {
504
+ .base.static_constraint = C_O1_I2(r, r, r),
505
+ .out_rrr = tgen_divu,
506
+};
507
+
508
static const TCGOutOpBinary outop_eqv = {
509
.base.static_constraint = C_NotImplemented,
510
};
511
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
512
tcg_out_ldst(s, i1, a0, a1, a2);
513
break;
514
515
- case INDEX_op_divu_i32:
516
- if (use_mips32r6_instructions) {
517
- tcg_out_opc_reg(s, OPC_DIVU_R6, a0, a1, a2);
518
- break;
519
- }
520
- i1 = OPC_DIVU, i2 = OPC_MFLO;
521
- goto do_hilo1;
522
case INDEX_op_rem_i32:
523
if (use_mips32r6_instructions) {
524
tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
525
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
526
}
527
i1 = OPC_DIVU, i2 = OPC_MFHI;
528
goto do_hilo1;
529
- case INDEX_op_divu_i64:
530
- if (use_mips32r6_instructions) {
531
- tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
532
- break;
533
- }
534
- i1 = OPC_DDIVU, i2 = OPC_MFLO;
535
- goto do_hilo1;
536
case INDEX_op_rem_i64:
537
if (use_mips32r6_instructions) {
538
tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
539
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
540
case INDEX_op_st_i64:
541
return C_O0_I2(rz, r);
542
543
- case INDEX_op_divu_i32:
544
case INDEX_op_rem_i32:
545
case INDEX_op_remu_i32:
546
case INDEX_op_setcond_i32:
547
- case INDEX_op_divu_i64:
548
case INDEX_op_rem_i64:
549
case INDEX_op_remu_i64:
550
case INDEX_op_setcond_i64:
551
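A note on the two-instruction pre-R6 sequence in tgen_divu above: DIVU
does not write a general register at all; it deposits the quotient in
LO and the remainder in HI, and MFLO then moves LO into a0. A C model
of what the emitted pair computes (sketch only):

static uint32_t model_divu_pre_r6(uint32_t a1, uint32_t a2)
{
    uint32_t lo = a1 / a2;   /* DIVU: quotient -> LO, remainder -> HI */
    return lo;               /* MFLO: fetch LO into the destination */
}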
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
552
index XXXXXXX..XXXXXXX 100644
553
--- a/tcg/ppc/tcg-target.c.inc
554
+++ b/tcg/ppc/tcg-target.c.inc
555
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
556
.out_rrr = tgen_divs,
557
};
558
559
+static void tgen_divu(TCGContext *s, TCGType type,
560
+ TCGReg a0, TCGReg a1, TCGReg a2)
561
+{
562
+ uint32_t insn = type == TCG_TYPE_I32 ? DIVWU : DIVDU;
563
+ tcg_out32(s, insn | TAB(a0, a1, a2));
564
+}
565
+
566
+static const TCGOutOpBinary outop_divu = {
567
+ .base.static_constraint = C_O1_I2(r, r, r),
568
+ .out_rrr = tgen_divu,
569
+};
570
+
571
static const TCGOutOpBinary outop_eqv = {
572
.base.static_constraint = C_O1_I2(r, r, r),
573
.out_rrr = tgen_eqv,
574
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
575
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
576
break;
577
578
- case INDEX_op_divu_i32:
579
- tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
580
- break;
581
-
582
case INDEX_op_rem_i32:
583
tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
584
break;
585
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
586
}
587
break;
588
589
- case INDEX_op_divu_i64:
590
- tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
591
- break;
592
case INDEX_op_rem_i64:
593
tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
594
break;
595
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
596
case INDEX_op_rotr_i64:
597
return C_O1_I2(r, r, ri);
598
599
- case INDEX_op_divu_i32:
600
case INDEX_op_rem_i32:
601
case INDEX_op_remu_i32:
602
- case INDEX_op_divu_i64:
603
case INDEX_op_rem_i64:
604
case INDEX_op_remu_i64:
605
return C_O1_I2(r, r, r);
606
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
607
index XXXXXXX..XXXXXXX 100644
608
--- a/tcg/riscv/tcg-target.c.inc
609
+++ b/tcg/riscv/tcg-target.c.inc
610
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
611
.out_rrr = tgen_divs,
612
};
613
614
+static void tgen_divu(TCGContext *s, TCGType type,
615
+ TCGReg a0, TCGReg a1, TCGReg a2)
616
+{
617
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVUW : OPC_DIVU;
618
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
619
+}
620
+
621
+static const TCGOutOpBinary outop_divu = {
622
+ .base.static_constraint = C_O1_I2(r, r, r),
623
+ .out_rrr = tgen_divu,
624
+};
625
+
626
static void tgen_eqv(TCGContext *s, TCGType type,
627
TCGReg a0, TCGReg a1, TCGReg a2)
628
{
629
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
630
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
631
break;
632
633
- case INDEX_op_divu_i32:
634
- tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
635
- break;
636
- case INDEX_op_divu_i64:
637
- tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
638
- break;
639
-
640
case INDEX_op_rem_i32:
641
tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
642
break;
643
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
644
case INDEX_op_negsetcond_i64:
645
return C_O1_I2(r, r, rI);
646
647
- case INDEX_op_divu_i32:
648
case INDEX_op_rem_i32:
649
case INDEX_op_remu_i32:
650
- case INDEX_op_divu_i64:
651
case INDEX_op_rem_i64:
652
case INDEX_op_remu_i64:
653
return C_O1_I2(r, rz, rz);
654
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
655
index XXXXXXX..XXXXXXX 100644
656
--- a/tcg/s390x/tcg-target.c.inc
657
+++ b/tcg/s390x/tcg-target.c.inc
658
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
659
.base.static_constraint = C_NotImplemented,
660
};
661
662
+static const TCGOutOpBinary outop_divu = {
663
+ .base.static_constraint = C_NotImplemented,
664
+};
665
+
666
static void tgen_eqv(TCGContext *s, TCGType type,
667
TCGReg a0, TCGReg a1, TCGReg a2)
668
{
669
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
670
index XXXXXXX..XXXXXXX 100644
671
--- a/tcg/sparc64/tcg-target.c.inc
672
+++ b/tcg/sparc64/tcg-target.c.inc
673
@@ -XXX,XX +XXX,XX @@ static void tcg_out_sety(TCGContext *s, TCGReg rs)
674
tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
675
}
676
677
-static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
678
- int32_t val2, int val2const, int uns)
679
-{
680
- /* Load Y with the sign/zero extension of RS1 to 64-bits. */
681
- if (uns) {
682
- tcg_out_sety(s, TCG_REG_G0);
683
- } else {
684
- tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
685
- tcg_out_sety(s, TCG_REG_T1);
686
- }
687
-
688
- tcg_out_arithc(s, rd, rs1, val2, val2const,
689
- uns ? ARITH_UDIV : ARITH_SDIV);
690
-}
691
-
692
static const uint8_t tcg_cond_to_bcond[16] = {
693
[TCG_COND_EQ] = COND_E,
694
[TCG_COND_NE] = COND_NE,
695
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
696
.out_rri = tgen_divsi,
697
};
698
699
+static void tgen_divu_rJ(TCGContext *s, TCGType type,
700
+ TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
701
+{
702
+ uint32_t insn;
703
+
704
+ if (type == TCG_TYPE_I32) {
705
+ /* Load Y with the zero extension to 64-bits. */
706
+ tcg_out_sety(s, TCG_REG_G0);
707
+ insn = ARITH_UDIV;
708
+ } else {
709
+ insn = ARITH_UDIVX;
710
+ }
711
+ tcg_out_arithc(s, a0, a1, a2, c2, insn);
712
+}
713
+
714
+static void tgen_divu(TCGContext *s, TCGType type,
715
+ TCGReg a0, TCGReg a1, TCGReg a2)
716
+{
717
+ tgen_divu_rJ(s, type, a0, a1, a2, false);
718
+}
719
+
720
+static void tgen_divui(TCGContext *s, TCGType type,
721
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
722
+{
723
+ tgen_divu_rJ(s, type, a0, a1, a2, true);
724
+}
725
+
726
+static const TCGOutOpBinary outop_divu = {
727
+ .base.static_constraint = C_O1_I2(r, r, rJ),
728
+ .out_rrr = tgen_divu,
729
+ .out_rri = tgen_divui,
730
+};
731
+
732
static const TCGOutOpBinary outop_eqv = {
733
.base.static_constraint = C_NotImplemented,
734
};
735
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
736
c = SHIFT_SRA;
737
goto do_shift32;
738
739
- case INDEX_op_divu_i32:
740
- tcg_out_div32(s, a0, a1, a2, c2, 1);
741
- break;
742
-
743
case INDEX_op_brcond_i32:
744
tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
745
break;
746
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
747
case INDEX_op_sar_i64:
748
c = SHIFT_SRAX;
749
goto do_shift64;
750
- case INDEX_op_divu_i64:
751
- c = ARITH_UDIVX;
752
- goto gen_arith;
753
754
case INDEX_op_brcond_i64:
755
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
756
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
757
const_args[4], args[5], const_args[5], true);
758
break;
759
760
- gen_arith:
761
- tcg_out_arithc(s, a0, a1, a2, c2, c);
762
- break;
763
-
764
case INDEX_op_mb:
765
tcg_out_mb(s, a0);
766
break;
767
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
768
case INDEX_op_qemu_st_i64:
769
return C_O0_I2(rz, r);
770
771
- case INDEX_op_divu_i32:
772
- case INDEX_op_divu_i64:
773
case INDEX_op_shl_i32:
774
case INDEX_op_shl_i64:
775
case INDEX_op_shr_i32:
776
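The Y register setup in tgen_divu_rJ above exists because the 32-bit
UDIV instruction divides the 64-bit value formed by Y:rs1, so Y must be
zeroed (written from %g0) to zero-extend the numerator; 64-bit UDIVX
needs no such setup. A C model of the 32-bit path (sketch only):

static uint32_t model_sparc_udiv32(uint32_t y, uint32_t rs1, uint32_t rs2)
{
    uint64_t n = ((uint64_t)y << 32) | rs1;   /* Y holds the high word */
    return (uint32_t)(n / rs2);               /* UDIV */
}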
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
777
index XXXXXXX..XXXXXXX 100644
778
--- a/tcg/tci/tcg-target-opc.h.inc
779
+++ b/tcg/tci/tcg-target-opc.h.inc
780
@@ -XXX,XX +XXX,XX @@
781
DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
782
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
783
DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
784
+DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
785
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
786
index XXXXXXX..XXXXXXX 100644
787
--- a/tcg/tci/tcg-target.c.inc
788
+++ b/tcg/tci/tcg-target.c.inc
789
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
790
case INDEX_op_st_i64:
791
return C_O0_I2(r, r);
792
793
- case INDEX_op_divu_i32:
794
- case INDEX_op_divu_i64:
795
case INDEX_op_rem_i32:
796
case INDEX_op_rem_i64:
797
case INDEX_op_remu_i32:
798
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
799
.out_rrr = tgen_divs,
800
};
801
802
+static void tgen_divu(TCGContext *s, TCGType type,
803
+ TCGReg a0, TCGReg a1, TCGReg a2)
804
+{
805
+ TCGOpcode opc = (type == TCG_TYPE_I32
806
+ ? INDEX_op_tci_divu32
807
+ : INDEX_op_divu_i64);
808
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
809
+}
810
+
811
+static const TCGOutOpBinary outop_divu = {
812
+ .base.static_constraint = C_O1_I2(r, r, r),
813
+ .out_rrr = tgen_divu,
814
+};
815
+
816
static void tgen_eqv(TCGContext *s, TCGType type,
817
TCGReg a0, TCGReg a1, TCGReg a2)
818
{
819
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
820
CASE_32_64(sar)
821
CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
822
CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
823
- CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
824
CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
825
CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
826
CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
827
--
2.43.0

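For readers following the conversion pattern: each backend now exports
a per-opcode TCGOutOpBinary, and tcg.c dispatches through the
all_outop[] table instead of the old tcg_out_op switch. A standalone
toy model of that dispatch (illustrative only, not QEMU code; the
constraint machinery and real emission are elided):

#include <stdio.h>

typedef enum { INDEX_op_divu, NB_OPS } TCGOpcode;

typedef struct TCGOutOp {
    int static_constraint;
} TCGOutOp;

typedef struct TCGOutOpBinary {
    TCGOutOp base;                      /* common base, downcast target */
    void (*out_rrr)(int a0, int a1, int a2);
} TCGOutOpBinary;

static void tgen_divu(int a0, int a1, int a2)
{
    printf("emit divu r%d, r%d, r%d\n", a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = 1,        /* stands in for C_O1_I2(r, r, r) */
    .out_rrr = tgen_divu,
};

static const TCGOutOp * const all_outop[NB_OPS] = {
    [INDEX_op_divu] = &outop_divu.base,
};

int main(void)
{
    /* base is the first member, so a plain cast suffices in this toy;
     * QEMU proper uses container_of() for the same downcast. */
    const TCGOutOpBinary *out = (const void *)all_outop[INDEX_op_divu];
    out->out_rrr(0, 1, 2);
    return 0;
}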
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           |  9 +++++----
 tcg/tcg-op.c             | 16 ++++++++--------
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                |  5 ++---
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
18
DEF(and, 1, 2, 0, TCG_OPF_INT)
19
DEF(andc, 1, 2, 0, TCG_OPF_INT)
20
DEF(divs, 1, 2, 0, TCG_OPF_INT)
21
+DEF(divu, 1, 2, 0, TCG_OPF_INT)
22
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
23
DEF(mul, 1, 2, 0, TCG_OPF_INT)
24
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
26
DEF(st16_i32, 0, 2, 1, 0)
27
DEF(st_i32, 0, 2, 1, 0)
28
/* arith */
29
-DEF(divu_i32, 1, 2, 0, 0)
30
DEF(rem_i32, 1, 2, 0, 0)
31
DEF(remu_i32, 1, 2, 0, 0)
32
DEF(div2_i32, 2, 3, 0, 0)
33
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
34
DEF(st32_i64, 0, 2, 1, 0)
35
DEF(st_i64, 0, 2, 1, 0)
36
/* arith */
37
-DEF(divu_i64, 1, 2, 0, 0)
38
DEF(rem_i64, 1, 2, 0, 0)
39
DEF(remu_i64, 1, 2, 0, 0)
40
DEF(div2_i64, 2, 3, 0, 0)
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
46
}
47
return (int64_t)x / ((int64_t)y ? : 1);
48
49
- case INDEX_op_divu_i32:
50
- return (uint32_t)x / ((uint32_t)y ? : 1);
51
- case INDEX_op_divu_i64:
52
+ case INDEX_op_divu:
53
+ if (type == TCG_TYPE_I32) {
54
+ return (uint32_t)x / ((uint32_t)y ? : 1);
55
+ }
56
return (uint64_t)x / ((uint64_t)y ? : 1);
57
58
case INDEX_op_rem_i32:
59
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
60
done = fold_deposit(&ctx, op);
61
break;
62
case INDEX_op_divs:
63
- CASE_OP_32_64(divu):
64
+ case INDEX_op_divu:
65
done = fold_divide(&ctx, op);
66
break;
67
case INDEX_op_dup_vec:
68
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tcg/tcg-op.c
71
+++ b/tcg/tcg-op.c
72
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
73
74
void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
75
{
76
- if (tcg_op_supported(INDEX_op_divu_i32, TCG_TYPE_I32, 0)) {
77
- tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
78
+ if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
79
+ tcg_gen_op3_i32(INDEX_op_divu, ret, arg1, arg2);
80
} else if (TCG_TARGET_HAS_div2_i32) {
81
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
82
TCGv_i32 zero = tcg_constant_i32(0);
83
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
84
{
85
if (TCG_TARGET_HAS_rem_i32) {
86
tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
87
- } else if (tcg_op_supported(INDEX_op_divu_i32, TCG_TYPE_I32, 0)) {
88
+ } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
89
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
90
- tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2);
91
+ tcg_gen_op3_i32(INDEX_op_divu, t0, arg1, arg2);
92
tcg_gen_mul_i32(t0, t0, arg2);
93
tcg_gen_sub_i32(ret, arg1, t0);
94
tcg_temp_free_i32(t0);
95
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
96
97
void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
98
{
99
- if (tcg_op_supported(INDEX_op_divu_i64, TCG_TYPE_I64, 0)) {
100
- tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
101
+ if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
102
+ tcg_gen_op3_i64(INDEX_op_divu, ret, arg1, arg2);
103
} else if (TCG_TARGET_HAS_div2_i64) {
104
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
105
TCGv_i64 zero = tcg_constant_i64(0);
106
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
107
{
108
if (TCG_TARGET_HAS_rem_i64) {
109
tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
110
- } else if (tcg_op_supported(INDEX_op_divu_i64, TCG_TYPE_I64, 0)) {
111
+ } else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
112
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
113
- tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2);
114
+ tcg_gen_op3_i64(INDEX_op_divu, t0, arg1, arg2);
115
tcg_gen_mul_i64(t0, t0, arg2);
116
tcg_gen_sub_i64(ret, arg1, t0);
117
tcg_temp_free_i64(t0);
118
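The remu fallback above relies on the identity r = a1 - (a1 / a2) * a2
when the target implements divu but not remu. A standalone check of
that expansion (sketch, mirroring the three generated ops):

#include <stdint.h>
#include <assert.h>

static uint32_t remu_via_divu(uint32_t a1, uint32_t a2)
{
    uint32_t t0 = a1 / a2;    /* INDEX_op_divu   */
    t0 *= a2;                 /* tcg_gen_mul_i32 */
    return a1 - t0;           /* tcg_gen_sub_i32 */
}

int main(void)
{
    assert(remu_via_divu(7, 3) == 1);
    assert(remu_via_divu(0xffffffffu, 10) == 5);
    return 0;
}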
diff --git a/tcg/tcg.c b/tcg/tcg.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/tcg/tcg.c
121
+++ b/tcg/tcg.c
122
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
123
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
124
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
125
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
126
- OUTOP(INDEX_op_divu_i32, TCGOutOpBinary, outop_divu),
127
- OUTOP(INDEX_op_divu_i64, TCGOutOpBinary, outop_divu),
128
+ OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
129
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
130
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
131
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
132
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
133
case INDEX_op_and:
134
case INDEX_op_andc:
135
case INDEX_op_divs:
136
- case INDEX_op_divu_i32:
137
- case INDEX_op_divu_i64:
138
+ case INDEX_op_divu:
139
case INDEX_op_eqv:
140
case INDEX_op_mul:
141
case INDEX_op_mulsh:
142
diff --git a/tcg/tci.c b/tcg/tci.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/tcg/tci.c
145
+++ b/tcg/tci.c
146
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
147
tci_args_rrr(insn, &r0, &r1, &r2);
148
regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
149
break;
150
- case INDEX_op_divu_i64:
151
+ case INDEX_op_divu:
152
tci_args_rrr(insn, &r0, &r1, &r2);
153
regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
154
break;
155
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
156
case INDEX_op_and:
157
case INDEX_op_andc:
158
case INDEX_op_divs:
159
+ case INDEX_op_divu:
160
case INDEX_op_eqv:
161
case INDEX_op_mul:
162
case INDEX_op_nand:
163
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
164
case INDEX_op_xor:
165
case INDEX_op_rem_i32:
166
case INDEX_op_rem_i64:
167
- case INDEX_op_divu_i32:
168
- case INDEX_op_divu_i64:
169
case INDEX_op_remu_i32:
170
case INDEX_op_remu_i64:
171
case INDEX_op_shl_i32:
172
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
173
index XXXXXXX..XXXXXXX 100644
174
--- a/docs/devel/tcg-ops.rst
175
+++ b/docs/devel/tcg-ops.rst
176
@@ -XXX,XX +XXX,XX @@ Arithmetic
177
- | *t0* = *t1* / *t2* (signed)
178
| Undefined behavior if division by zero or overflow.
179
180
- * - divu_i32/i64 *t0*, *t1*, *t2*
181
+ * - divu *t0*, *t1*, *t2*
182
183
- | *t0* = *t1* / *t2* (unsigned)
184
| Undefined behavior if division by zero.
185
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
186
index XXXXXXX..XXXXXXX 100644
187
--- a/tcg/tci/tcg-target.c.inc
188
+++ b/tcg/tci/tcg-target.c.inc
189
@@ -XXX,XX +XXX,XX @@ static void tgen_divu(TCGContext *s, TCGType type,
190
{
191
TCGOpcode opc = (type == TCG_TYPE_I32
192
? INDEX_op_tci_divu32
193
- : INDEX_op_divu_i64);
194
+ : INDEX_op_divu);
195
tcg_out_op_rrr(s, opc, a0, a1, a2);
196
}
--
2.43.0

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 24 +++++++++++++++--
 tcg/aarch64/tcg-target.c.inc     |  4 +++
 tcg/arm/tcg-target.c.inc         |  4 +++
 tcg/i386/tcg-target.c.inc        | 17 ++++++++----
 tcg/loongarch64/tcg-target.c.inc |  4 +++
 tcg/mips/tcg-target.c.inc        |  4 +++
 tcg/ppc/tcg-target.c.inc         |  4 +++
 tcg/riscv/tcg-target.c.inc       |  4 +++
 tcg/s390x/tcg-target.c.inc       | 44 ++++++++++++++++----------------
 tcg/sparc64/tcg-target.c.inc     |  4 +++
 tcg/tci/tcg-target.c.inc         |  4 +++
 11 files changed, 88 insertions(+), 29 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
20
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBinary {
22
TCGReg a0, TCGReg a1, tcg_target_long a2);
23
} TCGOutOpBinary;
24
25
+typedef struct TCGOutOpDivRem {
26
+ TCGOutOp base;
27
+ void (*out_rr01r)(TCGContext *s, TCGType type,
28
+ TCGReg a0, TCGReg a1, TCGReg a4);
29
+} TCGOutOpDivRem;
30
+
31
typedef struct TCGOutOpUnary {
32
TCGOutOp base;
33
void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
34
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
35
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
36
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
37
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
38
+ OUTOP(INDEX_op_div2_i32, TCGOutOpDivRem, outop_divs2),
39
+ OUTOP(INDEX_op_div2_i64, TCGOutOpDivRem, outop_divs2),
40
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
41
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
42
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
43
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
44
case INDEX_op_rem_i32:
45
case INDEX_op_remu_i32:
46
return TCG_TARGET_HAS_rem_i32;
47
- case INDEX_op_div2_i32:
48
case INDEX_op_divu2_i32:
49
return TCG_TARGET_HAS_div2_i32;
50
case INDEX_op_rotl_i32:
51
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
52
case INDEX_op_rem_i64:
53
case INDEX_op_remu_i64:
54
return TCG_TARGET_HAS_rem_i64;
55
- case INDEX_op_div2_i64:
56
case INDEX_op_divu2_i64:
57
return TCG_TARGET_HAS_div2_i64;
58
case INDEX_op_rotl_i64:
59
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
60
}
61
break;
62
63
+ case INDEX_op_div2_i32:
64
+ case INDEX_op_div2_i64:
65
+ {
66
+ const TCGOutOpDivRem *out =
67
+ container_of(all_outop[op->opc], TCGOutOpDivRem, base);
68
+
69
+ /* Only used by x86 and s390x, which use matching constraints. */
70
+ tcg_debug_assert(new_args[0] == new_args[2]);
71
+ tcg_debug_assert(new_args[1] == new_args[3]);
72
+ tcg_debug_assert(!const_args[4]);
73
+ out->out_rr01r(s, type, new_args[0], new_args[1], new_args[4]);
74
+ }
75
+ break;
76
+
77
default:
78
if (def->flags & TCG_OPF_VECTOR) {
79
tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
80
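For reference, the five-operand layout behind new_args in the div2 case
above is the "q, r, nl, nh, d" order documented later in this series,
and the matching constraints on x86/s390x tie the double-word numerator
to the two outputs, which is exactly what the asserts check:

/* new_args[0] = q  (output)   new_args[2] = nl (input, tied to q)
 * new_args[1] = r  (output)   new_args[3] = nh (input, tied to r)
 *                             new_args[4] = d  (input, any register)
 */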
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
81
index XXXXXXX..XXXXXXX 100644
82
--- a/tcg/aarch64/tcg-target.c.inc
83
+++ b/tcg/aarch64/tcg-target.c.inc
84
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
85
.out_rrr = tgen_divs,
86
};
87
88
+static const TCGOutOpDivRem outop_divs2 = {
89
+ .base.static_constraint = C_NotImplemented,
90
+};
91
+
92
static void tgen_divu(TCGContext *s, TCGType type,
93
TCGReg a0, TCGReg a1, TCGReg a2)
94
{
95
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
96
index XXXXXXX..XXXXXXX 100644
97
--- a/tcg/arm/tcg-target.c.inc
98
+++ b/tcg/arm/tcg-target.c.inc
99
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
100
.out_rrr = tgen_divs,
101
};
102
103
+static const TCGOutOpDivRem outop_divs2 = {
104
+ .base.static_constraint = C_NotImplemented,
105
+};
106
+
107
static void tgen_divu(TCGContext *s, TCGType type,
108
TCGReg a0, TCGReg a1, TCGReg a2)
109
{
110
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
111
index XXXXXXX..XXXXXXX 100644
112
--- a/tcg/i386/tcg-target.c.inc
113
+++ b/tcg/i386/tcg-target.c.inc
114
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
115
.base.static_constraint = C_NotImplemented,
116
};
117
118
+static void tgen_divs2(TCGContext *s, TCGType type,
119
+ TCGReg a0, TCGReg a1, TCGReg a4)
120
+{
121
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
122
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, a4);
123
+}
124
+
125
+static const TCGOutOpDivRem outop_divs2 = {
126
+ .base.static_constraint = C_O2_I3(a, d, 0, 1, r),
127
+ .out_rr01r = tgen_divs2,
128
+};
129
+
130
static const TCGOutOpBinary outop_divu = {
131
.base.static_constraint = C_NotImplemented,
132
};
133
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
134
}
135
break;
136
137
- OP_32_64(div2):
138
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
139
- break;
140
OP_32_64(divu2):
141
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
142
break;
143
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
144
case INDEX_op_movcond_i64:
145
return C_O1_I4(r, r, reT, r, 0);
146
147
- case INDEX_op_div2_i32:
148
- case INDEX_op_div2_i64:
149
case INDEX_op_divu2_i32:
150
case INDEX_op_divu2_i64:
151
return C_O2_I3(a, d, 0, 1, r);
152
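The constraint C_O2_I3(a, d, 0, 1, r) above pins the outputs to EAX and
EDX and ties the first two inputs to them because of how the hardware
instruction works: IDIV divides the double-width value EDX:EAX by its
operand, leaving the quotient in EAX and the remainder in EDX. A C
model of the 32-bit case (sketch; real hardware traps on divide-by-zero
and on quotient overflow):

#include <stdint.h>

static void model_idiv32(uint32_t *eax, uint32_t *edx, int32_t src)
{
    int64_t n = (int64_t)(((uint64_t)*edx << 32) | *eax);  /* edx:eax */
    *eax = (uint32_t)(n / src);                            /* quotient  */
    *edx = (uint32_t)(n % src);                            /* remainder */
}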
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
153
index XXXXXXX..XXXXXXX 100644
154
--- a/tcg/loongarch64/tcg-target.c.inc
155
+++ b/tcg/loongarch64/tcg-target.c.inc
156
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
157
.out_rrr = tgen_divs,
158
};
159
160
+static const TCGOutOpDivRem outop_divs2 = {
161
+ .base.static_constraint = C_NotImplemented,
162
+};
163
+
164
static void tgen_divu(TCGContext *s, TCGType type,
165
TCGReg a0, TCGReg a1, TCGReg a2)
166
{
167
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
168
index XXXXXXX..XXXXXXX 100644
169
--- a/tcg/mips/tcg-target.c.inc
170
+++ b/tcg/mips/tcg-target.c.inc
171
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
172
.out_rrr = tgen_divs,
173
};
174
175
+static const TCGOutOpDivRem outop_divs2 = {
176
+ .base.static_constraint = C_NotImplemented,
177
+};
178
+
179
static void tgen_divu(TCGContext *s, TCGType type,
180
TCGReg a0, TCGReg a1, TCGReg a2)
181
{
182
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
183
index XXXXXXX..XXXXXXX 100644
184
--- a/tcg/ppc/tcg-target.c.inc
185
+++ b/tcg/ppc/tcg-target.c.inc
186
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
187
.out_rrr = tgen_divs,
188
};
189
190
+static const TCGOutOpDivRem outop_divs2 = {
191
+ .base.static_constraint = C_NotImplemented,
192
+};
193
+
194
static void tgen_divu(TCGContext *s, TCGType type,
195
TCGReg a0, TCGReg a1, TCGReg a2)
196
{
197
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
198
index XXXXXXX..XXXXXXX 100644
199
--- a/tcg/riscv/tcg-target.c.inc
200
+++ b/tcg/riscv/tcg-target.c.inc
201
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
202
.out_rrr = tgen_divs,
203
};
204
205
+static const TCGOutOpDivRem outop_divs2 = {
206
+ .base.static_constraint = C_NotImplemented,
207
+};
208
+
209
static void tgen_divu(TCGContext *s, TCGType type,
210
TCGReg a0, TCGReg a1, TCGReg a2)
211
{
212
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
213
index XXXXXXX..XXXXXXX 100644
214
--- a/tcg/s390x/tcg-target.c.inc
215
+++ b/tcg/s390x/tcg-target.c.inc
216
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
217
.base.static_constraint = C_NotImplemented,
218
};
219
220
+static void tgen_divs2(TCGContext *s, TCGType type,
221
+ TCGReg a0, TCGReg a1, TCGReg a4)
222
+{
223
+ tcg_debug_assert((a1 & 1) == 0);
224
+ tcg_debug_assert(a0 == a1 + 1);
225
+ if (type == TCG_TYPE_I32) {
226
+ tcg_out_insn(s, RR, DR, a1, a4);
227
+ } else {
228
+ /*
229
+ * TODO: Move the sign-extend of the numerator from a2 into a3
230
+ * into the tcg backend, instead of in early expansion. It is
231
+ * required for 32-bit DR, but not 64-bit DSGR.
232
+ */
233
+ tcg_out_insn(s, RRE, DSGR, a1, a4);
234
+ }
235
+}
236
+
237
+static const TCGOutOpDivRem outop_divs2 = {
238
+ .base.static_constraint = C_O2_I3(o, m, 0, 1, r),
239
+ .out_rr01r = tgen_divs2,
240
+};
241
+
242
static const TCGOutOpBinary outop_divu = {
243
.base.static_constraint = C_NotImplemented,
244
};
245
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
246
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
247
break;
248
249
- case INDEX_op_div2_i32:
250
- tcg_debug_assert(args[0] == args[2]);
251
- tcg_debug_assert(args[1] == args[3]);
252
- tcg_debug_assert((args[1] & 1) == 0);
253
- tcg_debug_assert(args[0] == args[1] + 1);
254
- tcg_out_insn(s, RR, DR, args[1], args[4]);
255
- break;
256
case INDEX_op_divu2_i32:
257
tcg_debug_assert(args[0] == args[2]);
258
tcg_debug_assert(args[1] == args[3]);
259
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
260
tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
261
break;
262
263
- case INDEX_op_div2_i64:
264
- /*
265
- * ??? We get an unnecessary sign-extension of the dividend
266
- * into op0 with this definition, but as we do in fact always
267
- * produce both quotient and remainder using INDEX_op_div_i64
268
- * instead requires jumping through even more hoops.
269
- */
270
- tcg_debug_assert(args[0] == args[2]);
271
- tcg_debug_assert(args[1] == args[3]);
272
- tcg_debug_assert((args[1] & 1) == 0);
273
- tcg_debug_assert(args[0] == args[1] + 1);
274
- tcg_out_insn(s, RRE, DSGR, args[1], args[4]);
275
- break;
276
case INDEX_op_divu2_i64:
277
tcg_debug_assert(args[0] == args[2]);
278
tcg_debug_assert(args[1] == args[3]);
279
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
280
case INDEX_op_movcond_i64:
281
return C_O1_I4(r, r, rC, rI, r);
282
283
- case INDEX_op_div2_i32:
284
- case INDEX_op_div2_i64:
285
case INDEX_op_divu2_i32:
286
case INDEX_op_divu2_i64:
287
return C_O2_I3(o, m, 0, 1, r);
288
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
289
index XXXXXXX..XXXXXXX 100644
290
--- a/tcg/sparc64/tcg-target.c.inc
291
+++ b/tcg/sparc64/tcg-target.c.inc
292
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
293
.out_rri = tgen_divsi,
294
};
295
296
+static const TCGOutOpDivRem outop_divs2 = {
297
+ .base.static_constraint = C_NotImplemented,
298
+};
299
+
300
static void tgen_divu_rJ(TCGContext *s, TCGType type,
301
TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
302
{
303
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
304
index XXXXXXX..XXXXXXX 100644
305
--- a/tcg/tci/tcg-target.c.inc
306
+++ b/tcg/tci/tcg-target.c.inc
307
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divs = {
308
.out_rrr = tgen_divs,
309
};
310
311
+static const TCGOutOpDivRem outop_divs2 = {
312
+ .base.static_constraint = C_NotImplemented,
313
+};
314
+
315
static void tgen_divu(TCGContext *s, TCGType type,
316
TCGReg a0, TCGReg a1, TCGReg a2)
317
{
318
--
319
2.43.0
320
321
diff view generated by jsdifflib
Deleted patch
Rename to INDEX_op_divs2 to emphasize signed inputs,
mirroring INDEX_op_divu2_*. Document the opcode.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h  |  3 +--
 tcg/tcg-op.c           | 16 ++++++++--------
 tcg/tcg.c              |  6 ++----
 docs/devel/tcg-ops.rst |  9 +++++++++
 4 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
18
DEF(and, 1, 2, 0, TCG_OPF_INT)
19
DEF(andc, 1, 2, 0, TCG_OPF_INT)
20
DEF(divs, 1, 2, 0, TCG_OPF_INT)
21
+DEF(divs2, 2, 3, 0, TCG_OPF_INT)
22
DEF(divu, 1, 2, 0, TCG_OPF_INT)
23
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
24
DEF(mul, 1, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(st_i32, 0, 2, 1, 0)
26
/* arith */
27
DEF(rem_i32, 1, 2, 0, 0)
28
DEF(remu_i32, 1, 2, 0, 0)
29
-DEF(div2_i32, 2, 3, 0, 0)
30
DEF(divu2_i32, 2, 3, 0, 0)
31
/* shifts/rotates */
32
DEF(shl_i32, 1, 2, 0, 0)
33
@@ -XXX,XX +XXX,XX @@ DEF(st_i64, 0, 2, 1, 0)
34
/* arith */
35
DEF(rem_i64, 1, 2, 0, 0)
36
DEF(remu_i64, 1, 2, 0, 0)
37
-DEF(div2_i64, 2, 3, 0, 0)
38
DEF(divu2_i64, 2, 3, 0, 0)
39
/* shifts/rotates */
40
DEF(shl_i64, 1, 2, 0, 0)
41
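As elsewhere in this table, DEF(name, oargs, iargs, cargs, flags) lists
the operand counts, so the new entry declares two outputs and three
inputs to match the "q, r, nl, nh, d" form documented below:

/* DEF(name, out-args, in-args, const-args, flags) */
DEF(divs2, 2, 3, 0, TCG_OPF_INT)    /* q, r  <-  nl, nh, d */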
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/tcg-op.c
44
+++ b/tcg/tcg-op.c
45
@@ -XXX,XX +XXX,XX @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
46
{
47
if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
48
tcg_gen_op3_i32(INDEX_op_divs, ret, arg1, arg2);
49
- } else if (TCG_TARGET_HAS_div2_i32) {
50
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I32, 0)) {
51
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
52
tcg_gen_sari_i32(t0, arg1, 31);
53
- tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2);
54
+ tcg_gen_op5_i32(INDEX_op_divs2, ret, t0, arg1, t0, arg2);
55
tcg_temp_free_i32(t0);
56
} else {
57
gen_helper_div_i32(ret, arg1, arg2);
58
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
59
tcg_gen_mul_i32(t0, t0, arg2);
60
tcg_gen_sub_i32(ret, arg1, t0);
61
tcg_temp_free_i32(t0);
62
- } else if (TCG_TARGET_HAS_div2_i32) {
63
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I32, 0)) {
64
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
65
tcg_gen_sari_i32(t0, arg1, 31);
66
- tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2);
67
+ tcg_gen_op5_i32(INDEX_op_divs2, t0, ret, arg1, t0, arg2);
68
tcg_temp_free_i32(t0);
69
} else {
70
gen_helper_rem_i32(ret, arg1, arg2);
71
@@ -XXX,XX +XXX,XX @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
72
{
73
if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
74
tcg_gen_op3_i64(INDEX_op_divs, ret, arg1, arg2);
75
- } else if (TCG_TARGET_HAS_div2_i64) {
76
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I64, 0)) {
77
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
78
tcg_gen_sari_i64(t0, arg1, 63);
79
- tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2);
80
+ tcg_gen_op5_i64(INDEX_op_divs2, ret, t0, arg1, t0, arg2);
81
tcg_temp_free_i64(t0);
82
} else {
83
gen_helper_div_i64(ret, arg1, arg2);
84
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
85
tcg_gen_mul_i64(t0, t0, arg2);
86
tcg_gen_sub_i64(ret, arg1, t0);
87
tcg_temp_free_i64(t0);
88
- } else if (TCG_TARGET_HAS_div2_i64) {
89
+ } else if (tcg_op_supported(INDEX_op_divs2, TCG_TYPE_I64, 0)) {
90
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
91
tcg_gen_sari_i64(t0, arg1, 63);
92
- tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2);
93
+ tcg_gen_op5_i64(INDEX_op_divs2, t0, ret, arg1, t0, arg2);
94
tcg_temp_free_i64(t0);
95
} else {
96
gen_helper_rem_i64(ret, arg1, arg2);
97
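In the expansion above, *nh* is produced by an arithmetic right shift
of 31 (or 63), so the double-word numerator nh:nl passed to divs2 is
simply the sign-extension of arg1. A standalone check for the 32-bit
case (sketch; the right shift of a negative value is assumed
arithmetic, as tcg_gen_sari_i32 guarantees):

#include <stdint.h>
#include <assert.h>

int main(void)
{
    int32_t arg1 = -7;
    int32_t nh = arg1 >> 31;    /* tcg_gen_sari_i32(t0, arg1, 31) */
    int64_t n = (int64_t)(((uint64_t)(uint32_t)nh << 32) | (uint32_t)arg1);

    assert(n == arg1);          /* nh:nl is just arg1, widened */
    return 0;
}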
diff --git a/tcg/tcg.c b/tcg/tcg.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/tcg/tcg.c
100
+++ b/tcg/tcg.c
101
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
102
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
103
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
104
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
105
- OUTOP(INDEX_op_div2_i32, TCGOutOpDivRem, outop_divs2),
106
- OUTOP(INDEX_op_div2_i64, TCGOutOpDivRem, outop_divs2),
107
+ OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
108
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
109
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
110
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
111
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
112
}
113
break;
114
115
- case INDEX_op_div2_i32:
116
- case INDEX_op_div2_i64:
117
+ case INDEX_op_divs2:
118
{
119
const TCGOutOpDivRem *out =
120
container_of(all_outop[op->opc], TCGOutOpDivRem, base);
121
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Arithmetic
      - | *t0* = *t1* % *t2* (unsigned)
        | Undefined behavior if division by zero.
 
+   * - divs2 *q*, *r*, *nl*, *nh*, *d*
+
+     - | *q* = *nh:nl* / *d* (signed)
+       | *r* = *nh:nl* % *d*
+       | Undefined behaviour if division by zero, or the double-word
+         numerator divided by the single-word divisor does not fit
+         within the single-word quotient. The code generator will
+         pass *nh* as a simple sign-extension of *nl*, so the only
+         overflow should be *INT_MIN* / -1.
 
 Logical
 -------
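A standalone model of the divs2 semantics documented above, for the
32-bit case (sketch; d == 0 and quotient overflow are left undefined,
exactly as the documentation says):

#include <stdint.h>

static void divs2_model(int32_t *q, int32_t *r,
                        uint32_t nl, int32_t nh, int32_t d)
{
    int64_t n = (int64_t)(((uint64_t)(uint32_t)nh << 32) | nl);  /* nh:nl */
    *q = (int32_t)(n / d);
    *r = (int32_t)(n % d);
}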
--
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target-has.h        |  2 --
 tcg/loongarch64/tcg-target-has.h |  2 --
 tcg/riscv/tcg-target-has.h       |  2 --
 tcg/s390x/tcg-target-has.h       |  2 --
 tcg/tcg-has.h                    |  7 -------
 tcg/tcg-op.c                     |  8 +++----
 tcg/tcg.c                        |  8 +++----
 tcg/aarch64/tcg-target.c.inc     |  4 ++++
 tcg/arm/tcg-target.c.inc         |  4 ++++
 tcg/i386/tcg-target.c.inc        | 20 +++++++++++-------
 tcg/loongarch64/tcg-target.c.inc |  4 ++++
 tcg/mips/tcg-target.c.inc        |  4 ++++
 tcg/ppc/tcg-target.c.inc         |  4 ++++
 tcg/riscv/tcg-target.c.inc       |  4 ++++
 tcg/s390x/tcg-target.c.inc       | 36 +++++++++++++++-----------------
 tcg/sparc64/tcg-target.c.inc     |  4 ++++
 tcg/tci/tcg-target.c.inc         |  4 ++++
 17 files changed, 69 insertions(+), 50 deletions(-)

diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/i386/tcg-target-has.h
26
+++ b/tcg/i386/tcg-target-has.h
27
@@ -XXX,XX +XXX,XX @@
28
#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
29
30
/* optional instructions */
31
-#define TCG_TARGET_HAS_div2_i32 1
32
#define TCG_TARGET_HAS_rot_i32 1
33
#define TCG_TARGET_HAS_bswap16_i32 1
34
#define TCG_TARGET_HAS_bswap32_i32 1
35
@@ -XXX,XX +XXX,XX @@
36
#if TCG_TARGET_REG_BITS == 64
37
/* Keep 32-bit values zero-extended in a register. */
38
#define TCG_TARGET_HAS_extr_i64_i32 1
39
-#define TCG_TARGET_HAS_div2_i64 1
40
#define TCG_TARGET_HAS_rot_i64 1
41
#define TCG_TARGET_HAS_bswap16_i64 1
42
#define TCG_TARGET_HAS_bswap32_i64 1
43
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/loongarch64/tcg-target-has.h
46
+++ b/tcg/loongarch64/tcg-target-has.h
47
@@ -XXX,XX +XXX,XX @@
48
/* optional instructions */
49
#define TCG_TARGET_HAS_negsetcond_i32 0
50
#define TCG_TARGET_HAS_rem_i32 1
51
-#define TCG_TARGET_HAS_div2_i32 0
52
#define TCG_TARGET_HAS_rot_i32 1
53
#define TCG_TARGET_HAS_extract2_i32 0
54
#define TCG_TARGET_HAS_add2_i32 0
55
@@ -XXX,XX +XXX,XX @@
56
/* 64-bit operations */
57
#define TCG_TARGET_HAS_negsetcond_i64 0
58
#define TCG_TARGET_HAS_rem_i64 1
59
-#define TCG_TARGET_HAS_div2_i64 0
60
#define TCG_TARGET_HAS_rot_i64 1
61
#define TCG_TARGET_HAS_extract2_i64 0
62
#define TCG_TARGET_HAS_extr_i64_i32 1
63
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/riscv/tcg-target-has.h
66
+++ b/tcg/riscv/tcg-target-has.h
67
@@ -XXX,XX +XXX,XX @@
68
/* optional instructions */
69
#define TCG_TARGET_HAS_negsetcond_i32 1
70
#define TCG_TARGET_HAS_rem_i32 1
71
-#define TCG_TARGET_HAS_div2_i32 0
72
#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
73
#define TCG_TARGET_HAS_extract2_i32 0
74
#define TCG_TARGET_HAS_add2_i32 1
75
@@ -XXX,XX +XXX,XX @@
76
77
#define TCG_TARGET_HAS_negsetcond_i64 1
78
#define TCG_TARGET_HAS_rem_i64 1
79
-#define TCG_TARGET_HAS_div2_i64 0
80
#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
81
#define TCG_TARGET_HAS_extract2_i64 0
82
#define TCG_TARGET_HAS_extr_i64_i32 1
83
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/s390x/tcg-target-has.h
86
+++ b/tcg/s390x/tcg-target-has.h
87
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
88
((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
89
90
/* optional instructions */
91
-#define TCG_TARGET_HAS_div2_i32 1
92
#define TCG_TARGET_HAS_rot_i32 1
93
#define TCG_TARGET_HAS_bswap16_i32 1
94
#define TCG_TARGET_HAS_bswap32_i32 1
95
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
96
#define TCG_TARGET_HAS_extr_i64_i32 0
97
#define TCG_TARGET_HAS_qemu_st8_i32 0
98
99
-#define TCG_TARGET_HAS_div2_i64 1
100
#define TCG_TARGET_HAS_rot_i64 1
101
#define TCG_TARGET_HAS_bswap16_i64 1
102
#define TCG_TARGET_HAS_bswap32_i64 1
103
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/tcg-has.h
106
+++ b/tcg/tcg-has.h
107
@@ -XXX,XX +XXX,XX @@
108
#define TCG_TARGET_HAS_extr_i64_i32 0
109
#define TCG_TARGET_HAS_div_i64 0
110
#define TCG_TARGET_HAS_rem_i64 0
111
-#define TCG_TARGET_HAS_div2_i64 0
112
#define TCG_TARGET_HAS_rot_i64 0
113
#define TCG_TARGET_HAS_bswap16_i64 0
114
#define TCG_TARGET_HAS_bswap32_i64 0
115
@@ -XXX,XX +XXX,XX @@
116
#define TCG_TARGET_HAS_sub2_i32 1
117
#endif
118
119
-#ifndef TCG_TARGET_HAS_div2_i32
120
-#define TCG_TARGET_HAS_div2_i32 0
121
-#endif
122
-#ifndef TCG_TARGET_HAS_div2_i64
123
-#define TCG_TARGET_HAS_div2_i64 0
124
-#endif
125
#ifndef TCG_TARGET_HAS_rem_i32
126
#define TCG_TARGET_HAS_rem_i32 0
127
#endif
128
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/tcg/tcg-op.c
131
+++ b/tcg/tcg-op.c
132
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
133
{
134
if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
135
tcg_gen_op3_i32(INDEX_op_divu, ret, arg1, arg2);
136
- } else if (TCG_TARGET_HAS_div2_i32) {
137
+ } else if (tcg_op_supported(INDEX_op_divu2_i32, TCG_TYPE_I32, 0)) {
138
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
139
TCGv_i32 zero = tcg_constant_i32(0);
140
tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2);
141
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
142
tcg_gen_mul_i32(t0, t0, arg2);
143
tcg_gen_sub_i32(ret, arg1, t0);
144
tcg_temp_free_i32(t0);
145
- } else if (TCG_TARGET_HAS_div2_i32) {
146
+ } else if (tcg_op_supported(INDEX_op_divu2_i32, TCG_TYPE_I32, 0)) {
147
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
148
TCGv_i32 zero = tcg_constant_i32(0);
149
tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2);
150
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
151
{
152
if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
153
tcg_gen_op3_i64(INDEX_op_divu, ret, arg1, arg2);
154
- } else if (TCG_TARGET_HAS_div2_i64) {
155
+ } else if (tcg_op_supported(INDEX_op_divu2_i64, TCG_TYPE_I64, 0)) {
156
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
157
TCGv_i64 zero = tcg_constant_i64(0);
158
tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2);
159
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
160
tcg_gen_mul_i64(t0, t0, arg2);
161
tcg_gen_sub_i64(ret, arg1, t0);
162
tcg_temp_free_i64(t0);
163
- } else if (TCG_TARGET_HAS_div2_i64) {
164
+ } else if (tcg_op_supported(INDEX_op_divu2_i64, TCG_TYPE_I64, 0)) {
165
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
166
TCGv_i64 zero = tcg_constant_i64(0);
167
tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2);
168
diff --git a/tcg/tcg.c b/tcg/tcg.c
169
index XXXXXXX..XXXXXXX 100644
170
--- a/tcg/tcg.c
171
+++ b/tcg/tcg.c
172
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
173
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
174
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
175
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
176
+ OUTOP(INDEX_op_divu2_i32, TCGOutOpDivRem, outop_divu2),
177
+ OUTOP(INDEX_op_divu2_i64, TCGOutOpDivRem, outop_divu2),
178
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
179
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
180
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
181
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
182
case INDEX_op_rem_i32:
183
case INDEX_op_remu_i32:
184
return TCG_TARGET_HAS_rem_i32;
185
- case INDEX_op_divu2_i32:
186
- return TCG_TARGET_HAS_div2_i32;
187
case INDEX_op_rotl_i32:
188
case INDEX_op_rotr_i32:
189
return TCG_TARGET_HAS_rot_i32;
190
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
191
case INDEX_op_rem_i64:
192
case INDEX_op_remu_i64:
193
return TCG_TARGET_HAS_rem_i64;
194
- case INDEX_op_divu2_i64:
195
- return TCG_TARGET_HAS_div2_i64;
196
case INDEX_op_rotl_i64:
197
case INDEX_op_rotr_i64:
198
return TCG_TARGET_HAS_rot_i64;
199
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
200
break;
201
202
case INDEX_op_divs2:
203
+ case INDEX_op_divu2_i32:
204
+ case INDEX_op_divu2_i64:
205
{
206
const TCGOutOpDivRem *out =
207
container_of(all_outop[op->opc], TCGOutOpDivRem, base);
208
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
209
index XXXXXXX..XXXXXXX 100644
210
--- a/tcg/aarch64/tcg-target.c.inc
211
+++ b/tcg/aarch64/tcg-target.c.inc
212
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
213
.out_rrr = tgen_divu,
214
};
215
216
+static const TCGOutOpDivRem outop_divu2 = {
217
+ .base.static_constraint = C_NotImplemented,
218
+};
219
+
220
static void tgen_eqv(TCGContext *s, TCGType type,
221
TCGReg a0, TCGReg a1, TCGReg a2)
222
{
223
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
224
index XXXXXXX..XXXXXXX 100644
225
--- a/tcg/arm/tcg-target.c.inc
226
+++ b/tcg/arm/tcg-target.c.inc
227
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
228
.out_rrr = tgen_divu,
229
};
230
231
+static const TCGOutOpDivRem outop_divu2 = {
232
+ .base.static_constraint = C_NotImplemented,
233
+};
234
+
235
static const TCGOutOpBinary outop_eqv = {
236
.base.static_constraint = C_NotImplemented,
237
};
238
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
239
index XXXXXXX..XXXXXXX 100644
240
--- a/tcg/i386/tcg-target.c.inc
241
+++ b/tcg/i386/tcg-target.c.inc
242
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
243
.base.static_constraint = C_NotImplemented,
244
};
245
246
+static void tgen_divu2(TCGContext *s, TCGType type,
247
+ TCGReg a0, TCGReg a1, TCGReg a4)
248
+{
249
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
250
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, a4);
251
+}
252
+
253
+static const TCGOutOpDivRem outop_divu2 = {
254
+ .base.static_constraint = C_O2_I3(a, d, 0, 1, r),
255
+ .out_rr01r = tgen_divu2,
256
+};
257
+
258
static const TCGOutOpBinary outop_eqv = {
259
.base.static_constraint = C_NotImplemented,
260
};
261
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
262
}
263
break;
264
265
- OP_32_64(divu2):
266
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
267
- break;
268
-
269
OP_32_64(shl):
270
/* For small constant 3-operand shift, use LEA. */
271
if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
272
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
273
case INDEX_op_movcond_i64:
274
return C_O1_I4(r, r, reT, r, 0);
275
276
- case INDEX_op_divu2_i32:
277
- case INDEX_op_divu2_i64:
278
- return C_O2_I3(a, d, 0, 1, r);
279
-
280
case INDEX_op_mulu2_i32:
281
case INDEX_op_mulu2_i64:
282
case INDEX_op_muls2_i32:
283
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
284
index XXXXXXX..XXXXXXX 100644
285
--- a/tcg/loongarch64/tcg-target.c.inc
286
+++ b/tcg/loongarch64/tcg-target.c.inc
287
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
288
.out_rrr = tgen_divu,
289
};
290
291
+static const TCGOutOpDivRem outop_divu2 = {
292
+ .base.static_constraint = C_NotImplemented,
293
+};
294
+
295
static const TCGOutOpBinary outop_eqv = {
296
.base.static_constraint = C_NotImplemented,
297
};
298
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
299
index XXXXXXX..XXXXXXX 100644
300
--- a/tcg/mips/tcg-target.c.inc
301
+++ b/tcg/mips/tcg-target.c.inc
302
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
303
.out_rrr = tgen_divu,
304
};
305
306
+static const TCGOutOpDivRem outop_divu2 = {
307
+ .base.static_constraint = C_NotImplemented,
308
+};
309
+
310
static const TCGOutOpBinary outop_eqv = {
311
.base.static_constraint = C_NotImplemented,
312
};
313
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
314
index XXXXXXX..XXXXXXX 100644
315
--- a/tcg/ppc/tcg-target.c.inc
316
+++ b/tcg/ppc/tcg-target.c.inc
317
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
318
.out_rrr = tgen_divu,
319
};
320
321
+static const TCGOutOpDivRem outop_divu2 = {
322
+ .base.static_constraint = C_NotImplemented,
323
+};
324
+
325
static const TCGOutOpBinary outop_eqv = {
326
.base.static_constraint = C_O1_I2(r, r, r),
327
.out_rrr = tgen_eqv,
328
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
329
index XXXXXXX..XXXXXXX 100644
330
--- a/tcg/riscv/tcg-target.c.inc
331
+++ b/tcg/riscv/tcg-target.c.inc
332
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
333
.out_rrr = tgen_divu,
334
};
335
336
+static const TCGOutOpDivRem outop_divu2 = {
337
+ .base.static_constraint = C_NotImplemented,
338
+};
339
+
340
static void tgen_eqv(TCGContext *s, TCGType type,
341
TCGReg a0, TCGReg a1, TCGReg a2)
342
{
343
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
344
index XXXXXXX..XXXXXXX 100644
345
--- a/tcg/s390x/tcg-target.c.inc
346
+++ b/tcg/s390x/tcg-target.c.inc
347
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
348
.base.static_constraint = C_NotImplemented,
349
};
350
351
+static void tgen_divu2(TCGContext *s, TCGType type,
352
+ TCGReg a0, TCGReg a1, TCGReg a4)
353
+{
354
+ tcg_debug_assert((a1 & 1) == 0);
355
+ tcg_debug_assert(a0 == a1 + 1);
356
+ if (type == TCG_TYPE_I32) {
357
+ tcg_out_insn(s, RRE, DLR, a1, a4);
358
+ } else {
359
+ tcg_out_insn(s, RRE, DLGR, a1, a4);
360
+ }
361
+}
362
+
363
+static const TCGOutOpDivRem outop_divu2 = {
364
+ .base.static_constraint = C_O2_I3(o, m, 0, 1, r),
365
+ .out_rr01r = tgen_divu2,
366
+};
367
+
368
static void tgen_eqv(TCGContext *s, TCGType type,
369
TCGReg a0, TCGReg a1, TCGReg a2)
370
{
371
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
372
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
373
break;
374
375
- case INDEX_op_divu2_i32:
376
- tcg_debug_assert(args[0] == args[2]);
377
- tcg_debug_assert(args[1] == args[3]);
378
- tcg_debug_assert((args[1] & 1) == 0);
379
- tcg_debug_assert(args[0] == args[1] + 1);
380
- tcg_out_insn(s, RRE, DLR, args[1], args[4]);
381
- break;
382
-
383
case INDEX_op_shl_i32:
384
op = RS_SLL;
385
op2 = RSY_SLLK;
386
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
387
tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
388
break;
389
390
- case INDEX_op_divu2_i64:
391
- tcg_debug_assert(args[0] == args[2]);
392
- tcg_debug_assert(args[1] == args[3]);
393
- tcg_debug_assert((args[1] & 1) == 0);
394
- tcg_debug_assert(args[0] == args[1] + 1);
395
- tcg_out_insn(s, RRE, DLGR, args[1], args[4]);
396
- break;
397
case INDEX_op_mulu2_i64:
398
tcg_debug_assert(args[0] == args[2]);
399
tcg_debug_assert((args[1] & 1) == 0);
400
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
401
case INDEX_op_movcond_i64:
402
return C_O1_I4(r, r, rC, rI, r);
403
404
- case INDEX_op_divu2_i32:
405
- case INDEX_op_divu2_i64:
406
- return C_O2_I3(o, m, 0, 1, r);
407
-
408
case INDEX_op_mulu2_i64:
409
return C_O2_I2(o, m, 0, r);
410
case INDEX_op_muls2_i64:
411
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
412
index XXXXXXX..XXXXXXX 100644
413
--- a/tcg/sparc64/tcg-target.c.inc
414
+++ b/tcg/sparc64/tcg-target.c.inc
415
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
416
.out_rri = tgen_divui,
417
};
418
419
+static const TCGOutOpDivRem outop_divu2 = {
420
+ .base.static_constraint = C_NotImplemented,
421
+};
422
+
423
static const TCGOutOpBinary outop_eqv = {
424
.base.static_constraint = C_NotImplemented,
425
};
426
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
427
index XXXXXXX..XXXXXXX 100644
428
--- a/tcg/tci/tcg-target.c.inc
429
+++ b/tcg/tci/tcg-target.c.inc
430
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_divu = {
431
.out_rrr = tgen_divu,
432
};
433
434
+static const TCGOutOpDivRem outop_divu2 = {
435
+ .base.static_constraint = C_NotImplemented,
436
+};
437
+
438
static void tgen_eqv(TCGContext *s, TCGType type,
439
TCGReg a0, TCGReg a1, TCGReg a2)
440
{
--
2.43.0

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h  |  3 +--
 tcg/tcg-op.c           | 16 ++++++++--------
 tcg/tcg.c              |  6 ++----
 docs/devel/tcg-ops.rst | 10 ++++++++++
 4 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg-opc.h
13
+++ b/include/tcg/tcg-opc.h
14
@@ -XXX,XX +XXX,XX @@ DEF(andc, 1, 2, 0, TCG_OPF_INT)
15
DEF(divs, 1, 2, 0, TCG_OPF_INT)
16
DEF(divs2, 2, 3, 0, TCG_OPF_INT)
17
DEF(divu, 1, 2, 0, TCG_OPF_INT)
18
+DEF(divu2, 2, 3, 0, TCG_OPF_INT)
19
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
20
DEF(mul, 1, 2, 0, TCG_OPF_INT)
21
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
22
@@ -XXX,XX +XXX,XX @@ DEF(st_i32, 0, 2, 1, 0)
23
/* arith */
24
DEF(rem_i32, 1, 2, 0, 0)
25
DEF(remu_i32, 1, 2, 0, 0)
26
-DEF(divu2_i32, 2, 3, 0, 0)
27
/* shifts/rotates */
28
DEF(shl_i32, 1, 2, 0, 0)
29
DEF(shr_i32, 1, 2, 0, 0)
30
@@ -XXX,XX +XXX,XX @@ DEF(st_i64, 0, 2, 1, 0)
31
/* arith */
32
DEF(rem_i64, 1, 2, 0, 0)
33
DEF(remu_i64, 1, 2, 0, 0)
34
-DEF(divu2_i64, 2, 3, 0, 0)
35
/* shifts/rotates */
36
DEF(shl_i64, 1, 2, 0, 0)
37
DEF(shr_i64, 1, 2, 0, 0)
38
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/tcg/tcg-op.c
41
+++ b/tcg/tcg-op.c
42
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
43
{
44
if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
45
tcg_gen_op3_i32(INDEX_op_divu, ret, arg1, arg2);
46
- } else if (tcg_op_supported(INDEX_op_divu2_i32, TCG_TYPE_I32, 0)) {
47
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I32, 0)) {
48
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
49
TCGv_i32 zero = tcg_constant_i32(0);
50
- tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2);
51
+ tcg_gen_op5_i32(INDEX_op_divu2, ret, t0, arg1, zero, arg2);
52
tcg_temp_free_i32(t0);
53
} else {
54
gen_helper_divu_i32(ret, arg1, arg2);
55
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
56
tcg_gen_mul_i32(t0, t0, arg2);
57
tcg_gen_sub_i32(ret, arg1, t0);
58
tcg_temp_free_i32(t0);
59
- } else if (tcg_op_supported(INDEX_op_divu2_i32, TCG_TYPE_I32, 0)) {
60
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I32, 0)) {
61
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
62
TCGv_i32 zero = tcg_constant_i32(0);
63
- tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2);
64
+ tcg_gen_op5_i32(INDEX_op_divu2, t0, ret, arg1, zero, arg2);
65
tcg_temp_free_i32(t0);
66
} else {
67
gen_helper_remu_i32(ret, arg1, arg2);
68
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
69
{
70
if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
71
tcg_gen_op3_i64(INDEX_op_divu, ret, arg1, arg2);
72
- } else if (tcg_op_supported(INDEX_op_divu2_i64, TCG_TYPE_I64, 0)) {
73
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I64, 0)) {
74
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
75
TCGv_i64 zero = tcg_constant_i64(0);
76
- tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2);
77
+ tcg_gen_op5_i64(INDEX_op_divu2, ret, t0, arg1, zero, arg2);
78
tcg_temp_free_i64(t0);
79
} else {
80
gen_helper_divu_i64(ret, arg1, arg2);
81
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
82
tcg_gen_mul_i64(t0, t0, arg2);
83
tcg_gen_sub_i64(ret, arg1, t0);
84
tcg_temp_free_i64(t0);
85
- } else if (tcg_op_supported(INDEX_op_divu2_i64, TCG_TYPE_I64, 0)) {
86
+ } else if (tcg_op_supported(INDEX_op_divu2, TCG_TYPE_I64, 0)) {
87
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
88
TCGv_i64 zero = tcg_constant_i64(0);
89
- tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2);
90
+ tcg_gen_op5_i64(INDEX_op_divu2, t0, ret, arg1, zero, arg2);
91
tcg_temp_free_i64(t0);
92
} else {
93
gen_helper_remu_i64(ret, arg1, arg2);
94
diff --git a/tcg/tcg.c b/tcg/tcg.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/tcg/tcg.c
97
+++ b/tcg/tcg.c
98
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
99
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
100
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
101
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
102
- OUTOP(INDEX_op_divu2_i32, TCGOutOpDivRem, outop_divu2),
103
- OUTOP(INDEX_op_divu2_i64, TCGOutOpDivRem, outop_divu2),
104
+ OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
105
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
106
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
107
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
108
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
109
break;
110
111
case INDEX_op_divs2:
112
- case INDEX_op_divu2_i32:
113
- case INDEX_op_divu2_i64:
114
+ case INDEX_op_divu2:
115
{
116
const TCGOutOpDivRem *out =
117
container_of(all_outop[op->opc], TCGOutOpDivRem, base);
118
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
119
index XXXXXXX..XXXXXXX 100644
120
--- a/docs/devel/tcg-ops.rst
121
+++ b/docs/devel/tcg-ops.rst
122
@@ -XXX,XX +XXX,XX @@ Arithmetic
123
pass *nh* as a simple sign-extension of *nl*, so the only
124
overflow should be *INT_MIN* / -1.
125
126
+ * - divu2 *q*, *r*, *nl*, *nh*, *d*
127
+
128
+ - | *q* = *nh:nl* / *d* (unsigned)
129
+ | *r* = *nh:nl* % *d*
130
+ | Undefined behavior if division by zero, or if the double-word
131
+ numerator divided by the single-word divisor does not fit
132
+ within the single-word quotient. The code generator will
133
+ pass 0 as *nh*, a simple zero-extension of *nl*,
134
+ so overflow should never occur.
135
+
136
Logical
137
-------
138
139
--
140
2.43.0
141
142
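The documentation entry added above pins down divu2's semantics: divide the double-word value nh:nl by a single-word divisor, producing a single-word quotient and remainder. A standalone sketch of the 64-bit case (not QEMU code; assumes a compiler providing unsigned __int128):

    #include <inttypes.h>
    #include <stdio.h>

    /* q = nh:nl / d, r = nh:nl % d; the caller must guarantee d != 0
       and that the quotient fits in 64 bits, per the op's contract. */
    static void divu2_i64(uint64_t *q, uint64_t *r,
                          uint64_t nl, uint64_t nh, uint64_t d)
    {
        unsigned __int128 n = ((unsigned __int128)nh << 64) | nl;
        *q = (uint64_t)(n / d);
        *r = (uint64_t)(n % d);
    }

    int main(void)
    {
        uint64_t q, r;
        /* With nh == 0 this degenerates to plain 64-bit division,
           which is exactly how tcg_gen_divu_i64 uses it as a fallback. */
        divu2_i64(&q, &r, 123456789, 0, 1000);
        printf("q=%" PRIu64 " r=%" PRIu64 "\n", q, r);   /* q=123456 r=789 */
        return 0;
    }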
Deleted patch
1
For TCI, we're losing type information in the interpreter.
2
Introduce a tci-specific opcode to handle the difference.
3
1
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tcg.c | 6 ++++--
8
tcg/tci.c | 4 ++--
9
tcg/aarch64/tcg-target.c.inc | 19 ++++++++++------
10
tcg/arm/tcg-target.c.inc | 4 ++++
11
tcg/i386/tcg-target.c.inc | 4 ++++
12
tcg/loongarch64/tcg-target.c.inc | 24 +++++++++++++--------
13
tcg/mips/tcg-target.c.inc | 37 ++++++++++++++++++--------------
14
tcg/ppc/tcg-target.c.inc | 27 +++++++++++++++--------
15
tcg/riscv/tcg-target.c.inc | 21 ++++++++++--------
16
tcg/s390x/tcg-target.c.inc | 4 ++++
17
tcg/sparc64/tcg-target.c.inc | 4 ++++
18
tcg/tci/tcg-target-opc.h.inc | 1 +
19
tcg/tci/tcg-target.c.inc | 17 ++++++++++++---
20
13 files changed, 115 insertions(+), 57 deletions(-)
21
22
diff --git a/tcg/tcg.c b/tcg/tcg.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/tcg.c
25
+++ b/tcg/tcg.c
26
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
27
OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
28
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
29
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
30
+ OUTOP(INDEX_op_rem_i32, TCGOutOpBinary, outop_rems),
31
+ OUTOP(INDEX_op_rem_i64, TCGOutOpBinary, outop_rems),
32
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
33
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
34
};
35
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
36
37
case INDEX_op_negsetcond_i32:
38
return TCG_TARGET_HAS_negsetcond_i32;
39
- case INDEX_op_rem_i32:
40
case INDEX_op_remu_i32:
41
return TCG_TARGET_HAS_rem_i32;
42
case INDEX_op_rotl_i32:
43
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
44
45
case INDEX_op_negsetcond_i64:
46
return TCG_TARGET_HAS_negsetcond_i64;
47
- case INDEX_op_rem_i64:
48
case INDEX_op_remu_i64:
49
return TCG_TARGET_HAS_rem_i64;
50
case INDEX_op_rotl_i64:
51
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
52
case INDEX_op_nor:
53
case INDEX_op_or:
54
case INDEX_op_orc:
55
+ case INDEX_op_rem_i32:
56
+ case INDEX_op_rem_i64:
57
case INDEX_op_xor:
58
{
59
const TCGOutOpBinary *out =
60
diff --git a/tcg/tci.c b/tcg/tci.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/tcg/tci.c
63
+++ b/tcg/tci.c
64
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
65
tci_args_rrr(insn, &r0, &r1, &r2);
66
regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
67
break;
68
- case INDEX_op_rem_i32:
69
+ case INDEX_op_tci_rems32:
70
tci_args_rrr(insn, &r0, &r1, &r2);
71
regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
72
break;
73
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
74
case INDEX_op_orc:
75
case INDEX_op_sub:
76
case INDEX_op_xor:
77
- case INDEX_op_rem_i32:
78
case INDEX_op_rem_i64:
79
case INDEX_op_remu_i32:
80
case INDEX_op_remu_i64:
81
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
82
case INDEX_op_ctz_i64:
83
case INDEX_op_tci_divs32:
84
case INDEX_op_tci_divu32:
85
+ case INDEX_op_tci_rems32:
86
tci_args_rrr(insn, &r0, &r1, &r2);
87
info->fprintf_func(info->stream, "%-12s %s, %s, %s",
88
op_name, str_r(r0), str_r(r1), str_r(r2));
89
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
90
index XXXXXXX..XXXXXXX 100644
91
--- a/tcg/aarch64/tcg-target.c.inc
92
+++ b/tcg/aarch64/tcg-target.c.inc
93
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
94
.out_rrr = tgen_orc,
95
};
96
97
+static void tgen_rems(TCGContext *s, TCGType type,
98
+ TCGReg a0, TCGReg a1, TCGReg a2)
99
+{
100
+ tcg_out_insn(s, 3508, SDIV, type, TCG_REG_TMP0, a1, a2);
101
+ tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1);
102
+}
103
+
104
+static const TCGOutOpBinary outop_rems = {
105
+ .base.static_constraint = C_O1_I2(r, r, r),
106
+ .out_rrr = tgen_rems,
107
+};
108
+
109
static void tgen_sub(TCGContext *s, TCGType type,
110
TCGReg a0, TCGReg a1, TCGReg a2)
111
{
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
113
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
114
break;
115
116
- case INDEX_op_rem_i64:
117
- case INDEX_op_rem_i32:
118
- tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP0, a1, a2);
119
- tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1);
120
- break;
121
case INDEX_op_remu_i64:
122
case INDEX_op_remu_i32:
123
tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP0, a1, a2);
124
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
125
case INDEX_op_negsetcond_i64:
126
return C_O1_I2(r, r, rC);
127
128
- case INDEX_op_rem_i32:
129
- case INDEX_op_rem_i64:
130
case INDEX_op_remu_i32:
131
case INDEX_op_remu_i64:
132
return C_O1_I2(r, r, r);
133
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
134
index XXXXXXX..XXXXXXX 100644
135
--- a/tcg/arm/tcg-target.c.inc
136
+++ b/tcg/arm/tcg-target.c.inc
137
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
138
.base.static_constraint = C_NotImplemented,
139
};
140
141
+static const TCGOutOpBinary outop_rems = {
142
+ .base.static_constraint = C_NotImplemented,
143
+};
144
+
145
static void tgen_sub(TCGContext *s, TCGType type,
146
TCGReg a0, TCGReg a1, TCGReg a2)
147
{
148
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
149
index XXXXXXX..XXXXXXX 100644
150
--- a/tcg/i386/tcg-target.c.inc
151
+++ b/tcg/i386/tcg-target.c.inc
152
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
153
.base.static_constraint = C_NotImplemented,
154
};
155
156
+static const TCGOutOpBinary outop_rems = {
157
+ .base.static_constraint = C_NotImplemented,
158
+};
159
+
160
static void tgen_sub(TCGContext *s, TCGType type,
161
TCGReg a0, TCGReg a1, TCGReg a2)
162
{
163
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/loongarch64/tcg-target.c.inc
166
+++ b/tcg/loongarch64/tcg-target.c.inc
167
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
168
.out_rrr = tgen_orc,
169
};
170
171
+static void tgen_rems(TCGContext *s, TCGType type,
172
+ TCGReg a0, TCGReg a1, TCGReg a2)
173
+{
174
+ if (type == TCG_TYPE_I32) {
175
+ tcg_out_opc_mod_w(s, a0, a1, a2);
176
+ } else {
177
+ tcg_out_opc_mod_d(s, a0, a1, a2);
178
+ }
179
+}
180
+
181
+static const TCGOutOpBinary outop_rems = {
182
+ .base.static_constraint = C_O1_I2(r, r, r),
183
+ .out_rrr = tgen_rems,
184
+};
185
+
186
static void tgen_sub(TCGContext *s, TCGType type,
187
TCGReg a0, TCGReg a1, TCGReg a2)
188
{
189
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
190
}
191
break;
192
193
- case INDEX_op_rem_i32:
194
- tcg_out_opc_mod_w(s, a0, a1, a2);
195
- break;
196
- case INDEX_op_rem_i64:
197
- tcg_out_opc_mod_d(s, a0, a1, a2);
198
- break;
199
-
200
case INDEX_op_remu_i32:
201
tcg_out_opc_mod_wu(s, a0, a1, a2);
202
break;
203
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
204
case INDEX_op_setcond_i64:
205
return C_O1_I2(r, rz, rJ);
206
207
- case INDEX_op_rem_i32:
208
- case INDEX_op_rem_i64:
209
case INDEX_op_remu_i32:
210
case INDEX_op_remu_i64:
211
return C_O1_I2(r, rz, rz);
212
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
213
index XXXXXXX..XXXXXXX 100644
214
--- a/tcg/mips/tcg-target.c.inc
215
+++ b/tcg/mips/tcg-target.c.inc
216
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
217
.base.static_constraint = C_NotImplemented,
218
};
219
220
+static void tgen_rems(TCGContext *s, TCGType type,
221
+ TCGReg a0, TCGReg a1, TCGReg a2)
222
+{
223
+ if (use_mips32r6_instructions) {
224
+ if (type == TCG_TYPE_I32) {
225
+ tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
226
+ } else {
227
+ tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
228
+ }
229
+ } else {
230
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIV : OPC_DDIV;
231
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
232
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
233
+ }
234
+}
235
+
236
+static const TCGOutOpBinary outop_rems = {
237
+ .base.static_constraint = C_O1_I2(r, r, r),
238
+ .out_rrr = tgen_rems,
239
+};
240
+
241
static void tgen_sub(TCGContext *s, TCGType type,
242
TCGReg a0, TCGReg a1, TCGReg a2)
243
{
244
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
245
tcg_out_ldst(s, i1, a0, a1, a2);
246
break;
247
248
- case INDEX_op_rem_i32:
249
- if (use_mips32r6_instructions) {
250
- tcg_out_opc_reg(s, OPC_MOD, a0, a1, a2);
251
- break;
252
- }
253
- i1 = OPC_DIV, i2 = OPC_MFHI;
254
- goto do_hilo1;
255
case INDEX_op_remu_i32:
256
if (use_mips32r6_instructions) {
257
tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
258
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
259
}
260
i1 = OPC_DIVU, i2 = OPC_MFHI;
261
goto do_hilo1;
262
- case INDEX_op_rem_i64:
263
- if (use_mips32r6_instructions) {
264
- tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
265
- break;
266
- }
267
- i1 = OPC_DDIV, i2 = OPC_MFHI;
268
- goto do_hilo1;
269
case INDEX_op_remu_i64:
270
if (use_mips32r6_instructions) {
271
tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
272
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
273
case INDEX_op_st_i64:
274
return C_O0_I2(rz, r);
275
276
- case INDEX_op_rem_i32:
277
case INDEX_op_remu_i32:
278
case INDEX_op_setcond_i32:
279
- case INDEX_op_rem_i64:
280
case INDEX_op_remu_i64:
281
case INDEX_op_setcond_i64:
282
return C_O1_I2(r, rz, rz);
283
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
284
index XXXXXXX..XXXXXXX 100644
285
--- a/tcg/ppc/tcg-target.c.inc
286
+++ b/tcg/ppc/tcg-target.c.inc
287
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
288
.out_rrr = tgen_orc,
289
};
290
291
+static TCGConstraintSetIndex cset_mod(TCGType type, unsigned flags)
292
+{
293
+ return have_isa_3_00 ? C_O1_I2(r, r, r) : C_NotImplemented;
294
+}
295
+
296
+static void tgen_rems(TCGContext *s, TCGType type,
297
+ TCGReg a0, TCGReg a1, TCGReg a2)
298
+{
299
+ uint32_t insn = type == TCG_TYPE_I32 ? MODSW : MODSD;
300
+ tcg_out32(s, insn | TAB(a0, a1, a2));
301
+}
302
+
303
+static const TCGOutOpBinary outop_rems = {
304
+ .base.static_constraint = C_Dynamic,
305
+ .base.dynamic_constraint = cset_mod,
306
+ .out_rrr = tgen_rems,
307
+};
308
+
309
static void tgen_sub(TCGContext *s, TCGType type,
310
TCGReg a0, TCGReg a1, TCGReg a2)
311
{
312
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
313
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
314
break;
315
316
- case INDEX_op_rem_i32:
317
- tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
318
- break;
319
-
320
case INDEX_op_remu_i32:
321
tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
322
break;
323
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
324
}
325
break;
326
327
- case INDEX_op_rem_i64:
328
- tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
329
- break;
330
case INDEX_op_remu_i64:
331
tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
332
break;
333
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
334
case INDEX_op_rotr_i64:
335
return C_O1_I2(r, r, ri);
336
337
- case INDEX_op_rem_i32:
338
case INDEX_op_remu_i32:
339
- case INDEX_op_rem_i64:
340
case INDEX_op_remu_i64:
341
return C_O1_I2(r, r, r);
342
343
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
344
index XXXXXXX..XXXXXXX 100644
345
--- a/tcg/riscv/tcg-target.c.inc
346
+++ b/tcg/riscv/tcg-target.c.inc
347
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
348
.out_rrr = tgen_orc,
349
};
350
351
+static void tgen_rems(TCGContext *s, TCGType type,
352
+ TCGReg a0, TCGReg a1, TCGReg a2)
353
+{
354
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMW : OPC_REM;
355
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
356
+}
357
+
358
+static const TCGOutOpBinary outop_rems = {
359
+ .base.static_constraint = C_O1_I2(r, r, r),
360
+ .out_rrr = tgen_rems,
361
+};
362
+
363
static void tgen_sub(TCGContext *s, TCGType type,
364
TCGReg a0, TCGReg a1, TCGReg a2)
365
{
366
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
367
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
368
break;
369
370
- case INDEX_op_rem_i32:
371
- tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
372
- break;
373
- case INDEX_op_rem_i64:
374
- tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
375
- break;
376
-
377
case INDEX_op_remu_i32:
378
tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
379
break;
380
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
381
case INDEX_op_negsetcond_i64:
382
return C_O1_I2(r, r, rI);
383
384
- case INDEX_op_rem_i32:
385
case INDEX_op_remu_i32:
386
- case INDEX_op_rem_i64:
387
case INDEX_op_remu_i64:
388
return C_O1_I2(r, rz, rz);
389
390
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
391
index XXXXXXX..XXXXXXX 100644
392
--- a/tcg/s390x/tcg-target.c.inc
393
+++ b/tcg/s390x/tcg-target.c.inc
394
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
395
.out_rrr = tgen_orc,
396
};
397
398
+static const TCGOutOpBinary outop_rems = {
399
+ .base.static_constraint = C_NotImplemented,
400
+};
401
+
402
static void tgen_sub(TCGContext *s, TCGType type,
403
TCGReg a0, TCGReg a1, TCGReg a2)
404
{
405
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
406
index XXXXXXX..XXXXXXX 100644
407
--- a/tcg/sparc64/tcg-target.c.inc
408
+++ b/tcg/sparc64/tcg-target.c.inc
409
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
410
.out_rrr = tgen_orc,
411
};
412
413
+static const TCGOutOpBinary outop_rems = {
414
+ .base.static_constraint = C_NotImplemented,
415
+};
416
+
417
static void tgen_sub(TCGContext *s, TCGType type,
418
TCGReg a0, TCGReg a1, TCGReg a2)
419
{
420
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
421
index XXXXXXX..XXXXXXX 100644
422
--- a/tcg/tci/tcg-target-opc.h.inc
423
+++ b/tcg/tci/tcg-target-opc.h.inc
424
@@ -XXX,XX +XXX,XX @@ DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
425
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
426
DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
427
DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
428
+DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
429
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
430
index XXXXXXX..XXXXXXX 100644
431
--- a/tcg/tci/tcg-target.c.inc
432
+++ b/tcg/tci/tcg-target.c.inc
433
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
434
case INDEX_op_st_i64:
435
return C_O0_I2(r, r);
436
437
- case INDEX_op_rem_i32:
438
- case INDEX_op_rem_i64:
439
case INDEX_op_remu_i32:
440
case INDEX_op_remu_i64:
441
case INDEX_op_shl_i32:
442
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_orc = {
443
.out_rrr = tgen_orc,
444
};
445
446
+static void tgen_rems(TCGContext *s, TCGType type,
447
+ TCGReg a0, TCGReg a1, TCGReg a2)
448
+{
449
+ TCGOpcode opc = (type == TCG_TYPE_I32
450
+ ? INDEX_op_tci_rems32
451
+ : INDEX_op_rem_i64);
452
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
453
+}
454
+
455
+static const TCGOutOpBinary outop_rems = {
456
+ .base.static_constraint = C_O1_I2(r, r, r),
457
+ .out_rrr = tgen_rems,
458
+};
459
+
460
static void tgen_sub(TCGContext *s, TCGType type,
461
TCGReg a0, TCGReg a1, TCGReg a2)
462
{
463
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
464
CASE_32_64(sar)
465
CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
466
CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
467
- CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
468
CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
469
CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
470
CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
471
--
472
2.43.0
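The commit message above is terse about what "losing type information" means: TCI keeps every value in a 64-bit register, so once rem_i32 is folded into a width-generic opcode the interpreter can no longer tell whether to apply 32-bit or 64-bit signed semantics. A standalone sketch of why the two differ on the same register contents:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t reg1 = 0xFFFFFFFFull;   /* reads as -1 when truncated to int32_t */
        uint64_t reg2 = 7;

        int64_t as64 = (int64_t)reg1 % (int64_t)reg2;   /* 4294967295 % 7 == 3 */
        int32_t as32 = (int32_t)reg1 % (int32_t)reg2;   /* -1 % 7 == -1        */

        /* Same bits, different results: hence the dedicated tci_rems32
           opcode to force 32-bit semantics in the interpreter. */
        printf("%" PRId64 " vs %" PRId32 "\n", as64, as32);
        return 0;
    }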
Deleted patch
1
Rename to INDEX_op_rems to emphasize signed inputs,
2
and to mirror INDEX_op_remu_*.
3
1
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-opc.h | 3 +--
8
tcg/optimize.c | 12 +++++++-----
9
tcg/tcg-op.c | 8 ++++----
10
tcg/tcg.c | 6 ++----
11
tcg/tci.c | 4 ++--
12
docs/devel/tcg-ops.rst | 2 +-
13
tcg/tci/tcg-target.c.inc | 2 +-
14
7 files changed, 18 insertions(+), 19 deletions(-)
15
16
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/tcg/tcg-opc.h
19
+++ b/include/tcg/tcg-opc.h
20
@@ -XXX,XX +XXX,XX @@ DEF(nor, 1, 2, 0, TCG_OPF_INT)
21
DEF(not, 1, 1, 0, TCG_OPF_INT)
22
DEF(or, 1, 2, 0, TCG_OPF_INT)
23
DEF(orc, 1, 2, 0, TCG_OPF_INT)
24
+DEF(rems, 1, 2, 0, TCG_OPF_INT)
25
DEF(sub, 1, 2, 0, TCG_OPF_INT)
26
DEF(xor, 1, 2, 0, TCG_OPF_INT)
27
28
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
29
DEF(st16_i32, 0, 2, 1, 0)
30
DEF(st_i32, 0, 2, 1, 0)
31
/* arith */
32
-DEF(rem_i32, 1, 2, 0, 0)
33
DEF(remu_i32, 1, 2, 0, 0)
34
/* shifts/rotates */
35
DEF(shl_i32, 1, 2, 0, 0)
36
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
37
DEF(st32_i64, 0, 2, 1, 0)
38
DEF(st_i64, 0, 2, 1, 0)
39
/* arith */
40
-DEF(rem_i64, 1, 2, 0, 0)
41
DEF(remu_i64, 1, 2, 0, 0)
42
/* shifts/rotates */
43
DEF(shl_i64, 1, 2, 0, 0)
44
diff --git a/tcg/optimize.c b/tcg/optimize.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/optimize.c
47
+++ b/tcg/optimize.c
48
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
49
}
50
return (uint64_t)x / ((uint64_t)y ? : 1);
51
52
- case INDEX_op_rem_i32:
53
- return (int32_t)x % ((int32_t)y ? : 1);
54
+ case INDEX_op_rems:
55
+ if (type == TCG_TYPE_I32) {
56
+ return (int32_t)x % ((int32_t)y ? : 1);
57
+ }
58
+ return (int64_t)x % ((int64_t)y ? : 1);
59
+
60
case INDEX_op_remu_i32:
61
return (uint32_t)x % ((uint32_t)y ? : 1);
62
- case INDEX_op_rem_i64:
63
- return (int64_t)x % ((int64_t)y ? : 1);
64
case INDEX_op_remu_i64:
65
return (uint64_t)x % ((uint64_t)y ? : 1);
66
67
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
case INDEX_op_qemu_st_i128:
69
done = fold_qemu_st(&ctx, op);
70
break;
71
- CASE_OP_32_64(rem):
72
+ case INDEX_op_rems:
73
CASE_OP_32_64(remu):
74
done = fold_remainder(&ctx, op);
75
break;
76
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tcg/tcg-op.c
79
+++ b/tcg/tcg-op.c
80
@@ -XXX,XX +XXX,XX @@ void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
81
82
void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
83
{
84
- if (TCG_TARGET_HAS_rem_i32) {
85
- tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
86
+ if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I32, 0)) {
87
+ tcg_gen_op3_i32(INDEX_op_rems, ret, arg1, arg2);
88
} else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
89
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
90
tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);
91
@@ -XXX,XX +XXX,XX @@ void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
92
93
void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
94
{
95
- if (TCG_TARGET_HAS_rem_i64) {
96
- tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
97
+ if (tcg_op_supported(INDEX_op_rems, TCG_TYPE_I64, 0)) {
98
+ tcg_gen_op3_i64(INDEX_op_rems, ret, arg1, arg2);
99
} else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
100
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
101
tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2);
102
diff --git a/tcg/tcg.c b/tcg/tcg.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/tcg/tcg.c
105
+++ b/tcg/tcg.c
106
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
107
OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
108
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
109
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
110
- OUTOP(INDEX_op_rem_i32, TCGOutOpBinary, outop_rems),
111
- OUTOP(INDEX_op_rem_i64, TCGOutOpBinary, outop_rems),
112
+ OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
113
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
114
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
115
};
116
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
117
case INDEX_op_nor:
118
case INDEX_op_or:
119
case INDEX_op_orc:
120
- case INDEX_op_rem_i32:
121
- case INDEX_op_rem_i64:
122
+ case INDEX_op_rems:
123
case INDEX_op_xor:
124
{
125
const TCGOutOpBinary *out =
126
diff --git a/tcg/tci.c b/tcg/tci.c
127
index XXXXXXX..XXXXXXX 100644
128
--- a/tcg/tci.c
129
+++ b/tcg/tci.c
130
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
131
tci_args_rrr(insn, &r0, &r1, &r2);
132
regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
133
break;
134
- case INDEX_op_rem_i64:
135
+ case INDEX_op_rems:
136
tci_args_rrr(insn, &r0, &r1, &r2);
137
regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
138
break;
139
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
140
case INDEX_op_nor:
141
case INDEX_op_or:
142
case INDEX_op_orc:
143
+ case INDEX_op_rems:
144
case INDEX_op_sub:
145
case INDEX_op_xor:
146
- case INDEX_op_rem_i64:
147
case INDEX_op_remu_i32:
148
case INDEX_op_remu_i64:
149
case INDEX_op_shl_i32:
150
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
151
index XXXXXXX..XXXXXXX 100644
152
--- a/docs/devel/tcg-ops.rst
153
+++ b/docs/devel/tcg-ops.rst
154
@@ -XXX,XX +XXX,XX @@ Arithmetic
155
- | *t0* = *t1* / *t2* (unsigned)
156
| Undefined behavior if division by zero.
157
158
- * - rem_i32/i64 *t0*, *t1*, *t2*
159
+ * - rems *t0*, *t1*, *t2*
160
161
- | *t0* = *t1* % *t2* (signed)
162
| Undefined behavior if division by zero or overflow.
163
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/tci/tcg-target.c.inc
166
+++ b/tcg/tci/tcg-target.c.inc
167
@@ -XXX,XX +XXX,XX @@ static void tgen_rems(TCGContext *s, TCGType type,
168
{
169
TCGOpcode opc = (type == TCG_TYPE_I32
170
? INDEX_op_tci_rems32
171
- : INDEX_op_rem_i64);
172
+ : INDEX_op_rems);
173
tcg_out_op_rrr(s, opc, a0, a1, a2);
174
}
175
176
--
177
2.43.0
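The optimize.c hunk above constant-folds rems with a "?: 1" guard on the divisor (a GNU C conditional with the middle operand omitted). Division by zero is undefined for the guest, but the fold runs on the host at translation time and must not trap, so a zero divisor is quietly replaced by 1. A portable sketch of the same logic, with a plain flag standing in for the TCGType test:

    #include <stdint.h>

    /* Mirrors do_constant_folding_2: 32-bit vs 64-bit signed remainder,
       with the divisor forced nonzero so the host never faults. */
    static uint64_t fold_rems(int type_is_i32, uint64_t x, uint64_t y)
    {
        if (type_is_i32) {
            int32_t d = (int32_t)y;
            return (int32_t)x % (d ? d : 1);
        }
        int64_t d = (int64_t)y;
        return (int64_t)x % (d ? d : 1);
    }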
Deleted patch
1
For TCI, we're losing type information in the interpreter.
2
Introduce a tci-specific opcode to handle the difference.
3
1
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/aarch64/tcg-target-has.h | 2 --
8
tcg/arm/tcg-target-has.h | 1 -
9
tcg/loongarch64/tcg-target-con-set.h | 1 -
10
tcg/loongarch64/tcg-target-has.h | 2 --
11
tcg/mips/tcg-target-has.h | 2 --
12
tcg/ppc/tcg-target-has.h | 2 --
13
tcg/riscv/tcg-target-con-set.h | 1 -
14
tcg/riscv/tcg-target-has.h | 2 --
15
tcg/sparc64/tcg-target-has.h | 2 --
16
tcg/tcg-has.h | 9 ------
17
tcg/tci/tcg-target-has.h | 2 --
18
tcg/tcg-op.c | 4 +--
19
tcg/tcg.c | 8 +++---
20
tcg/tci.c | 4 +--
21
tcg/aarch64/tcg-target.c.inc | 22 ++++++++-------
22
tcg/arm/tcg-target.c.inc | 4 +++
23
tcg/i386/tcg-target.c.inc | 4 +++
24
tcg/loongarch64/tcg-target.c.inc | 26 ++++++++++--------
25
tcg/mips/tcg-target.c.inc | 41 ++++++++++++++--------------
26
tcg/ppc/tcg-target.c.inc | 25 +++++++++--------
27
tcg/riscv/tcg-target.c.inc | 23 ++++++++--------
28
tcg/s390x/tcg-target.c.inc | 4 +++
29
tcg/sparc64/tcg-target.c.inc | 4 +++
30
tcg/tci/tcg-target-opc.h.inc | 1 +
31
tcg/tci/tcg-target.c.inc | 17 ++++++++++--
32
25 files changed, 112 insertions(+), 101 deletions(-)
33
34
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/tcg/aarch64/tcg-target-has.h
37
+++ b/tcg/aarch64/tcg-target-has.h
38
@@ -XXX,XX +XXX,XX @@
39
#define have_lse2 (cpuinfo & CPUINFO_LSE2)
40
41
/* optional instructions */
42
-#define TCG_TARGET_HAS_rem_i32 1
43
#define TCG_TARGET_HAS_bswap16_i32 1
44
#define TCG_TARGET_HAS_bswap32_i32 1
45
#define TCG_TARGET_HAS_rot_i32 1
46
@@ -XXX,XX +XXX,XX @@
47
#define TCG_TARGET_HAS_extr_i64_i32 0
48
#define TCG_TARGET_HAS_qemu_st8_i32 0
49
50
-#define TCG_TARGET_HAS_rem_i64 1
51
#define TCG_TARGET_HAS_bswap16_i64 1
52
#define TCG_TARGET_HAS_bswap32_i64 1
53
#define TCG_TARGET_HAS_bswap64_i64 1
54
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
55
index XXXXXXX..XXXXXXX 100644
56
--- a/tcg/arm/tcg-target-has.h
57
+++ b/tcg/arm/tcg-target-has.h
58
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
59
#define TCG_TARGET_HAS_negsetcond_i32 1
60
#define TCG_TARGET_HAS_mulu2_i32 1
61
#define TCG_TARGET_HAS_muls2_i32 1
62
-#define TCG_TARGET_HAS_rem_i32 0
63
#define TCG_TARGET_HAS_qemu_st8_i32 0
64
65
#define TCG_TARGET_HAS_qemu_ldst_i128 0
66
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
67
index XXXXXXX..XXXXXXX 100644
68
--- a/tcg/loongarch64/tcg-target-con-set.h
69
+++ b/tcg/loongarch64/tcg-target-con-set.h
70
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rW)
71
C_O1_I2(r, 0, rz)
72
C_O1_I2(r, rz, ri)
73
C_O1_I2(r, rz, rJ)
74
-C_O1_I2(r, rz, rz)
75
C_O1_I2(w, w, w)
76
C_O1_I2(w, w, wM)
77
C_O1_I2(w, w, wA)
78
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
79
index XXXXXXX..XXXXXXX 100644
80
--- a/tcg/loongarch64/tcg-target-has.h
81
+++ b/tcg/loongarch64/tcg-target-has.h
82
@@ -XXX,XX +XXX,XX @@
83
84
/* optional instructions */
85
#define TCG_TARGET_HAS_negsetcond_i32 0
86
-#define TCG_TARGET_HAS_rem_i32 1
87
#define TCG_TARGET_HAS_rot_i32 1
88
#define TCG_TARGET_HAS_extract2_i32 0
89
#define TCG_TARGET_HAS_add2_i32 0
90
@@ -XXX,XX +XXX,XX @@
91
92
/* 64-bit operations */
93
#define TCG_TARGET_HAS_negsetcond_i64 0
94
-#define TCG_TARGET_HAS_rem_i64 1
95
#define TCG_TARGET_HAS_rot_i64 1
96
#define TCG_TARGET_HAS_extract2_i64 0
97
#define TCG_TARGET_HAS_extr_i64_i32 1
98
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
99
index XXXXXXX..XXXXXXX 100644
100
--- a/tcg/mips/tcg-target-has.h
101
+++ b/tcg/mips/tcg-target-has.h
102
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
103
#endif
104
105
/* optional instructions */
106
-#define TCG_TARGET_HAS_rem_i32 1
107
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
108
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
109
#define TCG_TARGET_HAS_bswap16_i32 1
110
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
111
#define TCG_TARGET_HAS_add2_i32 0
112
#define TCG_TARGET_HAS_sub2_i32 0
113
#define TCG_TARGET_HAS_extr_i64_i32 1
114
-#define TCG_TARGET_HAS_rem_i64 1
115
#define TCG_TARGET_HAS_add2_i64 0
116
#define TCG_TARGET_HAS_sub2_i64 0
117
#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
118
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
119
index XXXXXXX..XXXXXXX 100644
120
--- a/tcg/ppc/tcg-target-has.h
121
+++ b/tcg/ppc/tcg-target-has.h
122
@@ -XXX,XX +XXX,XX @@
123
#define have_vsx (cpuinfo & CPUINFO_VSX)
124
125
/* optional instructions */
126
-#define TCG_TARGET_HAS_rem_i32 have_isa_3_00
127
#define TCG_TARGET_HAS_rot_i32 1
128
#define TCG_TARGET_HAS_bswap16_i32 1
129
#define TCG_TARGET_HAS_bswap32_i32 1
130
@@ -XXX,XX +XXX,XX @@
131
#define TCG_TARGET_HAS_add2_i32 0
132
#define TCG_TARGET_HAS_sub2_i32 0
133
#define TCG_TARGET_HAS_extr_i64_i32 0
134
-#define TCG_TARGET_HAS_rem_i64 have_isa_3_00
135
#define TCG_TARGET_HAS_rot_i64 1
136
#define TCG_TARGET_HAS_bswap16_i64 1
137
#define TCG_TARGET_HAS_bswap32_i64 1
138
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
139
index XXXXXXX..XXXXXXX 100644
140
--- a/tcg/riscv/tcg-target-con-set.h
141
+++ b/tcg/riscv/tcg-target-con-set.h
142
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
143
C_O1_I2(r, r, r)
144
C_O1_I2(r, r, ri)
145
C_O1_I2(r, r, rI)
146
-C_O1_I2(r, rz, rz)
147
C_N1_I2(r, r, rM)
148
C_O1_I4(r, r, rI, rM, rM)
149
C_O2_I4(r, r, rz, rz, rM, rM)
150
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
151
index XXXXXXX..XXXXXXX 100644
152
--- a/tcg/riscv/tcg-target-has.h
153
+++ b/tcg/riscv/tcg-target-has.h
154
@@ -XXX,XX +XXX,XX @@
155
156
/* optional instructions */
157
#define TCG_TARGET_HAS_negsetcond_i32 1
158
-#define TCG_TARGET_HAS_rem_i32 1
159
#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
160
#define TCG_TARGET_HAS_extract2_i32 0
161
#define TCG_TARGET_HAS_add2_i32 1
162
@@ -XXX,XX +XXX,XX @@
163
#define TCG_TARGET_HAS_qemu_st8_i32 0
164
165
#define TCG_TARGET_HAS_negsetcond_i64 1
166
-#define TCG_TARGET_HAS_rem_i64 1
167
#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
168
#define TCG_TARGET_HAS_extract2_i64 0
169
#define TCG_TARGET_HAS_extr_i64_i32 1
170
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
171
index XXXXXXX..XXXXXXX 100644
172
--- a/tcg/sparc64/tcg-target-has.h
173
+++ b/tcg/sparc64/tcg-target-has.h
174
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
175
#endif
176
177
/* optional instructions */
178
-#define TCG_TARGET_HAS_rem_i32        0
179
#define TCG_TARGET_HAS_rot_i32 0
180
#define TCG_TARGET_HAS_bswap16_i32 0
181
#define TCG_TARGET_HAS_bswap32_i32 0
182
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
183
#define TCG_TARGET_HAS_qemu_st8_i32 0
184
185
#define TCG_TARGET_HAS_extr_i64_i32 0
186
-#define TCG_TARGET_HAS_rem_i64 0
187
#define TCG_TARGET_HAS_rot_i64 0
188
#define TCG_TARGET_HAS_bswap16_i64 0
189
#define TCG_TARGET_HAS_bswap32_i64 0
190
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
191
index XXXXXXX..XXXXXXX 100644
192
--- a/tcg/tcg-has.h
193
+++ b/tcg/tcg-has.h
194
@@ -XXX,XX +XXX,XX @@
195
#if TCG_TARGET_REG_BITS == 32
196
/* Turn some undef macros into false macros. */
197
#define TCG_TARGET_HAS_extr_i64_i32 0
198
-#define TCG_TARGET_HAS_div_i64 0
199
-#define TCG_TARGET_HAS_rem_i64 0
200
#define TCG_TARGET_HAS_rot_i64 0
201
#define TCG_TARGET_HAS_bswap16_i64 0
202
#define TCG_TARGET_HAS_bswap32_i64 0
203
@@ -XXX,XX +XXX,XX @@
204
#define TCG_TARGET_HAS_sub2_i32 1
205
#endif
206
207
-#ifndef TCG_TARGET_HAS_rem_i32
208
-#define TCG_TARGET_HAS_rem_i32 0
209
-#endif
210
-#ifndef TCG_TARGET_HAS_rem_i64
211
-#define TCG_TARGET_HAS_rem_i64 0
212
-#endif
213
-
214
#if !defined(TCG_TARGET_HAS_v64) \
215
&& !defined(TCG_TARGET_HAS_v128) \
216
&& !defined(TCG_TARGET_HAS_v256)
217
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
218
index XXXXXXX..XXXXXXX 100644
219
--- a/tcg/tci/tcg-target-has.h
220
+++ b/tcg/tci/tcg-target-has.h
221
@@ -XXX,XX +XXX,XX @@
222
223
#define TCG_TARGET_HAS_bswap16_i32 1
224
#define TCG_TARGET_HAS_bswap32_i32 1
225
-#define TCG_TARGET_HAS_rem_i32 1
226
#define TCG_TARGET_HAS_extract2_i32 0
227
#define TCG_TARGET_HAS_clz_i32 1
228
#define TCG_TARGET_HAS_ctz_i32 1
229
@@ -XXX,XX +XXX,XX @@
230
#define TCG_TARGET_HAS_bswap32_i64 1
231
#define TCG_TARGET_HAS_bswap64_i64 1
232
#define TCG_TARGET_HAS_extract2_i64 0
233
-#define TCG_TARGET_HAS_rem_i64 1
234
#define TCG_TARGET_HAS_clz_i64 1
235
#define TCG_TARGET_HAS_ctz_i64 1
236
#define TCG_TARGET_HAS_ctpop_i64 1
237
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/tcg/tcg-op.c
240
+++ b/tcg/tcg-op.c
241
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
242
243
void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
244
{
245
- if (TCG_TARGET_HAS_rem_i32) {
246
+ if (tcg_op_supported(INDEX_op_remu_i32, TCG_TYPE_I32, 0)) {
247
tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
248
} else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
249
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
250
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
251
252
void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
253
{
254
- if (TCG_TARGET_HAS_rem_i64) {
255
+ if (tcg_op_supported(INDEX_op_remu_i64, TCG_TYPE_I64, 0)) {
256
tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
257
} else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
258
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
259
diff --git a/tcg/tcg.c b/tcg/tcg.c
260
index XXXXXXX..XXXXXXX 100644
261
--- a/tcg/tcg.c
262
+++ b/tcg/tcg.c
263
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
264
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
265
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
266
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
267
+ OUTOP(INDEX_op_remu_i32, TCGOutOpBinary, outop_remu),
268
+ OUTOP(INDEX_op_remu_i64, TCGOutOpBinary, outop_remu),
269
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
270
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
271
};
272
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
273
274
case INDEX_op_negsetcond_i32:
275
return TCG_TARGET_HAS_negsetcond_i32;
276
- case INDEX_op_remu_i32:
277
- return TCG_TARGET_HAS_rem_i32;
278
case INDEX_op_rotl_i32:
279
case INDEX_op_rotr_i32:
280
return TCG_TARGET_HAS_rot_i32;
281
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
282
283
case INDEX_op_negsetcond_i64:
284
return TCG_TARGET_HAS_negsetcond_i64;
285
- case INDEX_op_remu_i64:
286
- return TCG_TARGET_HAS_rem_i64;
287
case INDEX_op_rotl_i64:
288
case INDEX_op_rotr_i64:
289
return TCG_TARGET_HAS_rot_i64;
290
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
291
case INDEX_op_or:
292
case INDEX_op_orc:
293
case INDEX_op_rems:
294
+ case INDEX_op_remu_i32:
295
+ case INDEX_op_remu_i64:
296
case INDEX_op_xor:
297
{
298
const TCGOutOpBinary *out =
299
diff --git a/tcg/tci.c b/tcg/tci.c
300
index XXXXXXX..XXXXXXX 100644
301
--- a/tcg/tci.c
302
+++ b/tcg/tci.c
303
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
304
tci_args_rrr(insn, &r0, &r1, &r2);
305
regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
306
break;
307
- case INDEX_op_remu_i32:
308
+ case INDEX_op_tci_remu32:
309
tci_args_rrr(insn, &r0, &r1, &r2);
310
regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
311
break;
312
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
313
case INDEX_op_rems:
314
case INDEX_op_sub:
315
case INDEX_op_xor:
316
- case INDEX_op_remu_i32:
317
case INDEX_op_remu_i64:
318
case INDEX_op_shl_i32:
319
case INDEX_op_shl_i64:
320
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
321
case INDEX_op_tci_divs32:
322
case INDEX_op_tci_divu32:
323
case INDEX_op_tci_rems32:
324
+ case INDEX_op_tci_remu32:
325
tci_args_rrr(insn, &r0, &r1, &r2);
326
info->fprintf_func(info->stream, "%-12s %s, %s, %s",
327
op_name, str_r(r0), str_r(r1), str_r(r2));
328
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
329
index XXXXXXX..XXXXXXX 100644
330
--- a/tcg/aarch64/tcg-target.c.inc
331
+++ b/tcg/aarch64/tcg-target.c.inc
332
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
333
.out_rrr = tgen_rems,
334
};
335
336
+static void tgen_remu(TCGContext *s, TCGType type,
337
+ TCGReg a0, TCGReg a1, TCGReg a2)
338
+{
339
+ tcg_out_insn(s, 3508, UDIV, type, TCG_REG_TMP0, a1, a2);
340
+ tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1);
341
+}
342
+
343
+static const TCGOutOpBinary outop_remu = {
344
+ .base.static_constraint = C_O1_I2(r, r, r),
345
+ .out_rrr = tgen_remu,
346
+};
347
+
348
static void tgen_sub(TCGContext *s, TCGType type,
349
TCGReg a0, TCGReg a1, TCGReg a2)
350
{
351
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
352
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
353
break;
354
355
- case INDEX_op_remu_i64:
356
- case INDEX_op_remu_i32:
357
- tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP0, a1, a2);
358
- tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP0, a2, a1);
359
- break;
360
-
361
case INDEX_op_shl_i64:
362
case INDEX_op_shl_i32:
363
if (c2) {
364
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
365
case INDEX_op_negsetcond_i64:
366
return C_O1_I2(r, r, rC);
367
368
- case INDEX_op_remu_i32:
369
- case INDEX_op_remu_i64:
370
- return C_O1_I2(r, r, r);
371
-
372
case INDEX_op_shl_i32:
373
case INDEX_op_shr_i32:
374
case INDEX_op_sar_i32:
375
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
376
index XXXXXXX..XXXXXXX 100644
377
--- a/tcg/arm/tcg-target.c.inc
378
+++ b/tcg/arm/tcg-target.c.inc
379
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
380
.base.static_constraint = C_NotImplemented,
381
};
382
383
+static const TCGOutOpBinary outop_remu = {
384
+ .base.static_constraint = C_NotImplemented,
385
+};
386
+
387
static void tgen_sub(TCGContext *s, TCGType type,
388
TCGReg a0, TCGReg a1, TCGReg a2)
389
{
390
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
391
index XXXXXXX..XXXXXXX 100644
392
--- a/tcg/i386/tcg-target.c.inc
393
+++ b/tcg/i386/tcg-target.c.inc
394
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
395
.base.static_constraint = C_NotImplemented,
396
};
397
398
+static const TCGOutOpBinary outop_remu = {
399
+ .base.static_constraint = C_NotImplemented,
400
+};
401
+
402
static void tgen_sub(TCGContext *s, TCGType type,
403
TCGReg a0, TCGReg a1, TCGReg a2)
404
{
405
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
406
index XXXXXXX..XXXXXXX 100644
407
--- a/tcg/loongarch64/tcg-target.c.inc
408
+++ b/tcg/loongarch64/tcg-target.c.inc
409
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
410
.out_rrr = tgen_rems,
411
};
412
413
+static void tgen_remu(TCGContext *s, TCGType type,
414
+ TCGReg a0, TCGReg a1, TCGReg a2)
415
+{
416
+ if (type == TCG_TYPE_I32) {
417
+ tcg_out_opc_mod_wu(s, a0, a1, a2);
418
+ } else {
419
+ tcg_out_opc_mod_du(s, a0, a1, a2);
420
+ }
421
+}
422
+
423
+static const TCGOutOpBinary outop_remu = {
424
+ .base.static_constraint = C_O1_I2(r, r, r),
425
+ .out_rrr = tgen_remu,
426
+};
427
+
428
static void tgen_sub(TCGContext *s, TCGType type,
429
TCGReg a0, TCGReg a1, TCGReg a2)
430
{
431
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
432
}
433
break;
434
435
- case INDEX_op_remu_i32:
436
- tcg_out_opc_mod_wu(s, a0, a1, a2);
437
- break;
438
- case INDEX_op_remu_i64:
439
- tcg_out_opc_mod_du(s, a0, a1, a2);
440
- break;
441
-
442
case INDEX_op_setcond_i32:
443
case INDEX_op_setcond_i64:
444
tcg_out_setcond(s, args[3], a0, a1, a2, c2);
445
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
446
case INDEX_op_setcond_i64:
447
return C_O1_I2(r, rz, rJ);
448
449
- case INDEX_op_remu_i32:
450
- case INDEX_op_remu_i64:
451
- return C_O1_I2(r, rz, rz);
452
-
453
case INDEX_op_movcond_i32:
454
case INDEX_op_movcond_i64:
455
return C_O1_I4(r, rz, rJ, rz, rz);
456
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
457
index XXXXXXX..XXXXXXX 100644
458
--- a/tcg/mips/tcg-target.c.inc
459
+++ b/tcg/mips/tcg-target.c.inc
460
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
461
.out_rrr = tgen_rems,
462
};
463
464
+static void tgen_remu(TCGContext *s, TCGType type,
465
+ TCGReg a0, TCGReg a1, TCGReg a2)
466
+{
467
+ if (use_mips32r6_instructions) {
468
+ if (type == TCG_TYPE_I32) {
469
+ tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
470
+ } else {
471
+ tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
472
+ }
473
+ } else {
474
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_DIVU : OPC_DDIVU;
475
+ tcg_out_opc_reg(s, insn, 0, a1, a2);
476
+ tcg_out_opc_reg(s, OPC_MFHI, a0, 0, 0);
477
+ }
478
+}
479
+
480
+static const TCGOutOpBinary outop_remu = {
481
+ .base.static_constraint = C_O1_I2(r, r, r),
482
+ .out_rrr = tgen_remu,
483
+};
484
+
485
static void tgen_sub(TCGContext *s, TCGType type,
486
TCGReg a0, TCGReg a1, TCGReg a2)
487
{
488
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
489
tcg_out_ldst(s, i1, a0, a1, a2);
490
break;
491
492
- case INDEX_op_remu_i32:
493
- if (use_mips32r6_instructions) {
494
- tcg_out_opc_reg(s, OPC_MODU, a0, a1, a2);
495
- break;
496
- }
497
- i1 = OPC_DIVU, i2 = OPC_MFHI;
498
- goto do_hilo1;
499
- case INDEX_op_remu_i64:
500
- if (use_mips32r6_instructions) {
501
- tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
502
- break;
503
- }
504
- i1 = OPC_DDIVU, i2 = OPC_MFHI;
505
- do_hilo1:
506
- tcg_out_opc_reg(s, i1, 0, a1, a2);
507
- tcg_out_opc_reg(s, i2, a0, 0, 0);
508
- break;
509
-
510
case INDEX_op_muls2_i32:
511
i1 = OPC_MULT;
512
goto do_hilo2;
513
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
514
case INDEX_op_st_i64:
515
return C_O0_I2(rz, r);
516
517
- case INDEX_op_remu_i32:
518
case INDEX_op_setcond_i32:
519
- case INDEX_op_remu_i64:
520
case INDEX_op_setcond_i64:
521
return C_O1_I2(r, rz, rz);
522
case INDEX_op_muls2_i32:
523
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
524
index XXXXXXX..XXXXXXX 100644
525
--- a/tcg/ppc/tcg-target.c.inc
526
+++ b/tcg/ppc/tcg-target.c.inc
527
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
528
.out_rrr = tgen_rems,
529
};
530
531
+static void tgen_remu(TCGContext *s, TCGType type,
532
+ TCGReg a0, TCGReg a1, TCGReg a2)
533
+{
534
+ uint32_t insn = type == TCG_TYPE_I32 ? MODUW : MODUD;
535
+ tcg_out32(s, insn | TAB(a0, a1, a2));
536
+}
537
+
538
+static const TCGOutOpBinary outop_remu = {
539
+ .base.static_constraint = C_Dynamic,
540
+ .base.dynamic_constraint = cset_mod,
541
+ .out_rrr = tgen_remu,
542
+};
543
+
544
static void tgen_sub(TCGContext *s, TCGType type,
545
TCGReg a0, TCGReg a1, TCGReg a2)
546
{
547
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
548
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
549
break;
550
551
- case INDEX_op_remu_i32:
552
- tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
553
- break;
554
-
555
case INDEX_op_shl_i32:
556
if (const_args[2]) {
557
/* Limit immediate shift count lest we create an illegal insn. */
558
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
559
}
560
break;
561
562
- case INDEX_op_remu_i64:
563
- tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
564
- break;
565
-
566
case INDEX_op_qemu_ld_i32:
567
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
568
break;
569
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
570
case INDEX_op_rotr_i64:
571
return C_O1_I2(r, r, ri);
572
573
- case INDEX_op_remu_i32:
574
- case INDEX_op_remu_i64:
575
- return C_O1_I2(r, r, r);
576
-
577
case INDEX_op_clz_i32:
578
case INDEX_op_ctz_i32:
579
case INDEX_op_clz_i64:
580
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
581
index XXXXXXX..XXXXXXX 100644
582
--- a/tcg/riscv/tcg-target.c.inc
583
+++ b/tcg/riscv/tcg-target.c.inc
584
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
585
.out_rrr = tgen_rems,
586
};
587
588
+static void tgen_remu(TCGContext *s, TCGType type,
589
+ TCGReg a0, TCGReg a1, TCGReg a2)
590
+{
591
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMUW : OPC_REMU;
592
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
593
+}
594
+
595
+static const TCGOutOpBinary outop_remu = {
596
+ .base.static_constraint = C_O1_I2(r, r, r),
597
+ .out_rrr = tgen_remu,
598
+};
599
+
600
static void tgen_sub(TCGContext *s, TCGType type,
601
TCGReg a0, TCGReg a1, TCGReg a2)
602
{
603
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
604
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
605
break;
606
607
- case INDEX_op_remu_i32:
608
- tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
609
- break;
610
- case INDEX_op_remu_i64:
611
- tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
612
- break;
613
-
614
case INDEX_op_shl_i32:
615
if (c2) {
616
tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
617
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
618
case INDEX_op_negsetcond_i64:
619
return C_O1_I2(r, r, rI);
620
621
- case INDEX_op_remu_i32:
622
- case INDEX_op_remu_i64:
623
- return C_O1_I2(r, rz, rz);
624
-
625
case INDEX_op_shl_i32:
626
case INDEX_op_shr_i32:
627
case INDEX_op_sar_i32:
628
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
629
index XXXXXXX..XXXXXXX 100644
630
--- a/tcg/s390x/tcg-target.c.inc
631
+++ b/tcg/s390x/tcg-target.c.inc
632
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
633
.base.static_constraint = C_NotImplemented,
634
};
635
636
+static const TCGOutOpBinary outop_remu = {
637
+ .base.static_constraint = C_NotImplemented,
638
+};
639
+
640
static void tgen_sub(TCGContext *s, TCGType type,
641
TCGReg a0, TCGReg a1, TCGReg a2)
642
{
643
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
644
index XXXXXXX..XXXXXXX 100644
645
--- a/tcg/sparc64/tcg-target.c.inc
646
+++ b/tcg/sparc64/tcg-target.c.inc
647
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
648
.base.static_constraint = C_NotImplemented,
649
};
650
651
+static const TCGOutOpBinary outop_remu = {
652
+ .base.static_constraint = C_NotImplemented,
653
+};
654
+
655
static void tgen_sub(TCGContext *s, TCGType type,
656
TCGReg a0, TCGReg a1, TCGReg a2)
657
{
658
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
659
index XXXXXXX..XXXXXXX 100644
660
--- a/tcg/tci/tcg-target-opc.h.inc
661
+++ b/tcg/tci/tcg-target-opc.h.inc
662
@@ -XXX,XX +XXX,XX @@ DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
663
DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
664
DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
665
DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
666
+DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
667
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
668
index XXXXXXX..XXXXXXX 100644
669
--- a/tcg/tci/tcg-target.c.inc
670
+++ b/tcg/tci/tcg-target.c.inc
671
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
672
case INDEX_op_st_i64:
673
return C_O0_I2(r, r);
674
675
- case INDEX_op_remu_i32:
676
- case INDEX_op_remu_i64:
677
case INDEX_op_shl_i32:
678
case INDEX_op_shl_i64:
679
case INDEX_op_shr_i32:
680
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_rems = {
681
.out_rrr = tgen_rems,
682
};
683
684
+static void tgen_remu(TCGContext *s, TCGType type,
685
+ TCGReg a0, TCGReg a1, TCGReg a2)
686
+{
687
+ TCGOpcode opc = (type == TCG_TYPE_I32
688
+ ? INDEX_op_tci_remu32
689
+ : INDEX_op_remu_i64);
690
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
691
+}
692
+
693
+static const TCGOutOpBinary outop_remu = {
694
+ .base.static_constraint = C_O1_I2(r, r, r),
695
+ .out_rrr = tgen_remu,
696
+};
697
+
698
static void tgen_sub(TCGContext *s, TCGType type,
699
TCGReg a0, TCGReg a1, TCGReg a2)
700
{
701
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
702
CASE_32_64(sar)
703
CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
704
CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
705
- CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
706
CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
707
CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
708
tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
709
--
710
2.43.0
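Several of the hosts touched above have no hardware remainder instruction, so the backend synthesizes one from a divide. AArch64's tgen_remu, for example, emits UDIV into a temporary followed by MSUB, computing r = n - (n / d) * d; the same identity in plain C:

    #include <stdint.h>

    static uint64_t remu_via_div(uint64_t n, uint64_t d)
    {
        uint64_t q = n / d;    /* UDIV tmp0, n, d     */
        return n - q * d;      /* MSUB  r, tmp0, d, n */
    }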
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 5 +----
5
tcg/optimize.c | 9 +++++----
6
tcg/tcg-op.c | 8 ++++----
7
tcg/tcg.c | 6 ++----
8
tcg/tci.c | 4 ++--
9
docs/devel/tcg-ops.rst | 2 +-
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 16 insertions(+), 20 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(not, 1, 1, 0, TCG_OPF_INT)
18
DEF(or, 1, 2, 0, TCG_OPF_INT)
19
DEF(orc, 1, 2, 0, TCG_OPF_INT)
20
DEF(rems, 1, 2, 0, TCG_OPF_INT)
21
+DEF(remu, 1, 2, 0, TCG_OPF_INT)
22
DEF(sub, 1, 2, 0, TCG_OPF_INT)
23
DEF(xor, 1, 2, 0, TCG_OPF_INT)
24
25
@@ -XXX,XX +XXX,XX @@ DEF(ld_i32, 1, 1, 1, 0)
26
DEF(st8_i32, 0, 2, 1, 0)
27
DEF(st16_i32, 0, 2, 1, 0)
28
DEF(st_i32, 0, 2, 1, 0)
29
-/* arith */
30
-DEF(remu_i32, 1, 2, 0, 0)
31
/* shifts/rotates */
32
DEF(shl_i32, 1, 2, 0, 0)
33
DEF(shr_i32, 1, 2, 0, 0)
34
@@ -XXX,XX +XXX,XX @@ DEF(st8_i64, 0, 2, 1, 0)
35
DEF(st16_i64, 0, 2, 1, 0)
36
DEF(st32_i64, 0, 2, 1, 0)
37
DEF(st_i64, 0, 2, 1, 0)
38
-/* arith */
39
-DEF(remu_i64, 1, 2, 0, 0)
40
/* shifts/rotates */
41
DEF(shl_i64, 1, 2, 0, 0)
42
DEF(shr_i64, 1, 2, 0, 0)
43
diff --git a/tcg/optimize.c b/tcg/optimize.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/optimize.c
46
+++ b/tcg/optimize.c
47
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
48
}
49
return (int64_t)x % ((int64_t)y ? : 1);
50
51
- case INDEX_op_remu_i32:
52
- return (uint32_t)x % ((uint32_t)y ? : 1);
53
- case INDEX_op_remu_i64:
54
+ case INDEX_op_remu:
55
+ if (type == TCG_TYPE_I32) {
56
+ return (uint32_t)x % ((uint32_t)y ? : 1);
57
+ }
58
return (uint64_t)x % ((uint64_t)y ? : 1);
59
60
default:
61
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
62
done = fold_qemu_st(&ctx, op);
63
break;
64
case INDEX_op_rems:
65
- CASE_OP_32_64(remu):
66
+ case INDEX_op_remu:
67
done = fold_remainder(&ctx, op);
68
break;
69
CASE_OP_32_64(rotl):
70
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/tcg/tcg-op.c
73
+++ b/tcg/tcg-op.c
74
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
75
76
void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
77
{
78
- if (tcg_op_supported(INDEX_op_remu_i32, TCG_TYPE_I32, 0)) {
79
- tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2);
80
+ if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I32, 0)) {
81
+ tcg_gen_op3_i32(INDEX_op_remu, ret, arg1, arg2);
82
} else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I32, 0)) {
83
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
84
tcg_gen_op3_i32(INDEX_op_divu, t0, arg1, arg2);
85
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
86
87
void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
88
{
89
- if (tcg_op_supported(INDEX_op_remu_i64, TCG_TYPE_I64, 0)) {
90
- tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2);
91
+ if (tcg_op_supported(INDEX_op_remu, TCG_TYPE_I64, 0)) {
92
+ tcg_gen_op3_i64(INDEX_op_remu, ret, arg1, arg2);
93
} else if (tcg_op_supported(INDEX_op_divu, TCG_TYPE_I64, 0)) {
94
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
95
tcg_gen_op3_i64(INDEX_op_divu, t0, arg1, arg2);
96
diff --git a/tcg/tcg.c b/tcg/tcg.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/tcg/tcg.c
99
+++ b/tcg/tcg.c
100
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
101
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
102
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
103
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
104
- OUTOP(INDEX_op_remu_i32, TCGOutOpBinary, outop_remu),
105
- OUTOP(INDEX_op_remu_i64, TCGOutOpBinary, outop_remu),
106
+ OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
107
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
108
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
109
};
110
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
111
case INDEX_op_or:
112
case INDEX_op_orc:
113
case INDEX_op_rems:
114
- case INDEX_op_remu_i32:
115
- case INDEX_op_remu_i64:
116
+ case INDEX_op_remu:
117
case INDEX_op_xor:
118
{
119
const TCGOutOpBinary *out =
120
diff --git a/tcg/tci.c b/tcg/tci.c
121
index XXXXXXX..XXXXXXX 100644
122
--- a/tcg/tci.c
123
+++ b/tcg/tci.c
124
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
125
tci_args_rrr(insn, &r0, &r1, &r2);
126
regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
127
break;
128
- case INDEX_op_remu_i64:
129
+ case INDEX_op_remu:
130
tci_args_rrr(insn, &r0, &r1, &r2);
131
regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
132
break;
133
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
134
case INDEX_op_or:
135
case INDEX_op_orc:
136
case INDEX_op_rems:
137
+ case INDEX_op_remu:
138
case INDEX_op_sub:
139
case INDEX_op_xor:
140
- case INDEX_op_remu_i64:
141
case INDEX_op_shl_i32:
142
case INDEX_op_shl_i64:
143
case INDEX_op_shr_i32:
144
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
145
index XXXXXXX..XXXXXXX 100644
146
--- a/docs/devel/tcg-ops.rst
147
+++ b/docs/devel/tcg-ops.rst
148
@@ -XXX,XX +XXX,XX @@ Arithmetic
149
- | *t0* = *t1* % *t2* (signed)
150
| Undefined behavior if division by zero or overflow.
151
152
- * - remu_i32/i64 *t0*, *t1*, *t2*
153
+ * - remu *t0*, *t1*, *t2*
154
155
- | *t0* = *t1* % *t2* (unsigned)
156
| Undefined behavior if division by zero.
157
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
158
index XXXXXXX..XXXXXXX 100644
159
--- a/tcg/tci/tcg-target.c.inc
160
+++ b/tcg/tci/tcg-target.c.inc
161
@@ -XXX,XX +XXX,XX @@ static void tgen_remu(TCGContext *s, TCGType type,
162
{
163
TCGOpcode opc = (type == TCG_TYPE_I32
164
? INDEX_op_tci_remu32
165
- : INDEX_op_remu_i64);
166
+ : INDEX_op_remu);
167
tcg_out_op_rrr(s, opc, a0, a1, a2);
168
}
169
170
--
171
2.43.0
172
173
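With remu merged into a single width-generic opcode, the tcg-op.c hunks above leave the expansion ladder intact: tcg_gen_remu_* first tries a native remu, then synthesizes the result from divu, and finally falls back to the out-of-line helper. The divu-based expansion is the three-op sequence visible in the diff, equivalent to:

    #include <stdint.h>

    /* t0 = a / b; t0 = t0 * b; ret = a - t0 */
    static uint64_t remu_from_divu(uint64_t a, uint64_t b)
    {
        uint64_t t0 = a / b;
        t0 = t0 * b;
        return a - t0;
    }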
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/tcg.c | 6 ++--
5
tcg/aarch64/tcg-target.c.inc | 38 ++++++++++----------
6
tcg/arm/tcg-target.c.inc | 25 +++++++++----
7
tcg/i386/tcg-target.c.inc | 60 +++++++++++++++++++++++---------
8
tcg/loongarch64/tcg-target.c.inc | 43 ++++++++++++++---------
9
tcg/mips/tcg-target.c.inc | 35 ++++++++++++-------
10
tcg/ppc/tcg-target.c.inc | 42 ++++++++++++----------
11
tcg/riscv/tcg-target.c.inc | 38 +++++++++++---------
12
tcg/s390x/tcg-target.c.inc | 37 ++++++++++++++++----
13
tcg/sparc64/tcg-target.c.inc | 27 ++++++++++----
14
tcg/tci/tcg-target.c.inc | 14 ++++++--
15
11 files changed, 241 insertions(+), 124 deletions(-)
16
1
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
20
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
22
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
23
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
24
OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
25
+ OUTOP(INDEX_op_shl_i32, TCGOutOpBinary, outop_shl),
26
+ OUTOP(INDEX_op_shl_i64, TCGOutOpBinary, outop_shl),
27
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
28
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
29
};
30
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
31
case INDEX_op_st8_i32:
32
case INDEX_op_st16_i32:
33
case INDEX_op_st_i32:
34
- case INDEX_op_shl_i32:
35
case INDEX_op_shr_i32:
36
case INDEX_op_sar_i32:
37
case INDEX_op_extract_i32:
38
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
39
case INDEX_op_st16_i64:
40
case INDEX_op_st32_i64:
41
case INDEX_op_st_i64:
42
- case INDEX_op_shl_i64:
43
case INDEX_op_shr_i64:
44
case INDEX_op_sar_i64:
45
case INDEX_op_ext_i32_i64:
46
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
47
case INDEX_op_orc:
48
case INDEX_op_rems:
49
case INDEX_op_remu:
50
+ case INDEX_op_shl_i32:
51
+ case INDEX_op_shl_i64:
52
case INDEX_op_xor:
53
{
54
const TCGOutOpBinary *out =
55
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/aarch64/tcg-target.c.inc
58
+++ b/tcg/aarch64/tcg-target.c.inc
59
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
60
tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
61
}
62
63
-static inline void tcg_out_shl(TCGContext *s, TCGType ext,
64
- TCGReg rd, TCGReg rn, unsigned int m)
65
-{
66
- int bits = ext ? 64 : 32;
67
- int max = bits - 1;
68
- tcg_out_ubfm(s, ext, rd, rn, (bits - m) & max, (max - m) & max);
69
-}
70
-
71
static inline void tcg_out_shr(TCGContext *s, TCGType ext,
72
TCGReg rd, TCGReg rn, unsigned int m)
73
{
74
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
75
.out_rrr = tgen_remu,
76
};
77
78
+static void tgen_shl(TCGContext *s, TCGType type,
79
+ TCGReg a0, TCGReg a1, TCGReg a2)
80
+{
81
+ tcg_out_insn(s, 3508, LSLV, type, a0, a1, a2);
82
+}
83
+
84
+static void tgen_shli(TCGContext *s, TCGType type,
85
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
86
+{
87
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
88
+ tcg_out_ubfm(s, type, a0, a1, -a2 & max, ~a2 & max);
89
+}
90
+
91
+static const TCGOutOpBinary outop_shl = {
92
+ .base.static_constraint = C_O1_I2(r, r, ri),
93
+ .out_rrr = tgen_shl,
94
+ .out_rri = tgen_shli,
95
+};
96
+
97
static void tgen_sub(TCGContext *s, TCGType type,
98
TCGReg a0, TCGReg a1, TCGReg a2)
99
{
100
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
101
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
102
break;
103
104
- case INDEX_op_shl_i64:
105
- case INDEX_op_shl_i32:
106
- if (c2) {
107
- tcg_out_shl(s, ext, a0, a1, a2);
108
- } else {
109
- tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2);
110
- }
111
- break;
112
-
113
case INDEX_op_shr_i64:
114
case INDEX_op_shr_i32:
115
if (c2) {
116
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
117
case INDEX_op_negsetcond_i64:
118
return C_O1_I2(r, r, rC);
119
120
- case INDEX_op_shl_i32:
121
case INDEX_op_shr_i32:
122
case INDEX_op_sar_i32:
123
case INDEX_op_rotl_i32:
124
case INDEX_op_rotr_i32:
125
- case INDEX_op_shl_i64:
126
case INDEX_op_shr_i64:
127
case INDEX_op_sar_i64:
128
case INDEX_op_rotl_i64:
129
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
130
index XXXXXXX..XXXXXXX 100644
131
--- a/tcg/arm/tcg-target.c.inc
132
+++ b/tcg/arm/tcg-target.c.inc
133
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
134
.base.static_constraint = C_NotImplemented,
135
};
136
137
+static void tgen_shl(TCGContext *s, TCGType type,
138
+ TCGReg a0, TCGReg a1, TCGReg a2)
139
+{
140
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSL(a2));
141
+}
142
+
143
+static void tgen_shli(TCGContext *s, TCGType type,
144
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
145
+{
146
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
147
+ SHIFT_IMM_LSL(a2 & 0x1f));
148
+}
149
+
150
+static const TCGOutOpBinary outop_shl = {
151
+ .base.static_constraint = C_O1_I2(r, r, ri),
152
+ .out_rrr = tgen_shl,
153
+ .out_rri = tgen_shli,
154
+};
155
+
156
static void tgen_sub(TCGContext *s, TCGType type,
157
TCGReg a0, TCGReg a1, TCGReg a2)
158
{
159
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
160
case INDEX_op_muls2_i32:
161
tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
162
break;
163
- /* XXX: Perhaps args[2] & 0x1f is wrong */
164
- case INDEX_op_shl_i32:
165
- c = const_args[2] ?
166
- SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
167
- goto gen_shift32;
168
case INDEX_op_shr_i32:
169
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
170
SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
171
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
172
case INDEX_op_muls2_i32:
173
return C_O2_I2(r, r, r, r);
174
175
- case INDEX_op_shl_i32:
176
case INDEX_op_shr_i32:
177
case INDEX_op_sar_i32:
178
case INDEX_op_rotl_i32:
179
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
180
index XXXXXXX..XXXXXXX 100644
181
--- a/tcg/i386/tcg-target.c.inc
182
+++ b/tcg/i386/tcg-target.c.inc
183
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
184
.base.static_constraint = C_NotImplemented,
185
};
186
187
+static TCGConstraintSetIndex cset_shift(TCGType type, unsigned flags)
188
+{
189
+ return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
190
+}
191
+
192
+static void tgen_shl(TCGContext *s, TCGType type,
193
+ TCGReg a0, TCGReg a1, TCGReg a2)
194
+{
195
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
196
+ if (have_bmi2) {
197
+ tcg_out_vex_modrm(s, OPC_SHLX + rexw, a0, a2, a1);
198
+ } else {
199
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SHL, a0);
200
+ }
201
+}
202
+
203
+static void tgen_shli(TCGContext *s, TCGType type,
204
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
205
+{
206
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
207
+
208
+ /* For small constant 3-operand shift, use LEA. */
209
+ if (a0 != a1 && a2 >= 1 && a2 <= 3) {
210
+ if (a2 == 1) {
211
+ /* shl $1,a1,a0 -> lea (a1,a1),a0 */
212
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
213
+ } else {
214
+ /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
215
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
216
+ }
217
+ return;
218
+ }
219
+ tcg_out_mov(s, type, a0, a1);
220
+ tcg_out_shifti(s, SHIFT_SHL + rexw, a0, a2);
221
+}
222
+
223
+static const TCGOutOpBinary outop_shl = {
224
+ .base.static_constraint = C_Dynamic,
225
+ .base.dynamic_constraint = cset_shift,
226
+ .out_rrr = tgen_shl,
227
+ .out_rri = tgen_shli,
228
+};
229
+
230
static void tgen_sub(TCGContext *s, TCGType type,
231
TCGReg a0, TCGReg a1, TCGReg a2)
232
{
233
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
234
}
235
break;
236
237
- OP_32_64(shl):
238
- /* For small constant 3-operand shift, use LEA. */
239
- if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
240
- if (a2 - 1 == 0) {
241
- /* shl $1,a1,a0 -> lea (a1,a1),a0 */
242
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
243
- } else {
244
- /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
245
- tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
246
- }
247
- break;
248
- }
249
- c = SHIFT_SHL;
250
- vexop = OPC_SHLX;
251
- goto gen_shift_maybe_vex;
252
OP_32_64(shr):
253
c = SHIFT_SHR;
254
vexop = OPC_SHRX;
255
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
256
case INDEX_op_st_i64:
257
return C_O0_I2(re, r);
258
259
- case INDEX_op_shl_i32:
260
- case INDEX_op_shl_i64:
261
case INDEX_op_shr_i32:
262
case INDEX_op_shr_i64:
263
case INDEX_op_sar_i32:
264
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
265
index XXXXXXX..XXXXXXX 100644
266
--- a/tcg/loongarch64/tcg-target.c.inc
267
+++ b/tcg/loongarch64/tcg-target.c.inc
268
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
269
.out_rrr = tgen_remu,
270
};
271
272
+static void tgen_shl(TCGContext *s, TCGType type,
273
+ TCGReg a0, TCGReg a1, TCGReg a2)
274
+{
275
+ if (type == TCG_TYPE_I32) {
276
+ tcg_out_opc_sll_w(s, a0, a1, a2);
277
+ } else {
278
+ tcg_out_opc_sll_d(s, a0, a1, a2);
279
+ }
280
+}
281
+
282
+static void tgen_shli(TCGContext *s, TCGType type,
283
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
284
+{
285
+ if (type == TCG_TYPE_I32) {
286
+ tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
287
+ } else {
288
+ tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
289
+ }
290
+}
291
+
292
+static const TCGOutOpBinary outop_shl = {
293
+ .base.static_constraint = C_O1_I2(r, r, ri),
294
+ .out_rrr = tgen_shl,
295
+ .out_rri = tgen_shli,
296
+};
297
+
298
static void tgen_sub(TCGContext *s, TCGType type,
299
TCGReg a0, TCGReg a1, TCGReg a2)
300
{
301
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
302
tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
303
break;
304
305
- case INDEX_op_shl_i32:
306
- if (c2) {
307
- tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
308
- } else {
309
- tcg_out_opc_sll_w(s, a0, a1, a2);
310
- }
311
- break;
312
- case INDEX_op_shl_i64:
313
- if (c2) {
314
- tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
315
- } else {
316
- tcg_out_opc_sll_d(s, a0, a1, a2);
317
- }
318
- break;
319
-
320
case INDEX_op_shr_i32:
321
if (c2) {
322
tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
323
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
324
case INDEX_op_qemu_ld_i64:
325
return C_O1_I1(r, r);
326
327
- case INDEX_op_shl_i32:
328
- case INDEX_op_shl_i64:
329
case INDEX_op_shr_i32:
330
case INDEX_op_shr_i64:
331
case INDEX_op_sar_i32:
332
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
333
index XXXXXXX..XXXXXXX 100644
334
--- a/tcg/mips/tcg-target.c.inc
335
+++ b/tcg/mips/tcg-target.c.inc
336
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
337
.out_rrr = tgen_remu,
338
};
339
340
+static void tgen_shl(TCGContext *s, TCGType type,
341
+ TCGReg a0, TCGReg a1, TCGReg a2)
342
+{
343
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SLLV : OPC_DSLLV;
344
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
345
+}
346
+
347
+static void tgen_shli(TCGContext *s, TCGType type,
348
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
349
+{
350
+ if (type == TCG_TYPE_I32) {
351
+ tcg_out_opc_sa(s, OPC_SLL, a0, a1, a2);
352
+ } else {
353
+ tcg_out_dsll(s, a0, a1, a2);
354
+ }
355
+}
356
+
357
+static const TCGOutOpBinary outop_shl = {
358
+ .base.static_constraint = C_O1_I2(r, r, ri),
359
+ .out_rrr = tgen_shl,
360
+ .out_rri = tgen_shli,
361
+};
362
+
363
static void tgen_sub(TCGContext *s, TCGType type,
364
TCGReg a0, TCGReg a1, TCGReg a2)
365
{
366
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
367
case INDEX_op_sar_i32:
368
i1 = OPC_SRAV, i2 = OPC_SRA;
369
goto do_shift;
370
- case INDEX_op_shl_i32:
371
- i1 = OPC_SLLV, i2 = OPC_SLL;
372
- goto do_shift;
373
case INDEX_op_shr_i32:
374
i1 = OPC_SRLV, i2 = OPC_SRL;
375
goto do_shift;
376
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
377
}
378
i1 = OPC_DSRAV;
379
goto do_shiftv;
380
- case INDEX_op_shl_i64:
381
- if (c2) {
382
- tcg_out_dsll(s, a0, a1, a2);
383
- break;
384
- }
385
- i1 = OPC_DSLLV;
386
- goto do_shiftv;
387
case INDEX_op_shr_i64:
388
if (c2) {
389
tcg_out_dsrl(s, a0, a1, a2);
390
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
391
case INDEX_op_muls2_i64:
392
case INDEX_op_mulu2_i64:
393
return C_O2_I2(r, r, r, r);
394
- case INDEX_op_shl_i32:
395
case INDEX_op_shr_i32:
396
case INDEX_op_sar_i32:
397
case INDEX_op_rotr_i32:
398
case INDEX_op_rotl_i32:
399
- case INDEX_op_shl_i64:
400
case INDEX_op_shr_i64:
401
case INDEX_op_sar_i64:
402
case INDEX_op_rotr_i64:
403
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
404
index XXXXXXX..XXXXXXX 100644
405
--- a/tcg/ppc/tcg-target.c.inc
406
+++ b/tcg/ppc/tcg-target.c.inc
407
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
408
.out_rrr = tgen_remu,
409
};
410
411
+static void tgen_shl(TCGContext *s, TCGType type,
412
+ TCGReg a0, TCGReg a1, TCGReg a2)
413
+{
414
+ uint32_t insn = type == TCG_TYPE_I32 ? SLW : SLD;
415
+ tcg_out32(s, insn | SAB(a1, a0, a2));
416
+}
417
+
418
+static void tgen_shli(TCGContext *s, TCGType type,
419
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
420
+{
421
+ /* Limit immediate shift count lest we create an illegal insn. */
422
+ if (type == TCG_TYPE_I32) {
423
+ tcg_out_shli32(s, a0, a1, a2 & 31);
424
+ } else {
425
+ tcg_out_shli64(s, a0, a1, a2 & 63);
426
+ }
427
+}
428
+
429
+static const TCGOutOpBinary outop_shl = {
430
+ .base.static_constraint = C_O1_I2(r, r, ri),
431
+ .out_rrr = tgen_shl,
432
+ .out_rri = tgen_shli,
433
+};
434
+
435
static void tgen_sub(TCGContext *s, TCGType type,
436
TCGReg a0, TCGReg a1, TCGReg a2)
437
{
438
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
439
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
440
break;
441
442
- case INDEX_op_shl_i32:
443
- if (const_args[2]) {
444
- /* Limit immediate shift count lest we create an illegal insn. */
445
- tcg_out_shli32(s, args[0], args[1], args[2] & 31);
446
- } else {
447
- tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
448
- }
449
- break;
450
case INDEX_op_shr_i32:
451
if (const_args[2]) {
452
/* Limit immediate shift count lest we create an illegal insn. */
453
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
454
tcg_out_brcond2(s, args, const_args);
455
break;
456
457
- case INDEX_op_shl_i64:
458
- if (const_args[2]) {
459
- /* Limit immediate shift count lest we create an illegal insn. */
460
- tcg_out_shli64(s, args[0], args[1], args[2] & 63);
461
- } else {
462
- tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
463
- }
464
- break;
465
case INDEX_op_shr_i64:
466
if (const_args[2]) {
467
/* Limit immediate shift count lest we create an illegal insn. */
468
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
469
case INDEX_op_st_i64:
470
return C_O0_I2(r, r);
471
472
- case INDEX_op_shl_i32:
473
case INDEX_op_shr_i32:
474
case INDEX_op_sar_i32:
475
case INDEX_op_rotl_i32:
476
case INDEX_op_rotr_i32:
477
- case INDEX_op_shl_i64:
478
case INDEX_op_shr_i64:
479
case INDEX_op_sar_i64:
480
case INDEX_op_rotl_i64:
481
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
482
index XXXXXXX..XXXXXXX 100644
483
--- a/tcg/riscv/tcg-target.c.inc
484
+++ b/tcg/riscv/tcg-target.c.inc
485
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
486
.out_rrr = tgen_remu,
487
};
488
489
+static void tgen_shl(TCGContext *s, TCGType type,
490
+ TCGReg a0, TCGReg a1, TCGReg a2)
491
+{
492
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLW : OPC_SLL;
493
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
494
+}
495
+
496
+static void tgen_shli(TCGContext *s, TCGType type,
497
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
498
+{
499
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLIW : OPC_SLLI;
500
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
501
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
502
+}
503
+
504
+static const TCGOutOpBinary outop_shl = {
505
+ .base.static_constraint = C_O1_I2(r, r, ri),
506
+ .out_rrr = tgen_shl,
507
+ .out_rri = tgen_shli,
508
+};
509
+
510
static void tgen_sub(TCGContext *s, TCGType type,
511
TCGReg a0, TCGReg a1, TCGReg a2)
512
{
513
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
514
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
515
break;
516
517
- case INDEX_op_shl_i32:
518
- if (c2) {
519
- tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
520
- } else {
521
- tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
522
- }
523
- break;
524
- case INDEX_op_shl_i64:
525
- if (c2) {
526
- tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
527
- } else {
528
- tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
529
- }
530
- break;
531
-
532
case INDEX_op_shr_i32:
533
if (c2) {
534
tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
535
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
536
case INDEX_op_negsetcond_i64:
537
return C_O1_I2(r, r, rI);
538
539
- case INDEX_op_shl_i32:
540
case INDEX_op_shr_i32:
541
case INDEX_op_sar_i32:
542
case INDEX_op_rotl_i32:
543
case INDEX_op_rotr_i32:
544
- case INDEX_op_shl_i64:
545
case INDEX_op_shr_i64:
546
case INDEX_op_sar_i64:
547
case INDEX_op_rotl_i64:
548
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
549
index XXXXXXX..XXXXXXX 100644
550
--- a/tcg/s390x/tcg-target.c.inc
551
+++ b/tcg/s390x/tcg-target.c.inc
552
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
553
.base.static_constraint = C_NotImplemented,
554
};
555
556
+static void tgen_shl_int(TCGContext *s, TCGType type, TCGReg dst,
557
+ TCGReg src, TCGReg v, tcg_target_long i)
558
+{
559
+ if (type != TCG_TYPE_I32) {
560
+ tcg_out_sh64(s, RSY_SLLG, dst, src, v, i);
561
+ } else if (dst == src) {
562
+ tcg_out_sh32(s, RS_SLL, dst, v, i);
563
+ } else {
564
+ tcg_out_sh64(s, RSY_SLLK, dst, src, v, i);
565
+ }
566
+}
567
+
568
+static void tgen_shl(TCGContext *s, TCGType type,
569
+ TCGReg a0, TCGReg a1, TCGReg a2)
570
+{
571
+ tgen_shl_int(s, type, a0, a1, a2, 0);
572
+}
573
+
574
+static void tgen_shli(TCGContext *s, TCGType type,
575
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
576
+{
577
+ tgen_shl_int(s, type, a0, a1, TCG_REG_NONE, a2);
578
+}
579
+
580
+static const TCGOutOpBinary outop_shl = {
581
+ .base.static_constraint = C_O1_I2(r, r, ri),
582
+ .out_rrr = tgen_shl,
583
+ .out_rri = tgen_shli,
584
+};
585
+
586
static void tgen_sub(TCGContext *s, TCGType type,
587
TCGReg a0, TCGReg a1, TCGReg a2)
588
{
589
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
590
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
591
break;
592
593
- case INDEX_op_shl_i32:
594
- op = RS_SLL;
595
- op2 = RSY_SLLK;
596
do_shift32:
597
a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
598
if (a0 == a1) {
599
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
600
tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
601
break;
602
603
- case INDEX_op_shl_i64:
604
- op = RSY_SLLG;
605
do_shift64:
606
if (const_args[2]) {
607
tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
608
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
609
case INDEX_op_st_i64:
610
return C_O0_I2(r, r);
611
612
- case INDEX_op_shl_i64:
613
case INDEX_op_shr_i64:
614
case INDEX_op_sar_i64:
615
case INDEX_op_rotl_i32:
616
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
617
case INDEX_op_clz_i64:
618
return C_O1_I2(r, r, rI);
619
620
- case INDEX_op_shl_i32:
621
case INDEX_op_shr_i32:
622
case INDEX_op_sar_i32:
623
return C_O1_I2(r, r, ri);
624
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
625
index XXXXXXX..XXXXXXX 100644
626
--- a/tcg/sparc64/tcg-target.c.inc
627
+++ b/tcg/sparc64/tcg-target.c.inc
628
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
629
.base.static_constraint = C_NotImplemented,
630
};
631
632
+static void tgen_shl(TCGContext *s, TCGType type,
633
+ TCGReg a0, TCGReg a1, TCGReg a2)
634
+{
635
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
636
+ tcg_out_arith(s, a0, a1, a2, insn);
637
+}
638
+
639
+static void tgen_shli(TCGContext *s, TCGType type,
640
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
641
+{
642
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
643
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
644
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
645
+}
646
+
647
+static const TCGOutOpBinary outop_shl = {
648
+ .base.static_constraint = C_O1_I2(r, r, rJ),
649
+ .out_rrr = tgen_shl,
650
+ .out_rri = tgen_shli,
651
+};
652
+
653
static void tgen_sub(TCGContext *s, TCGType type,
654
TCGReg a0, TCGReg a1, TCGReg a2)
655
{
656
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
657
case INDEX_op_st32_i64:
658
tcg_out_ldst(s, a0, a1, a2, STW);
659
break;
660
- case INDEX_op_shl_i32:
661
- c = SHIFT_SLL;
662
do_shift32:
663
/* Limit immediate shift count lest we create an illegal insn. */
664
tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
665
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
666
case INDEX_op_st_i64:
667
tcg_out_ldst(s, a0, a1, a2, STX);
668
break;
669
- case INDEX_op_shl_i64:
670
- c = SHIFT_SLLX;
671
do_shift64:
672
/* Limit immediate shift count lest we create an illegal insn. */
673
tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
674
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
675
case INDEX_op_qemu_st_i64:
676
return C_O0_I2(rz, r);
677
678
- case INDEX_op_shl_i32:
679
- case INDEX_op_shl_i64:
680
case INDEX_op_shr_i32:
681
case INDEX_op_shr_i64:
682
case INDEX_op_sar_i32:
683
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
684
index XXXXXXX..XXXXXXX 100644
685
--- a/tcg/tci/tcg-target.c.inc
686
+++ b/tcg/tci/tcg-target.c.inc
687
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
688
case INDEX_op_st_i64:
689
return C_O0_I2(r, r);
690
691
- case INDEX_op_shl_i32:
692
- case INDEX_op_shl_i64:
693
case INDEX_op_shr_i32:
694
case INDEX_op_shr_i64:
695
case INDEX_op_sar_i32:
696
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
697
.out_rrr = tgen_remu,
698
};
699
700
+static void tgen_shl(TCGContext *s, TCGType type,
701
+ TCGReg a0, TCGReg a1, TCGReg a2)
702
+{
703
+ tcg_out_op_rrr(s, glue(INDEX_op_shl_i,TCG_TARGET_REG_BITS), a0, a1, a2);
704
+}
705
+
706
+static const TCGOutOpBinary outop_shl = {
707
+ .base.static_constraint = C_O1_I2(r, r, r),
708
+ .out_rrr = tgen_shl,
709
+};
710
+
711
static void tgen_sub(TCGContext *s, TCGType type,
712
TCGReg a0, TCGReg a1, TCGReg a2)
713
{
714
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
715
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
716
break;
717
718
- CASE_32_64(shl)
719
CASE_32_64(shr)
720
CASE_32_64(sar)
721
CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
722
--
723
2.43.0
724
725
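The conversion above is representative of the whole series: each backend supplies a TCGOutOpBinary with separate hooks for the register and immediate operand forms, and the common code dispatches through the all_outop[] table instead of a per-backend switch. A stripped-down sketch of that dispatch shape, with illustrative names rather than the real QEMU types:

/*
 * Sketch of the TCGOutOpBinary dispatch pattern; types and names are
 * simplified stand-ins for the QEMU structures.
 */
#include <stdio.h>

typedef struct {
    /* Emit op with register inputs: a0 = a1 op a2. */
    void (*out_rrr)(int a0, int a1, int a2);
    /* Emit op with an immediate: a0 = a1 op #a2. */
    void (*out_rri)(int a0, int a1, long a2);
} OutOpBinary;

static void tgen_shl(int a0, int a1, int a2)
{
    printf("lslv r%d, r%d, r%d\n", a0, a1, a2);
}

static void tgen_shli(int a0, int a1, long a2)
{
    printf("lsl r%d, r%d, #%ld\n", a0, a1, a2);
}

static const OutOpBinary outop_shl = {
    .out_rrr = tgen_shl,
    .out_rri = tgen_shli,
};

int main(void)
{
    /* The register allocator calls one hook per operand shape,
       instead of a switch inside a monolithic tcg_out_op. */
    outop_shl.out_rrr(0, 1, 2);
    outop_shl.out_rri(0, 1, 5);
    return 0;
}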
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 10 +++++-----
6
tcg/tcg-op.c | 4 ++--
7
tcg/tcg.c | 6 ++----
8
tcg/tci.c | 13 ++++---------
9
docs/devel/tcg-ops.rst | 4 ++--
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 17 insertions(+), 25 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(or, 1, 2, 0, TCG_OPF_INT)
18
DEF(orc, 1, 2, 0, TCG_OPF_INT)
19
DEF(rems, 1, 2, 0, TCG_OPF_INT)
20
DEF(remu, 1, 2, 0, TCG_OPF_INT)
21
+DEF(shl, 1, 2, 0, TCG_OPF_INT)
22
DEF(sub, 1, 2, 0, TCG_OPF_INT)
23
DEF(xor, 1, 2, 0, TCG_OPF_INT)
24
25
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
26
DEF(st16_i32, 0, 2, 1, 0)
27
DEF(st_i32, 0, 2, 1, 0)
28
/* shifts/rotates */
29
-DEF(shl_i32, 1, 2, 0, 0)
30
DEF(shr_i32, 1, 2, 0, 0)
31
DEF(sar_i32, 1, 2, 0, 0)
32
DEF(rotl_i32, 1, 2, 0, 0)
33
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
34
DEF(st32_i64, 0, 2, 1, 0)
35
DEF(st_i64, 0, 2, 1, 0)
36
/* shifts/rotates */
37
-DEF(shl_i64, 1, 2, 0, 0)
38
DEF(shr_i64, 1, 2, 0, 0)
39
DEF(sar_i64, 1, 2, 0, 0)
40
DEF(rotl_i64, 1, 2, 0, 0)
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
46
case INDEX_op_xor_vec:
47
return x ^ y;
48
49
- case INDEX_op_shl_i32:
50
- return (uint32_t)x << (y & 31);
51
-
52
- case INDEX_op_shl_i64:
53
+ case INDEX_op_shl:
54
+ if (type == TCG_TYPE_I32) {
55
+ return (uint32_t)x << (y & 31);
56
+ }
57
return (uint64_t)x << (y & 63);
58
59
case INDEX_op_shr_i32:
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
CASE_OP_32_64(rotl):
62
CASE_OP_32_64(rotr):
63
CASE_OP_32_64(sar):
64
- CASE_OP_32_64(shl):
65
+ case INDEX_op_shl:
66
CASE_OP_32_64(shr):
67
done = fold_shift(&ctx, op);
68
break;
69
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/tcg-op.c
72
+++ b/tcg/tcg-op.c
73
@@ -XXX,XX +XXX,XX @@ void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
74
75
void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
76
{
77
- tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
78
+ tcg_gen_op3_i32(INDEX_op_shl, ret, arg1, arg2);
79
}
80
81
void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
82
@@ -XXX,XX +XXX,XX @@ void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
83
void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
84
{
85
if (TCG_TARGET_REG_BITS == 64) {
86
- tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
87
+ tcg_gen_op3_i64(INDEX_op_shl, ret, arg1, arg2);
88
} else {
89
gen_helper_shl_i64(ret, arg1, arg2);
90
}
91
diff --git a/tcg/tcg.c b/tcg/tcg.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/tcg/tcg.c
94
+++ b/tcg/tcg.c
95
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
96
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
97
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
98
OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
99
- OUTOP(INDEX_op_shl_i32, TCGOutOpBinary, outop_shl),
100
- OUTOP(INDEX_op_shl_i64, TCGOutOpBinary, outop_shl),
101
+ OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
102
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
103
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
104
};
105
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
106
case INDEX_op_orc:
107
case INDEX_op_rems:
108
case INDEX_op_remu:
109
- case INDEX_op_shl_i32:
110
- case INDEX_op_shl_i64:
111
+ case INDEX_op_shl:
112
case INDEX_op_xor:
113
{
114
const TCGOutOpBinary *out =
115
diff --git a/tcg/tci.c b/tcg/tci.c
116
index XXXXXXX..XXXXXXX 100644
117
--- a/tcg/tci.c
118
+++ b/tcg/tci.c
119
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
120
break;
121
#endif
122
123
- /* Shift/rotate operations (32 bit). */
124
+ /* Shift/rotate operations. */
125
126
- case INDEX_op_shl_i32:
127
+ case INDEX_op_shl:
128
tci_args_rrr(insn, &r0, &r1, &r2);
129
- regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
130
+ regs[r0] = regs[r1] << (regs[r2] % TCG_TARGET_REG_BITS);
131
break;
132
case INDEX_op_shr_i32:
133
tci_args_rrr(insn, &r0, &r1, &r2);
134
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
135
136
/* Shift/rotate operations (64 bit). */
137
138
- case INDEX_op_shl_i64:
139
- tci_args_rrr(insn, &r0, &r1, &r2);
140
- regs[r0] = regs[r1] << (regs[r2] & 63);
141
- break;
142
case INDEX_op_shr_i64:
143
tci_args_rrr(insn, &r0, &r1, &r2);
144
regs[r0] = regs[r1] >> (regs[r2] & 63);
145
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
146
case INDEX_op_orc:
147
case INDEX_op_rems:
148
case INDEX_op_remu:
149
+ case INDEX_op_shl:
150
case INDEX_op_sub:
151
case INDEX_op_xor:
152
- case INDEX_op_shl_i32:
153
- case INDEX_op_shl_i64:
154
case INDEX_op_shr_i32:
155
case INDEX_op_shr_i64:
156
case INDEX_op_sar_i32:
157
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
158
index XXXXXXX..XXXXXXX 100644
159
--- a/docs/devel/tcg-ops.rst
160
+++ b/docs/devel/tcg-ops.rst
161
@@ -XXX,XX +XXX,XX @@ Shifts/Rotates
162
163
.. list-table::
164
165
- * - shl_i32/i64 *t0*, *t1*, *t2*
166
+ * - shl *t0*, *t1*, *t2*
167
168
- | *t0* = *t1* << *t2*
169
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
170
+ | Unspecified behavior for negative or out-of-range shifts.
171
172
* - shr_i32/i64 *t0*, *t1*, *t2*
173
174
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
175
index XXXXXXX..XXXXXXX 100644
176
--- a/tcg/tci/tcg-target.c.inc
177
+++ b/tcg/tci/tcg-target.c.inc
178
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
179
static void tgen_shl(TCGContext *s, TCGType type,
180
TCGReg a0, TCGReg a1, TCGReg a2)
181
{
182
- tcg_out_op_rrr(s, glue(INDEX_op_shl_i,TCG_TARGET_REG_BITS), a0, a1, a2);
183
+ tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
184
}
185
186
static const TCGOutOpBinary outop_shl = {
187
--
188
2.43.0
189
190
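Note the interpreter change folded into this patch: with one register-width shl opcode, TCI reduces the count modulo TCG_TARGET_REG_BITS, which keeps the C shift well defined while staying within the op's "unspecified for out-of-range shifts" contract. A small standalone illustration of that semantics (a sketch, not the TCI code itself):

#include <stdint.h>
#include <stdio.h>

#define REG_BITS 64

static uint64_t tci_shl(uint64_t v, uint64_t count)
{
    return v << (count % REG_BITS);   /* never shifts by >= 64 */
}

int main(void)
{
    printf("%llx\n", (unsigned long long)tci_shl(1, 4));    /* 10 */
    printf("%llx\n", (unsigned long long)tci_shl(1, 68));   /* also 10 */
    return 0;
}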
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/tcg.c | 6 +++--
5
tcg/aarch64/tcg-target.c.inc | 37 ++++++++++++++-------------
6
tcg/arm/tcg-target.c.inc | 24 ++++++++++++++----
7
tcg/i386/tcg-target.c.inc | 33 +++++++++++++++++++-----
8
tcg/loongarch64/tcg-target.c.inc | 43 +++++++++++++++++++-------------
9
tcg/mips/tcg-target.c.inc | 35 +++++++++++++++++---------
10
tcg/ppc/tcg-target.c.inc | 42 ++++++++++++++++++-------------
11
tcg/riscv/tcg-target.c.inc | 38 +++++++++++++++-------------
12
tcg/s390x/tcg-target.c.inc | 39 ++++++++++++++++++++++-------
13
tcg/sparc64/tcg-target.c.inc | 29 +++++++++++++++------
14
tcg/tci/tcg-target.c.inc | 18 ++++++++++---
15
11 files changed, 229 insertions(+), 115 deletions(-)
16
1
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
20
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
22
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
23
OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
24
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
25
+ OUTOP(INDEX_op_shr_i32, TCGOutOpBinary, outop_shr),
26
+ OUTOP(INDEX_op_shr_i64, TCGOutOpBinary, outop_shr),
27
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
28
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
29
};
30
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
31
case INDEX_op_st8_i32:
32
case INDEX_op_st16_i32:
33
case INDEX_op_st_i32:
34
- case INDEX_op_shr_i32:
35
case INDEX_op_sar_i32:
36
case INDEX_op_extract_i32:
37
case INDEX_op_sextract_i32:
38
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
39
case INDEX_op_st16_i64:
40
case INDEX_op_st32_i64:
41
case INDEX_op_st_i64:
42
- case INDEX_op_shr_i64:
43
case INDEX_op_sar_i64:
44
case INDEX_op_ext_i32_i64:
45
case INDEX_op_extu_i32_i64:
46
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
47
case INDEX_op_rems:
48
case INDEX_op_remu:
49
case INDEX_op_shl:
50
+ case INDEX_op_shr_i32:
51
+ case INDEX_op_shr_i64:
52
case INDEX_op_xor:
53
{
54
const TCGOutOpBinary *out =
55
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/aarch64/tcg-target.c.inc
58
+++ b/tcg/aarch64/tcg-target.c.inc
59
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
60
tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
61
}
62
63
-static inline void tcg_out_shr(TCGContext *s, TCGType ext,
64
- TCGReg rd, TCGReg rn, unsigned int m)
65
-{
66
- int max = ext ? 63 : 31;
67
- tcg_out_ubfm(s, ext, rd, rn, m & max, max);
68
-}
69
-
70
static inline void tcg_out_sar(TCGContext *s, TCGType ext,
71
TCGReg rd, TCGReg rn, unsigned int m)
72
{
73
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
74
.out_rri = tgen_shli,
75
};
76
77
+static void tgen_shr(TCGContext *s, TCGType type,
78
+ TCGReg a0, TCGReg a1, TCGReg a2)
79
+{
80
+ tcg_out_insn(s, 3508, LSRV, type, a0, a1, a2);
81
+}
82
+
83
+static void tgen_shri(TCGContext *s, TCGType type,
84
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
85
+{
86
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
87
+ tcg_out_ubfm(s, type, a0, a1, a2 & max, max);
88
+}
89
+
90
+static const TCGOutOpBinary outop_shr = {
91
+ .base.static_constraint = C_O1_I2(r, r, ri),
92
+ .out_rrr = tgen_shr,
93
+ .out_rri = tgen_shri,
94
+};
95
+
96
static void tgen_sub(TCGContext *s, TCGType type,
97
TCGReg a0, TCGReg a1, TCGReg a2)
98
{
99
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
100
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
101
break;
102
103
- case INDEX_op_shr_i64:
104
- case INDEX_op_shr_i32:
105
- if (c2) {
106
- tcg_out_shr(s, ext, a0, a1, a2);
107
- } else {
108
- tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2);
109
- }
110
- break;
111
-
112
case INDEX_op_sar_i64:
113
case INDEX_op_sar_i32:
114
if (c2) {
115
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
116
case INDEX_op_negsetcond_i64:
117
return C_O1_I2(r, r, rC);
118
119
- case INDEX_op_shr_i32:
120
case INDEX_op_sar_i32:
121
case INDEX_op_rotl_i32:
122
case INDEX_op_rotr_i32:
123
- case INDEX_op_shr_i64:
124
case INDEX_op_sar_i64:
125
case INDEX_op_rotl_i64:
126
case INDEX_op_rotr_i64:
127
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
128
index XXXXXXX..XXXXXXX 100644
129
--- a/tcg/arm/tcg-target.c.inc
130
+++ b/tcg/arm/tcg-target.c.inc
131
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
132
.out_rri = tgen_shli,
133
};
134
135
+static void tgen_shr(TCGContext *s, TCGType type,
136
+ TCGReg a0, TCGReg a1, TCGReg a2)
137
+{
138
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSR(a2));
139
+}
140
+
141
+static void tgen_shri(TCGContext *s, TCGType type,
142
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
143
+{
144
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
145
+ SHIFT_IMM_LSR(a2 & 0x1f));
146
+}
147
+
148
+static const TCGOutOpBinary outop_shr = {
149
+ .base.static_constraint = C_O1_I2(r, r, ri),
150
+ .out_rrr = tgen_shr,
151
+ .out_rri = tgen_shri,
152
+};
153
+
154
static void tgen_sub(TCGContext *s, TCGType type,
155
TCGReg a0, TCGReg a1, TCGReg a2)
156
{
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
158
case INDEX_op_muls2_i32:
159
tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
160
break;
161
- case INDEX_op_shr_i32:
162
- c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
163
- SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
164
- goto gen_shift32;
165
case INDEX_op_sar_i32:
166
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
167
SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
168
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
169
case INDEX_op_muls2_i32:
170
return C_O2_I2(r, r, r, r);
171
172
- case INDEX_op_shr_i32:
173
case INDEX_op_sar_i32:
174
case INDEX_op_rotl_i32:
175
case INDEX_op_rotr_i32:
176
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
177
index XXXXXXX..XXXXXXX 100644
178
--- a/tcg/i386/tcg-target.c.inc
179
+++ b/tcg/i386/tcg-target.c.inc
180
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
181
.out_rri = tgen_shli,
182
};
183
184
+static void tgen_shr(TCGContext *s, TCGType type,
185
+ TCGReg a0, TCGReg a1, TCGReg a2)
186
+{
187
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
188
+ if (have_bmi2) {
189
+ tcg_out_vex_modrm(s, OPC_SHRX + rexw, a0, a2, a1);
190
+ } else {
191
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SHR, a0);
192
+ }
193
+}
194
+
195
+static void tgen_shri(TCGContext *s, TCGType type,
196
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
197
+{
198
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
199
+
200
+ tcg_out_mov(s, type, a0, a1);
201
+ tcg_out_shifti(s, SHIFT_SHR + rexw, a0, a2);
202
+}
203
+
204
+static const TCGOutOpBinary outop_shr = {
205
+ .base.static_constraint = C_Dynamic,
206
+ .base.dynamic_constraint = cset_shift,
207
+ .out_rrr = tgen_shr,
208
+ .out_rri = tgen_shri,
209
+};
210
+
211
static void tgen_sub(TCGContext *s, TCGType type,
212
TCGReg a0, TCGReg a1, TCGReg a2)
213
{
214
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
215
}
216
break;
217
218
- OP_32_64(shr):
219
- c = SHIFT_SHR;
220
- vexop = OPC_SHRX;
221
- goto gen_shift_maybe_vex;
222
OP_32_64(sar):
223
c = SHIFT_SAR;
224
vexop = OPC_SARX;
225
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
226
case INDEX_op_st_i64:
227
return C_O0_I2(re, r);
228
229
- case INDEX_op_shr_i32:
230
- case INDEX_op_shr_i64:
231
case INDEX_op_sar_i32:
232
case INDEX_op_sar_i64:
233
return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
234
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
235
index XXXXXXX..XXXXXXX 100644
236
--- a/tcg/loongarch64/tcg-target.c.inc
237
+++ b/tcg/loongarch64/tcg-target.c.inc
238
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
239
.out_rri = tgen_shli,
240
};
241
242
+static void tgen_shr(TCGContext *s, TCGType type,
243
+ TCGReg a0, TCGReg a1, TCGReg a2)
244
+{
245
+ if (type == TCG_TYPE_I32) {
246
+ tcg_out_opc_srl_w(s, a0, a1, a2);
247
+ } else {
248
+ tcg_out_opc_srl_d(s, a0, a1, a2);
249
+ }
250
+}
251
+
252
+static void tgen_shri(TCGContext *s, TCGType type,
253
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
254
+{
255
+ if (type == TCG_TYPE_I32) {
256
+ tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
257
+ } else {
258
+ tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
259
+ }
260
+}
261
+
262
+static const TCGOutOpBinary outop_shr = {
263
+ .base.static_constraint = C_O1_I2(r, r, ri),
264
+ .out_rrr = tgen_shr,
265
+ .out_rri = tgen_shri,
266
+};
267
+
268
static void tgen_sub(TCGContext *s, TCGType type,
269
TCGReg a0, TCGReg a1, TCGReg a2)
270
{
271
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
272
tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
273
break;
274
275
- case INDEX_op_shr_i32:
276
- if (c2) {
277
- tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
278
- } else {
279
- tcg_out_opc_srl_w(s, a0, a1, a2);
280
- }
281
- break;
282
- case INDEX_op_shr_i64:
283
- if (c2) {
284
- tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
285
- } else {
286
- tcg_out_opc_srl_d(s, a0, a1, a2);
287
- }
288
- break;
289
-
290
case INDEX_op_sar_i32:
291
if (c2) {
292
tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
293
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
294
case INDEX_op_qemu_ld_i64:
295
return C_O1_I1(r, r);
296
297
- case INDEX_op_shr_i32:
298
- case INDEX_op_shr_i64:
299
case INDEX_op_sar_i32:
300
case INDEX_op_sar_i64:
301
case INDEX_op_rotl_i32:
302
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
303
index XXXXXXX..XXXXXXX 100644
304
--- a/tcg/mips/tcg-target.c.inc
305
+++ b/tcg/mips/tcg-target.c.inc
306
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
307
.out_rri = tgen_shli,
308
};
309
310
+static void tgen_shr(TCGContext *s, TCGType type,
311
+ TCGReg a0, TCGReg a1, TCGReg a2)
312
+{
313
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SRLV : OPC_DSRLV;
314
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
315
+}
316
+
317
+static void tgen_shri(TCGContext *s, TCGType type,
318
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
319
+{
320
+ if (type == TCG_TYPE_I32) {
321
+ tcg_out_opc_sa(s, OPC_SRL, a0, a1, a2);
322
+ } else {
323
+ tcg_out_dsrl(s, a0, a1, a2);
324
+ }
325
+}
326
+
327
+static const TCGOutOpBinary outop_shr = {
328
+ .base.static_constraint = C_O1_I2(r, r, ri),
329
+ .out_rrr = tgen_shr,
330
+ .out_rri = tgen_shri,
331
+};
332
+
333
static void tgen_sub(TCGContext *s, TCGType type,
334
TCGReg a0, TCGReg a1, TCGReg a2)
335
{
336
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
337
case INDEX_op_sar_i32:
338
i1 = OPC_SRAV, i2 = OPC_SRA;
339
goto do_shift;
340
- case INDEX_op_shr_i32:
341
- i1 = OPC_SRLV, i2 = OPC_SRL;
342
- goto do_shift;
343
case INDEX_op_rotr_i32:
344
i1 = OPC_ROTRV, i2 = OPC_ROTR;
345
do_shift:
346
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
347
}
348
i1 = OPC_DSRAV;
349
goto do_shiftv;
350
- case INDEX_op_shr_i64:
351
- if (c2) {
352
- tcg_out_dsrl(s, a0, a1, a2);
353
- break;
354
- }
355
- i1 = OPC_DSRLV;
356
- goto do_shiftv;
357
case INDEX_op_rotr_i64:
358
if (c2) {
359
tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
360
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
361
case INDEX_op_muls2_i64:
362
case INDEX_op_mulu2_i64:
363
return C_O2_I2(r, r, r, r);
364
- case INDEX_op_shr_i32:
365
case INDEX_op_sar_i32:
366
case INDEX_op_rotr_i32:
367
case INDEX_op_rotl_i32:
368
- case INDEX_op_shr_i64:
369
case INDEX_op_sar_i64:
370
case INDEX_op_rotr_i64:
371
case INDEX_op_rotl_i64:
372
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
373
index XXXXXXX..XXXXXXX 100644
374
--- a/tcg/ppc/tcg-target.c.inc
375
+++ b/tcg/ppc/tcg-target.c.inc
376
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
377
.out_rri = tgen_shli,
378
};
379
380
+static void tgen_shr(TCGContext *s, TCGType type,
381
+ TCGReg a0, TCGReg a1, TCGReg a2)
382
+{
383
+ uint32_t insn = type == TCG_TYPE_I32 ? SRW : SRD;
384
+ tcg_out32(s, insn | SAB(a1, a0, a2));
385
+}
386
+
387
+static void tgen_shri(TCGContext *s, TCGType type,
388
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
389
+{
390
+ /* Limit immediate shift count lest we create an illegal insn. */
391
+ if (type == TCG_TYPE_I32) {
392
+ tcg_out_shri32(s, a0, a1, a2 & 31);
393
+ } else {
394
+ tcg_out_shri64(s, a0, a1, a2 & 63);
395
+ }
396
+}
397
+
398
+static const TCGOutOpBinary outop_shr = {
399
+ .base.static_constraint = C_O1_I2(r, r, ri),
400
+ .out_rrr = tgen_shr,
401
+ .out_rri = tgen_shri,
402
+};
403
+
404
static void tgen_sub(TCGContext *s, TCGType type,
405
TCGReg a0, TCGReg a1, TCGReg a2)
406
{
407
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
408
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
409
break;
410
411
- case INDEX_op_shr_i32:
412
- if (const_args[2]) {
413
- /* Limit immediate shift count lest we create an illegal insn. */
414
- tcg_out_shri32(s, args[0], args[1], args[2] & 31);
415
- } else {
416
- tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
417
- }
418
- break;
419
case INDEX_op_sar_i32:
420
if (const_args[2]) {
421
tcg_out_sari32(s, args[0], args[1], args[2]);
422
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
423
tcg_out_brcond2(s, args, const_args);
424
break;
425
426
- case INDEX_op_shr_i64:
427
- if (const_args[2]) {
428
- /* Limit immediate shift count lest we create an illegal insn. */
429
- tcg_out_shri64(s, args[0], args[1], args[2] & 63);
430
- } else {
431
- tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
432
- }
433
- break;
434
case INDEX_op_sar_i64:
435
if (const_args[2]) {
436
tcg_out_sari64(s, args[0], args[1], args[2]);
437
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
438
case INDEX_op_st_i64:
439
return C_O0_I2(r, r);
440
441
- case INDEX_op_shr_i32:
442
case INDEX_op_sar_i32:
443
case INDEX_op_rotl_i32:
444
case INDEX_op_rotr_i32:
445
- case INDEX_op_shr_i64:
446
case INDEX_op_sar_i64:
447
case INDEX_op_rotl_i64:
448
case INDEX_op_rotr_i64:
449
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
450
index XXXXXXX..XXXXXXX 100644
451
--- a/tcg/riscv/tcg-target.c.inc
452
+++ b/tcg/riscv/tcg-target.c.inc
453
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
454
.out_rri = tgen_shli,
455
};
456
457
+static void tgen_shr(TCGContext *s, TCGType type,
458
+ TCGReg a0, TCGReg a1, TCGReg a2)
459
+{
460
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLW : OPC_SRL;
461
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
462
+}
463
+
464
+static void tgen_shri(TCGContext *s, TCGType type,
465
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
466
+{
467
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLIW : OPC_SRLI;
468
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
469
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
470
+}
471
+
472
+static const TCGOutOpBinary outop_shr = {
473
+ .base.static_constraint = C_O1_I2(r, r, ri),
474
+ .out_rrr = tgen_shr,
475
+ .out_rri = tgen_shri,
476
+};
477
+
478
static void tgen_sub(TCGContext *s, TCGType type,
479
TCGReg a0, TCGReg a1, TCGReg a2)
480
{
481
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
482
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
483
break;
484
485
- case INDEX_op_shr_i32:
486
- if (c2) {
487
- tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
488
- } else {
489
- tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
490
- }
491
- break;
492
- case INDEX_op_shr_i64:
493
- if (c2) {
494
- tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
495
- } else {
496
- tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
497
- }
498
- break;
499
-
500
case INDEX_op_sar_i32:
501
if (c2) {
502
tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
503
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
504
case INDEX_op_negsetcond_i64:
505
return C_O1_I2(r, r, rI);
506
507
- case INDEX_op_shr_i32:
508
case INDEX_op_sar_i32:
509
case INDEX_op_rotl_i32:
510
case INDEX_op_rotr_i32:
511
- case INDEX_op_shr_i64:
512
case INDEX_op_sar_i64:
513
case INDEX_op_rotl_i64:
514
case INDEX_op_rotr_i64:
515
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
516
index XXXXXXX..XXXXXXX 100644
517
--- a/tcg/s390x/tcg-target.c.inc
518
+++ b/tcg/s390x/tcg-target.c.inc
519
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
520
.out_rri = tgen_shli,
521
};
522
523
+static void tgen_shr_int(TCGContext *s, TCGType type, TCGReg dst,
524
+ TCGReg src, TCGReg v, tcg_target_long i)
525
+{
526
+ if (type != TCG_TYPE_I32) {
527
+ tcg_out_sh64(s, RSY_SRLG, dst, src, v, i);
528
+ } else if (dst == src) {
529
+ tcg_out_sh32(s, RS_SRL, dst, v, i);
530
+ } else {
531
+ tcg_out_sh64(s, RSY_SRLK, dst, src, v, i);
532
+ }
533
+}
534
+
535
+static void tgen_shr(TCGContext *s, TCGType type,
536
+ TCGReg a0, TCGReg a1, TCGReg a2)
537
+{
538
+ tgen_shr_int(s, type, a0, a1, a2, 0);
539
+}
540
+
541
+static void tgen_shri(TCGContext *s, TCGType type,
542
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
543
+{
544
+ tgen_shr_int(s, type, a0, a1, TCG_REG_NONE, a2);
545
+}
546
+
547
+static const TCGOutOpBinary outop_shr = {
548
+ .base.static_constraint = C_O1_I2(r, r, ri),
549
+ .out_rrr = tgen_shr,
550
+ .out_rri = tgen_shri,
551
+};
552
+
553
static void tgen_sub(TCGContext *s, TCGType type,
554
TCGReg a0, TCGReg a1, TCGReg a2)
555
{
556
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
557
}
558
}
559
break;
560
- case INDEX_op_shr_i32:
561
- op = RS_SRL;
562
- op2 = RSY_SRLK;
563
- goto do_shift32;
564
case INDEX_op_sar_i32:
565
op = RS_SRA;
566
op2 = RSY_SRAK;
567
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
568
tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
569
}
570
break;
571
- case INDEX_op_shr_i64:
572
- op = RSY_SRLG;
573
- goto do_shift64;
574
case INDEX_op_sar_i64:
575
op = RSY_SRAG;
576
goto do_shift64;
577
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
578
case INDEX_op_st_i64:
579
return C_O0_I2(r, r);
580
581
- case INDEX_op_shr_i64:
582
case INDEX_op_sar_i64:
583
case INDEX_op_rotl_i32:
584
case INDEX_op_rotl_i64:
585
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
586
case INDEX_op_clz_i64:
587
return C_O1_I2(r, r, rI);
588
589
- case INDEX_op_shr_i32:
590
case INDEX_op_sar_i32:
591
return C_O1_I2(r, r, ri);
592
593
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
594
index XXXXXXX..XXXXXXX 100644
595
--- a/tcg/sparc64/tcg-target.c.inc
596
+++ b/tcg/sparc64/tcg-target.c.inc
597
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
598
.out_rri = tgen_shli,
599
};
600
601
+static void tgen_shr(TCGContext *s, TCGType type,
602
+ TCGReg a0, TCGReg a1, TCGReg a2)
603
+{
604
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
605
+ tcg_out_arith(s, a0, a1, a2, insn);
606
+}
607
+
608
+static void tgen_shri(TCGContext *s, TCGType type,
609
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
610
+{
611
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
612
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
613
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
614
+}
615
+
616
+static const TCGOutOpBinary outop_shr = {
617
+ .base.static_constraint = C_O1_I2(r, r, rJ),
618
+ .out_rrr = tgen_shr,
619
+ .out_rri = tgen_shri,
620
+};
621
+
622
static void tgen_sub(TCGContext *s, TCGType type,
623
TCGReg a0, TCGReg a1, TCGReg a2)
624
{
625
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
626
/* Limit immediate shift count lest we create an illegal insn. */
627
tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
628
break;
629
- case INDEX_op_shr_i32:
630
- c = SHIFT_SRL;
631
- goto do_shift32;
632
case INDEX_op_sar_i32:
633
c = SHIFT_SRA;
634
goto do_shift32;
635
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
636
/* Limit immediate shift count lest we create an illegal insn. */
637
tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
638
break;
639
- case INDEX_op_shr_i64:
640
- c = SHIFT_SRLX;
641
- goto do_shift64;
642
case INDEX_op_sar_i64:
643
c = SHIFT_SRAX;
644
goto do_shift64;
645
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
646
case INDEX_op_qemu_st_i64:
647
return C_O0_I2(rz, r);
648
649
- case INDEX_op_shr_i32:
650
- case INDEX_op_shr_i64:
651
case INDEX_op_sar_i32:
652
case INDEX_op_sar_i64:
653
case INDEX_op_setcond_i32:
654
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
655
index XXXXXXX..XXXXXXX 100644
656
--- a/tcg/tci/tcg-target.c.inc
657
+++ b/tcg/tci/tcg-target.c.inc
658
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
659
case INDEX_op_st_i64:
660
return C_O0_I2(r, r);
661
662
- case INDEX_op_shr_i32:
663
- case INDEX_op_shr_i64:
664
case INDEX_op_sar_i32:
665
case INDEX_op_sar_i64:
666
case INDEX_op_rotl_i32:
667
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shl = {
668
.out_rrr = tgen_shl,
669
};
670
671
+static void tgen_shr(TCGContext *s, TCGType type,
672
+ TCGReg a0, TCGReg a1, TCGReg a2)
673
+{
674
+ if (type < TCG_TYPE_REG) {
675
+ tcg_out_ext32u(s, TCG_REG_TMP, a1);
676
+ a1 = TCG_REG_TMP;
677
+ }
678
+ tcg_out_op_rrr(s, glue(INDEX_op_shr_i,TCG_TARGET_REG_BITS), a0, a1, a2);
679
+}
680
+
681
+static const TCGOutOpBinary outop_shr = {
682
+ .base.static_constraint = C_O1_I2(r, r, r),
683
+ .out_rrr = tgen_shr,
684
+};
685
+
686
static void tgen_sub(TCGContext *s, TCGType type,
687
TCGReg a0, TCGReg a1, TCGReg a2)
688
{
689
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
690
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
691
break;
692
693
- CASE_32_64(shr)
694
CASE_32_64(sar)
695
CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
696
CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
697
--
698
2.43.0
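The x86 shifts above use C_Dynamic: the constraint set is not fixed at build time but picked per CPU, since BMI2 SHLX/SHRX are three-operand while the legacy CL-count forms tie the destination to the first source. A rough sketch of such a feature-dependent constraint hook (the enum and names here are invented for illustration, not QEMU's definitions):

#include <stdbool.h>
#include <stdio.h>

typedef enum {
    C_O1_I2_r_r_ri,   /* 3-operand form: any regs, reg-or-imm count */
    C_O1_I2_r_0_ci,   /* 2-operand form: dest tied to src, count in CL */
} ConstraintSet;

static bool have_bmi2;

static ConstraintSet cset_shift(void)
{
    /* BMI2 SHLX/SHRX are 3-operand; legacy SHL/SHR overwrite the
       destination and require the count in CL. */
    return have_bmi2 ? C_O1_I2_r_r_ri : C_O1_I2_r_0_ci;
}

int main(void)
{
    have_bmi2 = false;
    printf("%d\n", cset_shift());
    have_bmi2 = true;
    printf("%d\n", cset_shift());
    return 0;
}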
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 17 +++++++----------
6
tcg/tcg-op.c | 4 ++--
7
tcg/tcg.c | 6 ++----
8
tcg/tci.c | 11 +++--------
9
docs/devel/tcg-ops.rst | 4 ++--
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 18 insertions(+), 29 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(orc, 1, 2, 0, TCG_OPF_INT)
18
DEF(rems, 1, 2, 0, TCG_OPF_INT)
19
DEF(remu, 1, 2, 0, TCG_OPF_INT)
20
DEF(shl, 1, 2, 0, TCG_OPF_INT)
21
+DEF(shr, 1, 2, 0, TCG_OPF_INT)
22
DEF(sub, 1, 2, 0, TCG_OPF_INT)
23
DEF(xor, 1, 2, 0, TCG_OPF_INT)

@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* shifts/rotates */
-DEF(shr_i32, 1, 2, 0, 0)
 DEF(sar_i32, 1, 2, 0, 0)
 DEF(rotl_i32, 1, 2, 0, 0)
 DEF(rotr_i32, 1, 2, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* shifts/rotates */
-DEF(shr_i64, 1, 2, 0, 0)
 DEF(sar_i64, 1, 2, 0, 0)
 DEF(rotl_i64, 1, 2, 0, 0)
 DEF(rotr_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         }
         return (uint64_t)x << (y & 63);

-    case INDEX_op_shr_i32:
-        return (uint32_t)x >> (y & 31);
-
-    case INDEX_op_shr_i64:
+    case INDEX_op_shr:
+        if (type == TCG_TYPE_I32) {
+            return (uint32_t)x >> (y & 31);
+        }
         return (uint64_t)x >> (y & 63);

     case INDEX_op_sar_i32:
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

 static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
 {
-    TCGOpcode shr_opc;
     TCGOpcode uext_opc = 0, sext_opc = 0;
     TCGCond cond = op->args[3];
     TCGArg ret, src1, src2;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)

     switch (ctx->type) {
     case TCG_TYPE_I32:
-        shr_opc = INDEX_op_shr_i32;
         if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
             uext_opc = INDEX_op_extract_i32;
         }
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         }
         break;
     case TCG_TYPE_I64:
-        shr_opc = INDEX_op_shr_i64;
         if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
             uext_opc = INDEX_op_extract_i64;
         }
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
         op->args[3] = 1;
     } else {
         if (sh) {
-            op2 = opt_insert_before(ctx, op, shr_opc, 3);
+            op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3);
             op2->args[0] = ret;
             op2->args[1] = src1;
             op2->args[2] = arg_new_constant(ctx, sh);
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         * input sign repetitions.
         */
        return fold_masks_s(ctx, op, s_mask);
-    CASE_OP_32_64(shr):
+    case INDEX_op_shr:
        /*
         * If the sign bit is known zero, then logical right shift
         * will not reduce the number of input sign repetitions.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(rotr):
         CASE_OP_32_64(sar):
         case INDEX_op_shl:
-        CASE_OP_32_64(shr):
+        case INDEX_op_shr:
             done = fold_shift(&ctx, op);
             break;
         CASE_OP_32_64(setcond):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

 void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
+    tcg_gen_op3_i32(INDEX_op_shr, ret, arg1, arg2);
 }

 void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_shr, ret, arg1, arg2);
     } else {
         gen_helper_shr_i64(ret, arg1, arg2);
     }
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
     OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
     OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
-    OUTOP(INDEX_op_shr_i32, TCGOutOpBinary, outop_shr),
-    OUTOP(INDEX_op_shr_i64, TCGOutOpBinary, outop_shr),
+    OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
     OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
 };
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_rems:
     case INDEX_op_remu:
     case INDEX_op_shl:
-    case INDEX_op_shr_i32:
-    case INDEX_op_shr_i64:
+    case INDEX_op_shr:
     case INDEX_op_xor:
         {
             const TCGOutOpBinary *out =
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = regs[r1] << (regs[r2] % TCG_TARGET_REG_BITS);
         break;
-    case INDEX_op_shr_i32:
+    case INDEX_op_shr:
         tci_args_rrr(insn, &r0, &r1, &r2);
-        regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
+        regs[r0] = regs[r1] >> (regs[r2] % TCG_TARGET_REG_BITS);
         break;
     case INDEX_op_sar_i32:
         tci_args_rrr(insn, &r0, &r1, &r2);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

         /* Shift/rotate operations (64 bit). */

-    case INDEX_op_shr_i64:
-        tci_args_rrr(insn, &r0, &r1, &r2);
-        regs[r0] = regs[r1] >> (regs[r2] & 63);
-        break;
     case INDEX_op_sar_i64:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_rems:
     case INDEX_op_remu:
     case INDEX_op_shl:
+    case INDEX_op_shr:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_shr_i32:
-    case INDEX_op_shr_i64:
     case INDEX_op_sar_i32:
     case INDEX_op_sar_i64:
     case INDEX_op_rotl_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Shifts/Rotates
     - | *t0* = *t1* << *t2*
       | Unspecified behavior for negative or out-of-range shifts.

-   * - shr_i32/i64 *t0*, *t1*, *t2*
+   * - shr *t0*, *t1*, *t2*

     - | *t0* = *t1* >> *t2* (unsigned)
-      | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+      | Unspecified behavior for negative or out-of-range shifts.

   * - sar_i32/i64 *t0*, *t1*, *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_shr(TCGContext *s, TCGType type,
         tcg_out_ext32u(s, TCG_REG_TMP, a1);
         a1 = TCG_REG_TMP;
     }
-    tcg_out_op_rrr(s, glue(INDEX_op_shr_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
 }

 static const TCGOutOpBinary outop_shr = {
--
2.43.0
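
A note on the docs change above: the "unspecified behavior" wording means guest
front ends must bound a runtime shift count themselves before emitting the op.
A minimal sketch using the existing tcg_gen_* helpers (dest, src and count are
placeholder TCGv_i32 values, not names from this series):

    /* 32-bit shift with a runtime count: clamp to 0..31 first. */
    TCGv_i32 t = tcg_temp_new_i32();
    tcg_gen_andi_i32(t, count, 31);    /* mask the count into range */
    tcg_gen_shr_i32(dest, src, t);     /* now well-defined for any count */
    tcg_temp_free_i32(t);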
Deleted patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  6 ++-
 tcg/aarch64/tcg-target.c.inc     | 37 ++++++++---------
 tcg/arm/tcg-target.c.inc         | 26 ++++++++----
 tcg/i386/tcg-target.c.inc        | 46 ++++++++++++---------
 tcg/loongarch64/tcg-target.c.inc | 43 ++++++++++++--------
 tcg/mips/tcg-target.c.inc        | 36 +++++++++++------
 tcg/ppc/tcg-target.c.inc         | 40 +++++++++++--------
 tcg/riscv/tcg-target.c.inc       | 38 ++++++++++--------
 tcg/s390x/tcg-target.c.inc       | 68 ++++++++++++------------------
 tcg/sparc64/tcg-target.c.inc     | 37 +++++++++--------
 tcg/tci/tcg-target.c.inc         | 17 +++++++-
 11 files changed, 230 insertions(+), 164 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
     OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
+    OUTOP(INDEX_op_sar_i32, TCGOutOpBinary, outop_sar),
+    OUTOP(INDEX_op_sar_i64, TCGOutOpBinary, outop_sar),
     OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
     OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st8_i32:
     case INDEX_op_st16_i32:
     case INDEX_op_st_i32:
-    case INDEX_op_sar_i32:
     case INDEX_op_extract_i32:
     case INDEX_op_sextract_i32:
     case INDEX_op_deposit_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st16_i64:
     case INDEX_op_st32_i64:
     case INDEX_op_st_i64:
-    case INDEX_op_sar_i64:
     case INDEX_op_ext_i32_i64:
     case INDEX_op_extu_i32_i64:
     case INDEX_op_extract_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_orc:
     case INDEX_op_rems:
     case INDEX_op_remu:
+    case INDEX_op_sar_i32:
+    case INDEX_op_sar_i64:
     case INDEX_op_shl:
     case INDEX_op_shr:
     case INDEX_op_xor:
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
     tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
 }

-static inline void tcg_out_sar(TCGContext *s, TCGType ext,
-                               TCGReg rd, TCGReg rn, unsigned int m)
-{
-    int max = ext ? 63 : 31;
-    tcg_out_sbfm(s, ext, rd, rn, m & max, max);
-}
-
 static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
                                 TCGReg rd, TCGReg rn, unsigned int m)
 {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .out_rrr = tgen_remu,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3508, ASRV, type, a0, a1, a2);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    int max = type == TCG_TYPE_I32 ? 31 : 63;
+    tcg_out_sbfm(s, type, a0, a1, a2 & max, max);
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;

-    case INDEX_op_sar_i64:
-    case INDEX_op_sar_i32:
-        if (c2) {
-            tcg_out_sar(s, ext, a0, a1, a2);
-        } else {
-            tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_rotr_i64:
     case INDEX_op_rotr_i32:
         if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_negsetcond_i64:
         return C_O1_I2(r, r, rC);

-    case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ASR(a2));
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
+                    SHIFT_IMM_ASR(a2 & 0x1f));
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_muls2_i32:
         tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
         break;
-    case INDEX_op_sar_i32:
-        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
-                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
-        goto gen_shift32;
     case INDEX_op_rotr_i32:
         c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
-        /* Fall through.  */
-    gen_shift32:
         tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
         break;

@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_muls2_i32:
         return C_O2_I2(r, r, r, r);

-    case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
         return C_O1_I2(r, r, ri);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex cset_shift(TCGType type, unsigned flags)
     return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
 }

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+    if (have_bmi2) {
+        tcg_out_vex_modrm(s, OPC_SARX + rexw, a0, a2, a1);
+    } else {
+        tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_SAR, a0);
+    }
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+    tcg_out_mov(s, type, a0, a1);
+    tcg_out_shifti(s, SHIFT_SAR + rexw, a0, a2);
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_shift,
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const int const_args[TCG_MAX_OP_ARGS])
 {
     TCGArg a0, a1, a2;
-    int c, const_a2, vexop, rexw;
+    int c, const_a2, rexw;

 #if TCG_TARGET_REG_BITS == 64
 # define OP_32_64(x) \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         }
         break;

-    OP_32_64(sar):
-        c = SHIFT_SAR;
-        vexop = OPC_SARX;
-        goto gen_shift_maybe_vex;
     OP_32_64(rotl):
         c = SHIFT_ROL;
         goto gen_shift;
     OP_32_64(rotr):
         c = SHIFT_ROR;
         goto gen_shift;
-    gen_shift_maybe_vex:
-        if (have_bmi2) {
-            if (!const_a2) {
-                tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
-                break;
-            }
-            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
-        }
-        /* FALLTHRU */
     gen_shift:
         if (const_a2) {
             tcg_out_shifti(s, c + rexw, a0, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(re, r);

-    case INDEX_op_sar_i32:
-    case INDEX_op_sar_i64:
-        return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
-
     case INDEX_op_rotl_i32:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .out_rrr = tgen_remu,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_opc_sra_w(s, a0, a1, a2);
+    } else {
+        tcg_out_opc_sra_d(s, a0, a1, a2);
+    }
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
+    } else {
+        tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
+    }
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
         break;

-    case INDEX_op_sar_i32:
-        if (c2) {
-            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
-        } else {
-            tcg_out_opc_sra_w(s, a0, a1, a2);
-        }
-        break;
-    case INDEX_op_sar_i64:
-        if (c2) {
-            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
-        } else {
-            tcg_out_opc_sra_d(s, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_rotl_i32:
         /* transform into equivalent rotr/rotri */
         if (c2) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_qemu_ld_i64:
         return C_O1_I1(r, r);

-    case INDEX_op_sar_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i32:
333
index XXXXXXX..XXXXXXX 100644
334
--- a/tcg/mips/tcg-target.c.inc
335
+++ b/tcg/mips/tcg-target.c.inc
336
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
337
.out_rrr = tgen_remu,
338
};
339
340
+static void tgen_sar(TCGContext *s, TCGType type,
341
+ TCGReg a0, TCGReg a1, TCGReg a2)
342
+{
343
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_SRAV : OPC_DSRAV;
344
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
345
+}
346
+
347
+static void tgen_sari(TCGContext *s, TCGType type,
348
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
349
+{
350
+ if (type == TCG_TYPE_I32) {
351
+ tcg_out_opc_sa(s, OPC_SRA, a0, a1, a2);
352
+ } else {
353
+ tcg_out_dsra(s, a0, a1, a2);
354
+ }
355
+}
356
+
357
+static const TCGOutOpBinary outop_sar = {
358
+ .base.static_constraint = C_O1_I2(r, r, ri),
359
+ .out_rrr = tgen_sar,
360
+ .out_rri = tgen_sari,
361
+};
362
+
363
static void tgen_shl(TCGContext *s, TCGType type,
364
TCGReg a0, TCGReg a1, TCGReg a2)
365
{
366
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
367
tcg_out_dsra(s, a0, a1, 32);
368
break;
369
370
- case INDEX_op_sar_i32:
371
- i1 = OPC_SRAV, i2 = OPC_SRA;
372
- goto do_shift;
373
case INDEX_op_rotr_i32:
374
i1 = OPC_ROTRV, i2 = OPC_ROTR;
375
- do_shift:
376
if (c2) {
377
tcg_out_opc_sa(s, i2, a0, a1, a2);
378
break;
379
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
380
tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
381
}
382
break;
383
- case INDEX_op_sar_i64:
384
- if (c2) {
385
- tcg_out_dsra(s, a0, a1, a2);
386
- break;
387
- }
388
- i1 = OPC_DSRAV;
389
- goto do_shiftv;
390
case INDEX_op_rotr_i64:
391
if (c2) {
392
tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
393
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
394
case INDEX_op_muls2_i64:
395
case INDEX_op_mulu2_i64:
396
return C_O2_I2(r, r, r, r);
397
- case INDEX_op_sar_i32:
398
case INDEX_op_rotr_i32:
399
case INDEX_op_rotl_i32:
400
- case INDEX_op_sar_i64:
401
case INDEX_op_rotr_i64:
402
case INDEX_op_rotl_i64:
403
return C_O1_I2(r, r, ri);
404
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .out_rrr = tgen_remu,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    uint32_t insn = type == TCG_TYPE_I32 ? SRAW : SRAD;
+    tcg_out32(s, insn | SAB(a1, a0, a2));
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    /* Limit immediate shift count lest we create an illegal insn. */
+    if (type == TCG_TYPE_I32) {
+        tcg_out_sari32(s, a0, a1, a2 & 31);
+    } else {
+        tcg_out_sari64(s, a0, a1, a2 & 63);
+    }
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
         break;

-    case INDEX_op_sar_i32:
-        if (const_args[2]) {
-            tcg_out_sari32(s, args[0], args[1], args[2]);
-        } else {
-            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
-        }
-        break;
     case INDEX_op_rotl_i32:
         if (const_args[2]) {
             tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_brcond2(s, args, const_args);
         break;

-    case INDEX_op_sar_i64:
-        if (const_args[2]) {
-            tcg_out_sari64(s, args[0], args[1], args[2]);
-        } else {
-            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
-        }
-        break;
     case INDEX_op_rotl_i64:
         if (const_args[2]) {
             tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(r, r);

-    case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .out_rrr = tgen_remu,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAW : OPC_SRA;
+    tcg_out_opc_reg(s, insn, a0, a1, a2);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAIW : OPC_SRAI;
+    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_ldst(s, OPC_SD, a0, a1, a2);
         break;

-    case INDEX_op_sar_i32:
-        if (c2) {
-            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
-        } else {
-            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
-        }
-        break;
-    case INDEX_op_sar_i64:
-        if (c2) {
-            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
-        } else {
-            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
-        }
-        break;
-
     case INDEX_op_rotl_i32:
         if (c2) {
             tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_negsetcond_i64:
         return C_O1_I2(r, r, rI);

-    case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i64:
         return C_O1_I2(r, r, ri);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_sar_int(TCGContext *s, TCGType type, TCGReg dst,
+                         TCGReg src, TCGReg v, tcg_target_long i)
+{
+    if (type != TCG_TYPE_I32) {
+        tcg_out_sh64(s, RSY_SRAG, dst, src, v, i);
+    } else if (dst == src) {
+        tcg_out_sh32(s, RS_SRA, dst, v, i);
+    } else {
+        tcg_out_sh64(s, RSY_SRAK, dst, src, v, i);
+    }
+}
+
+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tgen_sar_int(s, type, a0, a1, a2, 0);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tgen_sar_int(s, type, a0, a1, TCG_REG_NONE, a2);
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl_int(TCGContext *s, TCGType type, TCGReg dst,
                          TCGReg src, TCGReg v, tcg_target_long i)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
 {
-    S390Opcode op, op2;
     TCGArg a0, a1, a2;

     switch (opc) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
         break;

-    do_shift32:
-        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
-        if (a0 == a1) {
-            if (const_args[2]) {
-                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
-            } else {
-                tcg_out_sh32(s, op, a0, a2, 0);
-            }
-        } else {
-            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
-            if (const_args[2]) {
-                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
-            } else {
-                tcg_out_sh64(s, op2, a0, a1, a2, 0);
-            }
-        }
-        break;
-    case INDEX_op_sar_i32:
-        op = RS_SRA;
-        op2 = RSY_SRAK;
-        goto do_shift32;
-
     case INDEX_op_rotl_i32:
         /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
         if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
         break;

-    do_shift64:
-        if (const_args[2]) {
-            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
-        } else {
-            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
-        }
-        break;
-    case INDEX_op_sar_i64:
-        op = RSY_SRAG;
-        goto do_shift64;
-
     case INDEX_op_rotl_i64:
         if (const_args[2]) {
             tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(r, r);

-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_clz_i64:
         return C_O1_I2(r, r, rI);

-    case INDEX_op_sar_i32:
-        return C_O1_I2(r, r, ri);
-
     case INDEX_op_brcond_i32:
         return C_O0_I2(r, ri);
     case INDEX_op_brcond_i64:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .base.static_constraint = C_NotImplemented,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
+    tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
+    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, rJ),
+    .out_rrr = tgen_sar,
+    .out_rri = tgen_sari,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_st32_i64:
         tcg_out_ldst(s, a0, a1, a2, STW);
         break;
-    do_shift32:
-        /* Limit immediate shift count lest we create an illegal insn. */
-        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
-        break;
-    case INDEX_op_sar_i32:
-        c = SHIFT_SRA;
-        goto do_shift32;

     case INDEX_op_brcond_i32:
         tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_st_i64:
         tcg_out_ldst(s, a0, a1, a2, STX);
         break;
-    do_shift64:
-        /* Limit immediate shift count lest we create an illegal insn. */
-        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
-        break;
-    case INDEX_op_sar_i64:
-        c = SHIFT_SRAX;
-        goto do_shift64;

     case INDEX_op_brcond_i64:
         tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_qemu_st_i64:
         return C_O0_I2(rz, r);

-    case INDEX_op_sar_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_setcond_i32:
     case INDEX_op_setcond_i64:
     case INDEX_op_negsetcond_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(r, r);

-    case INDEX_op_sar_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .out_rrr = tgen_remu,
 };

+static void tgen_sar(TCGContext *s, TCGType type,
+                     TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    if (type < TCG_TYPE_REG) {
+        tcg_out_ext32s(s, TCG_REG_TMP, a1);
+        a1 = TCG_REG_TMP;
+    }
+    tcg_out_op_rrr(s, glue(INDEX_op_sar_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_sar = {
+    .base.static_constraint = C_O1_I2(r, r, r),
+    .out_rrr = tgen_sar,
+};
+
 static void tgen_shl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
--
2.43.0
Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 12 ++++++------
 tcg/tcg-op.c             |  4 ++--
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                | 12 ++++--------
 docs/devel/tcg-ops.rst   |  4 ++--
 tcg/tci/tcg-target.c.inc |  3 +--
 7 files changed, 18 insertions(+), 26 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(or, 1, 2, 0, TCG_OPF_INT)
 DEF(orc, 1, 2, 0, TCG_OPF_INT)
 DEF(rems, 1, 2, 0, TCG_OPF_INT)
 DEF(remu, 1, 2, 0, TCG_OPF_INT)
+DEF(sar, 1, 2, 0, TCG_OPF_INT)
 DEF(shl, 1, 2, 0, TCG_OPF_INT)
 DEF(shr, 1, 2, 0, TCG_OPF_INT)
 DEF(sub, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* shifts/rotates */
-DEF(sar_i32, 1, 2, 0, 0)
 DEF(rotl_i32, 1, 2, 0, 0)
 DEF(rotr_i32, 1, 2, 0, 0)
 DEF(deposit_i32, 1, 2, 2, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* shifts/rotates */
-DEF(sar_i64, 1, 2, 0, 0)
 DEF(rotl_i64, 1, 2, 0, 0)
 DEF(rotr_i64, 1, 2, 0, 0)
 DEF(deposit_i64, 1, 2, 2, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         }
         return (uint64_t)x >> (y & 63);

-    case INDEX_op_sar_i32:
-        return (int32_t)x >> (y & 31);
-
-    case INDEX_op_sar_i64:
+    case INDEX_op_sar:
+        if (type == TCG_TYPE_I32) {
+            return (int32_t)x >> (y & 31);
+        }
         return (int64_t)x >> (y & 63);

     case INDEX_op_rotr_i32:
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
     }

     switch (op->opc) {
-    CASE_OP_32_64(sar):
+    case INDEX_op_sar:
         /*
          * Arithmetic right shift will not reduce the number of
          * input sign repetitions.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         CASE_OP_32_64(rotl):
         CASE_OP_32_64(rotr):
-        CASE_OP_32_64(sar):
+        case INDEX_op_sar:
         case INDEX_op_shl:
         case INDEX_op_shr:
             done = fold_shift(&ctx, op);
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

 void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
+    tcg_gen_op3_i32(INDEX_op_sar, ret, arg1, arg2);
 }

 void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_REG_BITS == 64) {
-        tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_sar, ret, arg1, arg2);
     } else {
         gen_helper_sar_i64(ret, arg1, arg2);
     }
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
     OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
-    OUTOP(INDEX_op_sar_i32, TCGOutOpBinary, outop_sar),
-    OUTOP(INDEX_op_sar_i64, TCGOutOpBinary, outop_sar),
+    OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
     OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
     OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
     OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_orc:
     case INDEX_op_rems:
     case INDEX_op_remu:
-    case INDEX_op_sar_i32:
-    case INDEX_op_sar_i64:
+    case INDEX_op_sar:
     case INDEX_op_shl:
     case INDEX_op_shr:
     case INDEX_op_xor:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = regs[r1] >> (regs[r2] % TCG_TARGET_REG_BITS);
         break;
-    case INDEX_op_sar_i32:
+    case INDEX_op_sar:
         tci_args_rrr(insn, &r0, &r1, &r2);
-        regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
+        regs[r0] = ((tcg_target_long)regs[r1]
+                    >> (regs[r2] % TCG_TARGET_REG_BITS));
         break;
 #if TCG_TARGET_HAS_rot_i32
     case INDEX_op_rotl_i32:
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

         /* Shift/rotate operations (64 bit). */

-    case INDEX_op_sar_i64:
-        tci_args_rrr(insn, &r0, &r1, &r2);
-        regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
-        break;
 #if TCG_TARGET_HAS_rot_i64
     case INDEX_op_rotl_i64:
         tci_args_rrr(insn, &r0, &r1, &r2);
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_orc:
     case INDEX_op_rems:
     case INDEX_op_remu:
+    case INDEX_op_sar:
     case INDEX_op_shl:
     case INDEX_op_shr:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_sar_i32:
-    case INDEX_op_sar_i64:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Shifts/Rotates
     - | *t0* = *t1* >> *t2* (unsigned)
       | Unspecified behavior for negative or out-of-range shifts.

-   * - sar_i32/i64 *t0*, *t1*, *t2*
+   * - sar *t0*, *t1*, *t2*

     - | *t0* = *t1* >> *t2* (signed)
-      | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
+      | Unspecified behavior for negative or out-of-range shifts.

   * - rotl_i32/i64 *t0*, *t1*, *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_sar(TCGContext *s, TCGType type,
         tcg_out_ext32s(s, TCG_REG_TMP, a1);
         a1 = TCG_REG_TMP;
     }
-    tcg_out_op_rrr(s, glue(INDEX_op_sar_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
 }

 static const TCGOutOpBinary outop_sar = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_ldst(s, opc, args[0], args[1], args[2]);
         break;

-    CASE_32_64(sar)
     CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
     CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
     CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
--
2.43.0
Deleted patch

Many host architectures do not implement both rotate right
and rotate left and require the compiler to negate the
shift count to rotate the opposite direction.  We have been
requiring the backend to perform this transformation.
Do this during opcode expansion so that the next patch
can drop support where possible in the backend.
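For reference, the transformation is just the rotate identity, sketched
here in plain C with explicit masking so the shifts stay in range
(standalone illustration, not TCG code):

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << (n & 31)) | (x >> (-n & 31));
    }

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> (n & 31)) | (x << (-n & 31));
    }

    /* For every n: rol32(x, n) == ror32(x, -n & 31), and vice versa,
     * so one native rotate direction plus a negate covers both opcodes. */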
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op.c | 98 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 54 insertions(+), 44 deletions(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)

 void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (TCG_TARGET_HAS_rot_i32) {
+    if (tcg_op_supported(INDEX_op_rotl_i32, TCG_TYPE_I32, 0)) {
         tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_rotr_i32, TCG_TYPE_I32, 0)) {
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        tcg_gen_neg_i32(t0, arg2);
+        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, t0);
+        tcg_temp_free_i32(t0);
     } else {
-        TCGv_i32 t0, t1;
-
-        t0 = tcg_temp_ebb_new_i32();
-        t1 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
         tcg_gen_shl_i32(t0, arg1, arg2);
-        tcg_gen_subfi_i32(t1, 32, arg2);
+        tcg_gen_neg_i32(t1, arg2);
         tcg_gen_shr_i32(t1, arg1, t1);
         tcg_gen_or_i32(ret, t0, t1);
         tcg_temp_free_i32(t0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i32(ret, arg1);
-    } else if (TCG_TARGET_HAS_rot_i32) {
-        tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2));
+    } else if (tcg_op_supported(INDEX_op_rotl_i32, TCG_TYPE_I32, 0)) {
+        TCGv_i32 t0 = tcg_constant_i32(arg2);
+        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, t0);
+    } else if (tcg_op_supported(INDEX_op_rotr_i32, TCG_TYPE_I32, 0)) {
+        TCGv_i32 t0 = tcg_constant_i32(32 - arg2);
+        tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, t0);
     } else {
-        TCGv_i32 t0, t1;
-        t0 = tcg_temp_ebb_new_i32();
-        t1 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
         tcg_gen_shli_i32(t0, arg1, arg2);
         tcg_gen_shri_i32(t1, arg1, 32 - arg2);
         tcg_gen_or_i32(ret, t0, t1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)

 void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (TCG_TARGET_HAS_rot_i32) {
+    if (tcg_op_supported(INDEX_op_rotr_i32, TCG_TYPE_I32, 0)) {
         tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_rotl_i32, TCG_TYPE_I32, 0)) {
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        tcg_gen_neg_i32(t0, arg2);
+        tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, t0);
+        tcg_temp_free_i32(t0);
     } else {
-        TCGv_i32 t0, t1;
-
-        t0 = tcg_temp_ebb_new_i32();
-        t1 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
         tcg_gen_shr_i32(t0, arg1, arg2);
-        tcg_gen_subfi_i32(t1, 32, arg2);
+        tcg_gen_neg_i32(t1, arg2);
         tcg_gen_shl_i32(t1, arg1, t1);
         tcg_gen_or_i32(ret, t0, t1);
         tcg_temp_free_i32(t0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 {
     tcg_debug_assert(arg2 >= 0 && arg2 < 32);
-    /* some cases can be optimized here */
-    if (arg2 == 0) {
-        tcg_gen_mov_i32(ret, arg1);
-    } else {
-        tcg_gen_rotli_i32(ret, arg1, 32 - arg2);
-    }
+    tcg_gen_rotli_i32(ret, arg1, -arg2 & 31);
 }

 void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)

 void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    if (TCG_TARGET_HAS_rot_i64) {
+    if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
         tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_rotr_i64, TCG_TYPE_I64, 0)) {
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        tcg_gen_neg_i64(t0, arg2);
+        tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, t0);
+        tcg_temp_free_i64(t0);
     } else {
-        TCGv_i64 t0, t1;
-        t0 = tcg_temp_ebb_new_i64();
-        t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_shl_i64(t0, arg1, arg2);
-        tcg_gen_subfi_i64(t1, 64, arg2);
+        tcg_gen_neg_i64(t1, arg2);
         tcg_gen_shr_i64(t1, arg1, t1);
         tcg_gen_or_i64(ret, t0, t1);
         tcg_temp_free_i64(t0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
     /* some cases can be optimized here */
     if (arg2 == 0) {
         tcg_gen_mov_i64(ret, arg1);
-    } else if (TCG_TARGET_HAS_rot_i64) {
-        tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2));
+    } else if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
+        TCGv_i64 t0 = tcg_constant_i64(arg2);
+        tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, t0);
+    } else if (tcg_op_supported(INDEX_op_rotr_i64, TCG_TYPE_I64, 0)) {
+        TCGv_i64 t0 = tcg_constant_i64(64 - arg2);
+        tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, t0);
     } else {
-        TCGv_i64 t0, t1;
-        t0 = tcg_temp_ebb_new_i64();
-        t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_shli_i64(t0, arg1, arg2);
         tcg_gen_shri_i64(t1, arg1, 64 - arg2);
         tcg_gen_or_i64(ret, t0, t1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)

 void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    if (TCG_TARGET_HAS_rot_i64) {
+    if (tcg_op_supported(INDEX_op_rotr_i64, TCG_TYPE_I64, 0)) {
         tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
+    } else if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        tcg_gen_neg_i64(t0, arg2);
+        tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, t0);
+        tcg_temp_free_i64(t0);
     } else {
-        TCGv_i64 t0, t1;
-        t0 = tcg_temp_ebb_new_i64();
-        t1 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
         tcg_gen_shr_i64(t0, arg1, arg2);
-        tcg_gen_subfi_i64(t1, 64, arg2);
+        tcg_gen_neg_i64(t1, arg2);
         tcg_gen_shl_i64(t1, arg1, t1);
         tcg_gen_or_i64(ret, t0, t1);
         tcg_temp_free_i64(t0);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
 {
     tcg_debug_assert(arg2 >= 0 && arg2 < 64);
-    /* some cases can be optimized here */
-    if (arg2 == 0) {
-        tcg_gen_mov_i64(ret, arg1);
-    } else {
-        tcg_gen_rotli_i64(ret, arg1, 64 - arg2);
-    }
+    tcg_gen_rotli_i64(ret, arg1, -arg2 & 63);
 }

 void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
--
2.43.0
Deleted patch

For aarch64, arm, loongarch64, mips, we can drop rotl.
For ppc, s390x we can drop rotr.
Only x86, riscv, tci have both rotl and rotr.
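A quick standalone check of the equivalence these per-backend choices rely
on (plain C, illustrative only; not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << (n & 31)) | (x >> (-n & 31));
    }
    static uint32_t ror32(uint32_t x, unsigned n)
    {
        return (x >> (n & 31)) | (x << (-n & 31));
    }

    int main(void)
    {
        for (unsigned n = 0; n < 32; n++) {
            assert(rol32(0xdeadbeef, n) == ror32(0xdeadbeef, -n & 31));
            assert(ror32(0xdeadbeef, n) == rol32(0xdeadbeef, -n & 31));
        }
        return 0;
    }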
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-has.h     |  2 -
 tcg/arm/tcg-target-has.h         |  1 -
 tcg/i386/tcg-target-has.h        |  2 -
 tcg/loongarch64/tcg-target-has.h |  2 -
 tcg/mips/tcg-target-has.h        |  2 -
 tcg/ppc/tcg-target-has.h         |  2 -
 tcg/riscv/tcg-target-has.h       |  2 -
 tcg/s390x/tcg-target-has.h       |  2 -
 tcg/sparc64/tcg-target-has.h     |  2 -
 tcg/tcg-has.h                    |  1 -
 tcg/tci/tcg-target-has.h         |  2 -
 tcg/tcg.c                        | 14 +++---
 tcg/tci.c                        | 12 ++---
 tcg/aarch64/tcg-target.c.inc     | 62 +++++++++---------------
 tcg/arm/tcg-target.c.inc         | 44 ++++++++---------
 tcg/i386/tcg-target.c.inc        | 62 ++++++++++++++++--------
 tcg/loongarch64/tcg-target.c.inc | 70 ++++++++++++---------------
 tcg/mips/tcg-target.c.inc        | 75 +++++++++++++----------------
 tcg/ppc/tcg-target.c.inc         | 70 ++++++++++++---------------
 tcg/riscv/tcg-target.c.inc       | 83 ++++++++++++++++++--------------
 tcg/s390x/tcg-target.c.inc       | 72 +++++++++++----------------
 tcg/sparc64/tcg-target.c.inc     |  8 +++
 tcg/tci/tcg-target-opc.h.inc     |  2 +
 tcg/tci/tcg-target.c.inc         | 34 ++++++++++---
 24 files changed, 306 insertions(+), 322 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 /* optional instructions */
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
-#define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_bswap16_i64      1
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
-#define TCG_TARGET_HAS_rot_i64          1
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          1
 #define TCG_TARGET_HAS_ctpop_i64        0
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 /* optional instructions */
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
-#define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          use_armv7_instructions
 #define TCG_TARGET_HAS_ctpop_i32        0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define have_avx512vbmi2  ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)

 /* optional instructions */
-#define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_clz_i32          1
@@ -XXX,XX +XXX,XX @@
 #if TCG_TARGET_REG_BITS == 64
 /* Keep 32-bit values zero-extended in a register.  */
 #define TCG_TARGET_HAS_extr_i64_i32     1
-#define TCG_TARGET_HAS_rot_i64          1
 #define TCG_TARGET_HAS_bswap16_i64      1
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

 /* optional instructions */
 #define TCG_TARGET_HAS_negsetcond_i32   0
-#define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_extract2_i32     0
 #define TCG_TARGET_HAS_add2_i32         0
 #define TCG_TARGET_HAS_sub2_i32         0
@@ -XXX,XX +XXX,XX @@

 /* 64-bit operations */
 #define TCG_TARGET_HAS_negsetcond_i64   0
-#define TCG_TARGET_HAS_rot_i64          1
 #define TCG_TARGET_HAS_extract2_i64     0
 #define TCG_TARGET_HAS_extr_i64_i32     1
 #define TCG_TARGET_HAS_bswap16_i64      1
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;

 /* optional instructions detected at runtime */
 #define TCG_TARGET_HAS_extract2_i32     0
-#define TCG_TARGET_HAS_rot_i32          use_mips32r2_instructions
 #define TCG_TARGET_HAS_clz_i32          use_mips32r2_instructions
 #define TCG_TARGET_HAS_ctz_i32          0
 #define TCG_TARGET_HAS_ctpop_i32        0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
 #define TCG_TARGET_HAS_extract2_i64     0
-#define TCG_TARGET_HAS_rot_i64          use_mips32r2_instructions
 #define TCG_TARGET_HAS_clz_i64          use_mips32r2_instructions
 #define TCG_TARGET_HAS_ctz_i64          0
 #define TCG_TARGET_HAS_ctpop_i64        0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define have_vsx      (cpuinfo & CPUINFO_VSX)

 /* optional instructions */
-#define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_bswap16_i32      1
 #define TCG_TARGET_HAS_bswap32_i32      1
 #define TCG_TARGET_HAS_clz_i32          1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_add2_i32         0
 #define TCG_TARGET_HAS_sub2_i32         0
 #define TCG_TARGET_HAS_extr_i64_i32     0
-#define TCG_TARGET_HAS_rot_i64          1
 #define TCG_TARGET_HAS_bswap16_i64      1
 #define TCG_TARGET_HAS_bswap32_i64      1
 #define TCG_TARGET_HAS_bswap64_i64      1
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

 /* optional instructions */
 #define TCG_TARGET_HAS_negsetcond_i32   1
-#define TCG_TARGET_HAS_rot_i32          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_extract2_i32     0
 #define TCG_TARGET_HAS_add2_i32         1
 #define TCG_TARGET_HAS_sub2_i32         1
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_qemu_st8_i32     0

 #define TCG_TARGET_HAS_negsetcond_i64   1
-#define TCG_TARGET_HAS_rot_i64          (cpuinfo & CPUINFO_ZBB)
 #define TCG_TARGET_HAS_extract2_i64     0
 #define TCG_TARGET_HAS_extr_i64_i32     1
 #define TCG_TARGET_HAS_bswap16_i64      (cpuinfo & CPUINFO_ZBB)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
     ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

 /* optional instructions */
-#define TCG_TARGET_HAS_rot_i32        1
 #define TCG_TARGET_HAS_bswap16_i32    1
 #define TCG_TARGET_HAS_bswap32_i32    1
 #define TCG_TARGET_HAS_clz_i32        0
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_extr_i64_i32   0
 #define TCG_TARGET_HAS_qemu_st8_i32   0

-#define TCG_TARGET_HAS_rot_i64        1
 #define TCG_TARGET_HAS_bswap16_i64    1
 #define TCG_TARGET_HAS_bswap32_i64    1
 #define TCG_TARGET_HAS_bswap64_i64    1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #endif

 /* optional instructions */
-#define TCG_TARGET_HAS_rot_i32          0
 #define TCG_TARGET_HAS_bswap16_i32      0
 #define TCG_TARGET_HAS_bswap32_i32      0
 #define TCG_TARGET_HAS_clz_i32          0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_qemu_st8_i32     0

 #define TCG_TARGET_HAS_extr_i64_i32     0
-#define TCG_TARGET_HAS_rot_i64          0
 #define TCG_TARGET_HAS_bswap16_i64      0
 #define TCG_TARGET_HAS_bswap32_i64      0
 #define TCG_TARGET_HAS_bswap64_i64      0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
 #if TCG_TARGET_REG_BITS == 32
 /* Turn some undef macros into false macros.  */
 #define TCG_TARGET_HAS_extr_i64_i32     0
-#define TCG_TARGET_HAS_rot_i64          0
 #define TCG_TARGET_HAS_bswap16_i64      0
 #define TCG_TARGET_HAS_bswap32_i64      0
 #define TCG_TARGET_HAS_bswap64_i64      0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        1
-#define TCG_TARGET_HAS_rot_i32          1
 #define TCG_TARGET_HAS_negsetcond_i32   0
 #define TCG_TARGET_HAS_muls2_i32        1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_clz_i64          1
 #define TCG_TARGET_HAS_ctz_i64          1
 #define TCG_TARGET_HAS_ctpop_i64        1
-#define TCG_TARGET_HAS_rot_i64          1
 #define TCG_TARGET_HAS_negsetcond_i64   0
 #define TCG_TARGET_HAS_muls2_i64        1
 #define TCG_TARGET_HAS_add2_i32         1
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
     OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
     OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
+    OUTOP(INDEX_op_rotl_i32, TCGOutOpBinary, outop_rotl),
+    OUTOP(INDEX_op_rotl_i64, TCGOutOpBinary, outop_rotl),
+    OUTOP(INDEX_op_rotr_i32, TCGOutOpBinary, outop_rotr),
+    OUTOP(INDEX_op_rotr_i64, TCGOutOpBinary, outop_rotr),
     OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
     OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
     OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_negsetcond_i32:
         return TCG_TARGET_HAS_negsetcond_i32;
-    case INDEX_op_rotl_i32:
-    case INDEX_op_rotr_i32:
-        return TCG_TARGET_HAS_rot_i32;
     case INDEX_op_extract2_i32:
         return TCG_TARGET_HAS_extract2_i32;
     case INDEX_op_add2_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)

     case INDEX_op_negsetcond_i64:
         return TCG_TARGET_HAS_negsetcond_i64;
-    case INDEX_op_rotl_i64:
-    case INDEX_op_rotr_i64:
-        return TCG_TARGET_HAS_rot_i64;
     case INDEX_op_extract2_i64:
         return TCG_TARGET_HAS_extract2_i64;
     case INDEX_op_extrl_i64_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_orc:
     case INDEX_op_rems:
     case INDEX_op_remu:
+    case INDEX_op_rotl_i32:
+    case INDEX_op_rotl_i64:
+    case INDEX_op_rotr_i32:
+    case INDEX_op_rotr_i64:
     case INDEX_op_sar:
     case INDEX_op_shl:
     case INDEX_op_shr:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         regs[r0] = ((tcg_target_long)regs[r1]
                     >> (regs[r2] % TCG_TARGET_REG_BITS));
         break;
-#if TCG_TARGET_HAS_rot_i32
-    case INDEX_op_rotl_i32:
+    case INDEX_op_tci_rotl32:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = rol32(regs[r1], regs[r2] & 31);
         break;
-    case INDEX_op_rotr_i32:
+    case INDEX_op_tci_rotr32:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ror32(regs[r1], regs[r2] & 31);
         break;
-#endif
     case INDEX_op_deposit_i32:
         tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
         regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

         /* Shift/rotate operations (64 bit). */

-#if TCG_TARGET_HAS_rot_i64
     case INDEX_op_rotl_i64:
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = rol64(regs[r1], regs[r2] & 63);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         tci_args_rrr(insn, &r0, &r1, &r2);
         regs[r0] = ror64(regs[r1], regs[r2] & 63);
         break;
-#endif
     case INDEX_op_deposit_i64:
         tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
         regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_shr:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_rotl_i32:
     case INDEX_op_rotl_i64:
-    case INDEX_op_rotr_i32:
     case INDEX_op_rotr_i64:
     case INDEX_op_clz_i32:
     case INDEX_op_clz_i64:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_tci_divu32:
     case INDEX_op_tci_rems32:
    case INDEX_op_tci_remu32:
+    case INDEX_op_tci_rotl32:
+    case INDEX_op_tci_rotr32:
         tci_args_rrr(insn, &r0, &r1, &r2);
         info->fprintf_func(info->stream, "%-12s  %s, %s, %s",
                            op_name, str_r(r0), str_r(r1), str_r(r2));
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
     tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
 }

-static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
-                                TCGReg rd, TCGReg rn, unsigned int m)
-{
-    int max = ext ? 63 : 31;
-    tcg_out_extr(s, ext, rd, rn, rn, m & max);
-}
-
-static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
-                                TCGReg rd, TCGReg rn, unsigned int m)
-{
-    int max = ext ? 63 : 31;
-    tcg_out_extr(s, ext, rd, rn, rn, -m & max);
-}
-
 static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned lsb, unsigned width)
 {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .out_rrr = tgen_remu,
 };

+static const TCGOutOpBinary outop_rotl = {
+    .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_insn(s, 3508, RORV, type, a0, a1, a2);
+}
+
+static void tgen_rotri(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    int max = type == TCG_TYPE_I32 ? 31 : 63;
+    tcg_out_extr(s, type, a0, a1, a1, a2 & max);
+}
+
+static const TCGOutOpBinary outop_rotr = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_rotr,
+    .out_rri = tgen_rotri,
+};
+
 static void tgen_sar(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
         break;

-    case INDEX_op_rotr_i64:
-    case INDEX_op_rotr_i32:
-        if (c2) {
-            tcg_out_rotr(s, ext, a0, a1, a2);
-        } else {
-            tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2);
-        }
-        break;
-
-    case INDEX_op_rotl_i64:
-    case INDEX_op_rotl_i32:
-        if (c2) {
-            tcg_out_rotl(s, ext, a0, a1, a2);
-        } else {
-            tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP0, TCG_REG_XZR, a2);
-            tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP0);
-        }
-        break;
-
     case INDEX_op_clz_i64:
     case INDEX_op_clz_i32:
         tcg_out_cltz(s, ext, a0, a1, a2, c2, false);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_negsetcond_i64:
         return C_O1_I2(r, r, rC);

-    case INDEX_op_rotl_i32:
-    case INDEX_op_rotr_i32:
-    case INDEX_op_rotl_i64:
-    case INDEX_op_rotr_i64:
-        return C_O1_I2(r, r, ri);
-
     case INDEX_op_clz_i32:
     case INDEX_op_ctz_i32:
     case INDEX_op_clz_i64:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
     .base.static_constraint = C_NotImplemented,
 };

+static const TCGOutOpBinary outop_rotl = {
+    .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_rotr(TCGContext *s, TCGType type,
+                      TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ROR(a2));
+}
+
+static void tgen_rotri(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_IMM_ROR(a2 & 0x1f));
+}
+
+static const TCGOutOpBinary outop_rotr = {
+    .base.static_constraint = C_O1_I2(r, r, ri),
+    .out_rrr = tgen_rotr,
+    .out_rri = tgen_rotri,
+};
+
 static void tgen_sar(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_muls2_i32:
         tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
         break;
-    case INDEX_op_rotr_i32:
-        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
- SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
476
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
477
- break;
478
-
479
- case INDEX_op_rotl_i32:
480
- if (const_args[2]) {
481
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
482
- ((0x20 - args[2]) & 0x1f) ?
483
- SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
484
- SHIFT_IMM_LSL(0));
485
- } else {
486
- tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
487
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
488
- SHIFT_REG_ROR(TCG_REG_TMP));
489
- }
490
- break;
491
492
case INDEX_op_ctz_i32:
493
tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
494
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
495
case INDEX_op_muls2_i32:
496
return C_O2_I2(r, r, r, r);
497
498
- case INDEX_op_rotl_i32:
499
- case INDEX_op_rotr_i32:
500
- return C_O1_I2(r, r, ri);
501
-
502
case INDEX_op_brcond_i32:
503
return C_O0_I2(r, rIN);
504
case INDEX_op_deposit_i32:
505
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
506
index XXXXXXX..XXXXXXX 100644
507
--- a/tcg/i386/tcg-target.c.inc
508
+++ b/tcg/i386/tcg-target.c.inc
509
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
510
.base.static_constraint = C_NotImplemented,
511
};
512
513
+static void tgen_rotl(TCGContext *s, TCGType type,
514
+ TCGReg a0, TCGReg a1, TCGReg a2)
515
+{
516
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
517
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_ROL, a0);
518
+}
519
+
520
+static void tgen_rotli(TCGContext *s, TCGType type,
521
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
522
+{
523
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
524
+ tcg_out_shifti(s, SHIFT_ROL + rexw, a0, a2);
525
+}
526
+
527
+static const TCGOutOpBinary outop_rotl = {
528
+ .base.static_constraint = C_O1_I2(r, 0, ci),
529
+ .out_rrr = tgen_rotl,
530
+ .out_rri = tgen_rotli,
531
+};
532
+
533
+static void tgen_rotr(TCGContext *s, TCGType type,
534
+ TCGReg a0, TCGReg a1, TCGReg a2)
535
+{
536
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
537
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, SHIFT_ROR, a0);
538
+}
539
+
540
+static void tgen_rotri(TCGContext *s, TCGType type,
541
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
542
+{
543
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
544
+ tcg_out_shifti(s, SHIFT_ROR + rexw, a0, a2);
545
+}
546
+
547
+static const TCGOutOpBinary outop_rotr = {
548
+ .base.static_constraint = C_O1_I2(r, 0, ci),
549
+ .out_rrr = tgen_rotr,
550
+ .out_rri = tgen_rotri,
551
+};
552
+
553
static TCGConstraintSetIndex cset_shift(TCGType type, unsigned flags)
554
{
555
return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
556
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
557
const int const_args[TCG_MAX_OP_ARGS])
558
{
559
TCGArg a0, a1, a2;
560
- int c, const_a2, rexw;
561
+ int const_a2, rexw;
562
563
#if TCG_TARGET_REG_BITS == 64
564
# define OP_32_64(x) \
565
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
566
}
567
break;
568
569
- OP_32_64(rotl):
570
- c = SHIFT_ROL;
571
- goto gen_shift;
572
- OP_32_64(rotr):
573
- c = SHIFT_ROR;
574
- goto gen_shift;
575
- gen_shift:
576
- if (const_a2) {
577
- tcg_out_shifti(s, c + rexw, a0, a2);
578
- } else {
579
- tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
580
- }
581
- break;
582
-
583
OP_32_64(ctz):
584
tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
585
break;
586
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
587
case INDEX_op_st_i64:
588
return C_O0_I2(re, r);
589
590
- case INDEX_op_rotl_i32:
591
- case INDEX_op_rotl_i64:
592
- case INDEX_op_rotr_i32:
593
- case INDEX_op_rotr_i64:
594
- return C_O1_I2(r, 0, ci);
595
-
596
case INDEX_op_brcond_i32:
597
case INDEX_op_brcond_i64:
598
return C_O0_I2(r, reT);
599
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
600
index XXXXXXX..XXXXXXX 100644
601
--- a/tcg/loongarch64/tcg-target.c.inc
602
+++ b/tcg/loongarch64/tcg-target.c.inc
603
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
604
.out_rrr = tgen_remu,
605
};
606
607
+static const TCGOutOpBinary outop_rotl = {
608
+ .base.static_constraint = C_NotImplemented,
609
+};
610
+
611
+static void tgen_rotr(TCGContext *s, TCGType type,
612
+ TCGReg a0, TCGReg a1, TCGReg a2)
613
+{
614
+ if (type == TCG_TYPE_I32) {
615
+ tcg_out_opc_rotr_w(s, a0, a1, a2);
616
+ } else {
617
+ tcg_out_opc_rotr_d(s, a0, a1, a2);
618
+ }
619
+}
620
+
621
+static void tgen_rotri(TCGContext *s, TCGType type,
622
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
623
+{
624
+ if (type == TCG_TYPE_I32) {
625
+ tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
626
+ } else {
627
+ tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
628
+ }
629
+}
630
+
631
+static const TCGOutOpBinary outop_rotr = {
632
+ .base.static_constraint = C_O1_I2(r, r, ri),
633
+ .out_rrr = tgen_rotr,
634
+ .out_rri = tgen_rotri,
635
+};
636
+
637
static void tgen_sar(TCGContext *s, TCGType type,
638
TCGReg a0, TCGReg a1, TCGReg a2)
639
{
640
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
641
tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
642
break;
643
644
- case INDEX_op_rotl_i32:
645
- /* transform into equivalent rotr/rotri */
646
- if (c2) {
647
- tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
648
- } else {
649
- tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
650
- tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
651
- }
652
- break;
653
- case INDEX_op_rotl_i64:
654
- /* transform into equivalent rotr/rotri */
655
- if (c2) {
656
- tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
657
- } else {
658
- tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
659
- tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
660
- }
661
- break;
662
-
663
- case INDEX_op_rotr_i32:
664
- if (c2) {
665
- tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
666
- } else {
667
- tcg_out_opc_rotr_w(s, a0, a1, a2);
668
- }
669
- break;
670
- case INDEX_op_rotr_i64:
671
- if (c2) {
672
- tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
673
- } else {
674
- tcg_out_opc_rotr_d(s, a0, a1, a2);
675
- }
676
- break;
677
-
678
case INDEX_op_setcond_i32:
679
case INDEX_op_setcond_i64:
680
tcg_out_setcond(s, args[3], a0, a1, a2, c2);
681
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
682
case INDEX_op_qemu_ld_i64:
683
return C_O1_I1(r, r);
684
685
- case INDEX_op_rotl_i32:
686
- case INDEX_op_rotl_i64:
687
- case INDEX_op_rotr_i32:
688
- case INDEX_op_rotr_i64:
689
- return C_O1_I2(r, r, ri);
690
-
691
case INDEX_op_clz_i32:
692
case INDEX_op_clz_i64:
693
case INDEX_op_ctz_i32:
694
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
695
index XXXXXXX..XXXXXXX 100644
696
--- a/tcg/mips/tcg-target.c.inc
697
+++ b/tcg/mips/tcg-target.c.inc
698
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
699
.out_rrr = tgen_remu,
700
};
701
702
+static const TCGOutOpBinary outop_rotl = {
703
+ .base.static_constraint = C_NotImplemented,
704
+};
705
+
706
+static TCGConstraintSetIndex cset_rotr(TCGType type, unsigned flags)
707
+{
708
+ return use_mips32r2_instructions ? C_O1_I2(r, r, ri) : C_NotImplemented;
709
+}
710
+
711
+static void tgen_rotr(TCGContext *s, TCGType type,
712
+ TCGReg a0, TCGReg a1, TCGReg a2)
713
+{
714
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_ROTRV : OPC_DROTRV;
715
+ tcg_out_opc_reg(s, insn, a0, a2, a1);
716
+}
717
+
718
+static void tgen_rotri(TCGContext *s, TCGType type,
719
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
720
+{
721
+ if (type == TCG_TYPE_I32) {
722
+ tcg_out_opc_sa(s, OPC_ROTR, a0, a1, a2);
723
+ } else {
724
+ tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
725
+ }
726
+}
727
+
728
+static const TCGOutOpBinary outop_rotr = {
729
+ .base.static_constraint = C_Dynamic,
730
+ .base.dynamic_constraint = cset_rotr,
731
+ .out_rrr = tgen_rotr,
732
+ .out_rri = tgen_rotri,
733
+};
734
+
735
static void tgen_sar(TCGContext *s, TCGType type,
736
TCGReg a0, TCGReg a1, TCGReg a2)
737
{
738
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
739
const TCGArg args[TCG_MAX_OP_ARGS],
740
const int const_args[TCG_MAX_OP_ARGS])
741
{
742
- MIPSInsn i1, i2;
743
+ MIPSInsn i1;
744
TCGArg a0, a1, a2;
745
- int c2;
746
747
a0 = args[0];
748
a1 = args[1];
749
a2 = args[2];
750
- c2 = const_args[2];
751
752
switch (opc) {
753
case INDEX_op_goto_ptr:
754
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
755
tcg_out_dsra(s, a0, a1, 32);
756
break;
757
758
- case INDEX_op_rotr_i32:
759
- i1 = OPC_ROTRV, i2 = OPC_ROTR;
760
- if (c2) {
761
- tcg_out_opc_sa(s, i2, a0, a1, a2);
762
- break;
763
- }
764
- do_shiftv:
765
- tcg_out_opc_reg(s, i1, a0, a2, a1);
766
- break;
767
- case INDEX_op_rotl_i32:
768
- if (c2) {
769
- tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2);
770
- } else {
771
- tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2);
772
- tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
773
- }
774
- break;
775
- case INDEX_op_rotr_i64:
776
- if (c2) {
777
- tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
778
- break;
779
- }
780
- i1 = OPC_DROTRV;
781
- goto do_shiftv;
782
- case INDEX_op_rotl_i64:
783
- if (c2) {
784
- tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2);
785
- } else {
786
- tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2);
787
- tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1);
788
- }
789
- break;
790
-
791
case INDEX_op_clz_i32:
792
tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
793
break;
794
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
795
case INDEX_op_muls2_i64:
796
case INDEX_op_mulu2_i64:
797
return C_O2_I2(r, r, r, r);
798
- case INDEX_op_rotr_i32:
799
- case INDEX_op_rotl_i32:
800
- case INDEX_op_rotr_i64:
801
- case INDEX_op_rotl_i64:
802
- return C_O1_I2(r, r, ri);
803
case INDEX_op_clz_i32:
804
case INDEX_op_clz_i64:
805
return C_O1_I2(r, r, rzW);
806
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
807
index XXXXXXX..XXXXXXX 100644
808
--- a/tcg/ppc/tcg-target.c.inc
809
+++ b/tcg/ppc/tcg-target.c.inc
810
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
811
.out_rrr = tgen_remu,
812
};
813
814
+static void tgen_rotl(TCGContext *s, TCGType type,
815
+ TCGReg a0, TCGReg a1, TCGReg a2)
816
+{
817
+ if (type == TCG_TYPE_I32) {
818
+ tcg_out32(s, RLWNM | SAB(a1, a0, a2) | MB(0) | ME(31));
819
+ } else {
820
+ tcg_out32(s, RLDCL | SAB(a1, a0, a2) | MB64(0));
821
+ }
822
+}
823
+
824
+static void tgen_rotli(TCGContext *s, TCGType type,
825
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
826
+{
827
+ if (type == TCG_TYPE_I32) {
828
+ tcg_out_rlw(s, RLWINM, a0, a1, a2, 0, 31);
829
+ } else {
830
+ tcg_out_rld(s, RLDICL, a0, a1, a2, 0);
831
+ }
832
+}
833
+
834
+static const TCGOutOpBinary outop_rotl = {
835
+ .base.static_constraint = C_O1_I2(r, r, ri),
836
+ .out_rrr = tgen_rotl,
837
+ .out_rri = tgen_rotli,
838
+};
839
+
840
+static const TCGOutOpBinary outop_rotr = {
841
+ .base.static_constraint = C_NotImplemented,
842
+};
843
+
844
static void tgen_sar(TCGContext *s, TCGType type,
845
TCGReg a0, TCGReg a1, TCGReg a2)
846
{
847
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
848
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
849
break;
850
851
- case INDEX_op_rotl_i32:
852
- if (const_args[2]) {
853
- tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
854
- } else {
855
- tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
856
- | MB(0) | ME(31));
857
- }
858
- break;
859
- case INDEX_op_rotr_i32:
860
- if (const_args[2]) {
861
- tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
862
- } else {
863
- tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
864
- tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
865
- | MB(0) | ME(31));
866
- }
867
- break;
868
-
869
case INDEX_op_brcond_i32:
870
tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
871
arg_label(args[3]), TCG_TYPE_I32);
872
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
873
tcg_out_brcond2(s, args, const_args);
874
break;
875
876
- case INDEX_op_rotl_i64:
877
- if (const_args[2]) {
878
- tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
879
- } else {
880
- tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
881
- }
882
- break;
883
- case INDEX_op_rotr_i64:
884
- if (const_args[2]) {
885
- tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
886
- } else {
887
- tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
888
- tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
889
- }
890
- break;
891
-
892
case INDEX_op_qemu_ld_i32:
893
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
894
break;
895
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
896
case INDEX_op_st_i64:
897
return C_O0_I2(r, r);
898
899
- case INDEX_op_rotl_i32:
900
- case INDEX_op_rotr_i32:
901
- case INDEX_op_rotl_i64:
902
- case INDEX_op_rotr_i64:
903
- return C_O1_I2(r, r, ri);
904
-
905
case INDEX_op_clz_i32:
906
case INDEX_op_ctz_i32:
907
case INDEX_op_clz_i64:
908
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
909
index XXXXXXX..XXXXXXX 100644
910
--- a/tcg/riscv/tcg-target.c.inc
911
+++ b/tcg/riscv/tcg-target.c.inc
912
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
913
.out_rrr = tgen_remu,
914
};
915
916
+static TCGConstraintSetIndex cset_rot(TCGType type, unsigned flags)
917
+{
918
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, ri) : C_NotImplemented;
919
+}
920
+
921
+static void tgen_rotr(TCGContext *s, TCGType type,
922
+ TCGReg a0, TCGReg a1, TCGReg a2)
923
+{
924
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORW : OPC_ROR;
925
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
926
+}
927
+
928
+static void tgen_rotri(TCGContext *s, TCGType type,
929
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
930
+{
931
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORIW : OPC_RORI;
932
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
933
+ tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
934
+}
935
+
936
+static const TCGOutOpBinary outop_rotr = {
937
+ .base.static_constraint = C_Dynamic,
938
+ .base.dynamic_constraint = cset_rot,
939
+ .out_rrr = tgen_rotr,
940
+ .out_rri = tgen_rotri,
941
+};
942
+
943
+static void tgen_rotl(TCGContext *s, TCGType type,
944
+ TCGReg a0, TCGReg a1, TCGReg a2)
945
+{
946
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ROLW : OPC_ROL;
947
+ tcg_out_opc_reg(s, insn, a0, a1, a2);
948
+}
949
+
950
+static void tgen_rotli(TCGContext *s, TCGType type,
951
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
952
+{
953
+ tgen_rotri(s, type, a0, a1, -a2);
954
+}
955
+
956
+static const TCGOutOpBinary outop_rotl = {
957
+ .base.static_constraint = C_Dynamic,
958
+ .base.dynamic_constraint = cset_rot,
959
+ .out_rrr = tgen_rotl,
960
+ .out_rri = tgen_rotli,
961
+};
962
+
963
static void tgen_sar(TCGContext *s, TCGType type,
964
TCGReg a0, TCGReg a1, TCGReg a2)
965
{
966
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
967
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
968
break;
969
970
- case INDEX_op_rotl_i32:
971
- if (c2) {
972
- tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
973
- } else {
974
- tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
975
- }
976
- break;
977
- case INDEX_op_rotl_i64:
978
- if (c2) {
979
- tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
980
- } else {
981
- tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
982
- }
983
- break;
984
-
985
- case INDEX_op_rotr_i32:
986
- if (c2) {
987
- tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
988
- } else {
989
- tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
990
- }
991
- break;
992
- case INDEX_op_rotr_i64:
993
- if (c2) {
994
- tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
995
- } else {
996
- tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
997
- }
998
- break;
999
-
1000
case INDEX_op_bswap64_i64:
1001
tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
1002
break;
1003
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1004
case INDEX_op_negsetcond_i64:
1005
return C_O1_I2(r, r, rI);
1006
1007
- case INDEX_op_rotl_i32:
1008
- case INDEX_op_rotr_i32:
1009
- case INDEX_op_rotl_i64:
1010
- case INDEX_op_rotr_i64:
1011
- return C_O1_I2(r, r, ri);
1012
-
1013
case INDEX_op_clz_i32:
1014
case INDEX_op_clz_i64:
1015
case INDEX_op_ctz_i32:
1016
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
1017
index XXXXXXX..XXXXXXX 100644
1018
--- a/tcg/s390x/tcg-target.c.inc
1019
+++ b/tcg/s390x/tcg-target.c.inc
1020
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
1021
.base.static_constraint = C_NotImplemented,
1022
};
1023
1024
+static void tgen_rotl_int(TCGContext *s, TCGType type, TCGReg dst,
1025
+ TCGReg src, TCGReg v, tcg_target_long i)
1026
+{
1027
+ S390Opcode insn = type == TCG_TYPE_I32 ? RSY_RLL : RSY_RLLG;
1028
+ tcg_out_sh64(s, insn, dst, src, v, i);
1029
+}
1030
+
1031
+static void tgen_rotl(TCGContext *s, TCGType type,
1032
+ TCGReg a0, TCGReg a1, TCGReg a2)
1033
+{
1034
+ tgen_rotl_int(s, type, a0, a1, a2, 0);
1035
+}
1036
+
1037
+static void tgen_rotli(TCGContext *s, TCGType type,
1038
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
1039
+{
1040
+ tgen_rotl_int(s, type, a0, a1, TCG_REG_NONE, a2);
1041
+}
1042
+
1043
+static const TCGOutOpBinary outop_rotl = {
1044
+ .base.static_constraint = C_O1_I2(r, r, ri),
1045
+ .out_rrr = tgen_rotl,
1046
+ .out_rri = tgen_rotli,
1047
+};
1048
+
1049
+static const TCGOutOpBinary outop_rotr = {
1050
+ .base.static_constraint = C_NotImplemented,
1051
+};
1052
+
1053
static void tgen_sar_int(TCGContext *s, TCGType type, TCGReg dst,
1054
TCGReg src, TCGReg v, tcg_target_long i)
1055
{
1056
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1057
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1058
break;
1059
1060
- case INDEX_op_rotl_i32:
1061
- /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1062
- if (const_args[2]) {
1063
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1064
- } else {
1065
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1066
- }
1067
- break;
1068
- case INDEX_op_rotr_i32:
1069
- if (const_args[2]) {
1070
- tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1071
- TCG_REG_NONE, (32 - args[2]) & 31);
1072
- } else {
1073
- tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1074
- tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1075
- }
1076
- break;
1077
-
1078
case INDEX_op_bswap16_i32:
1079
a0 = args[0], a1 = args[1], a2 = args[2];
1080
tcg_out_insn(s, RRE, LRVR, a0, a1);
1081
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1082
tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
1083
break;
1084
1085
- case INDEX_op_rotl_i64:
1086
- if (const_args[2]) {
1087
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1088
- TCG_REG_NONE, args[2]);
1089
- } else {
1090
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
1091
- }
1092
- break;
1093
- case INDEX_op_rotr_i64:
1094
- if (const_args[2]) {
1095
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1096
- TCG_REG_NONE, (64 - args[2]) & 63);
1097
- } else {
1098
- /* We can use the smaller 32-bit negate because only the
1099
- low 6 bits are examined for the rotate. */
1100
- tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1101
- tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
1102
- }
1103
- break;
1104
-
1105
case INDEX_op_add2_i64:
1106
if (const_args[4]) {
1107
if ((int64_t)args[4] >= 0) {
1108
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1109
case INDEX_op_st_i64:
1110
return C_O0_I2(r, r);
1111
1112
- case INDEX_op_rotl_i32:
1113
- case INDEX_op_rotl_i64:
1114
- case INDEX_op_rotr_i32:
1115
- case INDEX_op_rotr_i64:
1116
- return C_O1_I2(r, r, ri);
1117
case INDEX_op_setcond_i32:
1118
case INDEX_op_negsetcond_i32:
1119
case INDEX_op_setcond_i64:
1120
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
1121
index XXXXXXX..XXXXXXX 100644
1122
--- a/tcg/sparc64/tcg-target.c.inc
1123
+++ b/tcg/sparc64/tcg-target.c.inc
1124
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
1125
.base.static_constraint = C_NotImplemented,
1126
};
1127
1128
+static const TCGOutOpBinary outop_rotl = {
1129
+ .base.static_constraint = C_NotImplemented,
1130
+};
1131
+
1132
+static const TCGOutOpBinary outop_rotr = {
1133
+ .base.static_constraint = C_NotImplemented,
1134
+};
1135
+
1136
static void tgen_sar(TCGContext *s, TCGType type,
1137
TCGReg a0, TCGReg a1, TCGReg a2)
1138
{
1139
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
1140
index XXXXXXX..XXXXXXX 100644
1141
--- a/tcg/tci/tcg-target-opc.h.inc
1142
+++ b/tcg/tci/tcg-target-opc.h.inc
1143
@@ -XXX,XX +XXX,XX @@ DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1144
DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1145
DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1146
DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1147
+DEF(tci_rotl32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1148
+DEF(tci_rotr32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1149
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
1150
index XXXXXXX..XXXXXXX 100644
1151
--- a/tcg/tci/tcg-target.c.inc
1152
+++ b/tcg/tci/tcg-target.c.inc
1153
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1154
case INDEX_op_st_i64:
1155
return C_O0_I2(r, r);
1156
1157
- case INDEX_op_rotl_i32:
1158
- case INDEX_op_rotl_i64:
1159
- case INDEX_op_rotr_i32:
1160
- case INDEX_op_rotr_i64:
1161
case INDEX_op_setcond_i32:
1162
case INDEX_op_setcond_i64:
1163
case INDEX_op_deposit_i32:
1164
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_remu = {
1165
.out_rrr = tgen_remu,
1166
};
1167
1168
+static void tgen_rotl(TCGContext *s, TCGType type,
1169
+ TCGReg a0, TCGReg a1, TCGReg a2)
1170
+{
1171
+ TCGOpcode opc = (type == TCG_TYPE_I32
1172
+ ? INDEX_op_tci_rotl32
1173
+ : INDEX_op_rotl_i64);
1174
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
1175
+}
1176
+
1177
+static const TCGOutOpBinary outop_rotl = {
1178
+ .base.static_constraint = C_O1_I2(r, r, r),
1179
+ .out_rrr = tgen_rotl,
1180
+};
1181
+
1182
+static void tgen_rotr(TCGContext *s, TCGType type,
1183
+ TCGReg a0, TCGReg a1, TCGReg a2)
1184
+{
1185
+ TCGOpcode opc = (type == TCG_TYPE_I32
1186
+ ? INDEX_op_tci_rotr32
1187
+ : INDEX_op_rotr_i64);
1188
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
1189
+}
1190
+
1191
+static const TCGOutOpBinary outop_rotr = {
1192
+ .base.static_constraint = C_O1_I2(r, r, r),
1193
+ .out_rrr = tgen_rotr,
1194
+};
1195
+
1196
static void tgen_sar(TCGContext *s, TCGType type,
1197
TCGReg a0, TCGReg a1, TCGReg a2)
1198
{
1199
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1200
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
1201
break;
1202
1203
- CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
1204
- CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
1205
CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
1206
CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
1207
tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
1208
--
1209
2.43.0
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 6 ++---
5
tcg/optimize.c | 20 ++++++++---------
6
tcg/tcg-op.c | 48 ++++++++++++++++++++--------------------
7
tcg/tcg.c | 12 ++++------
8
tcg/tci.c | 8 +++----
9
docs/devel/tcg-ops.rst | 8 +++----
10
tcg/tci/tcg-target.c.inc | 4 ++--
11
7 files changed, 50 insertions(+), 56 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(or, 1, 2, 0, TCG_OPF_INT)
18
DEF(orc, 1, 2, 0, TCG_OPF_INT)
19
DEF(rems, 1, 2, 0, TCG_OPF_INT)
20
DEF(remu, 1, 2, 0, TCG_OPF_INT)
21
+DEF(rotl, 1, 2, 0, TCG_OPF_INT)
22
+DEF(rotr, 1, 2, 0, TCG_OPF_INT)
23
DEF(sar, 1, 2, 0, TCG_OPF_INT)
24
DEF(shl, 1, 2, 0, TCG_OPF_INT)
25
DEF(shr, 1, 2, 0, TCG_OPF_INT)
26
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
27
DEF(st16_i32, 0, 2, 1, 0)
28
DEF(st_i32, 0, 2, 1, 0)
29
/* shifts/rotates */
30
-DEF(rotl_i32, 1, 2, 0, 0)
31
-DEF(rotr_i32, 1, 2, 0, 0)
32
DEF(deposit_i32, 1, 2, 2, 0)
33
DEF(extract_i32, 1, 1, 2, 0)
34
DEF(sextract_i32, 1, 1, 2, 0)
35
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
36
DEF(st32_i64, 0, 2, 1, 0)
37
DEF(st_i64, 0, 2, 1, 0)
38
/* shifts/rotates */
39
-DEF(rotl_i64, 1, 2, 0, 0)
40
-DEF(rotr_i64, 1, 2, 0, 0)
41
DEF(deposit_i64, 1, 2, 2, 0)
42
DEF(extract_i64, 1, 1, 2, 0)
43
DEF(sextract_i64, 1, 1, 2, 0)
44
diff --git a/tcg/optimize.c b/tcg/optimize.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/optimize.c
47
+++ b/tcg/optimize.c
48
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
49
}
50
return (int64_t)x >> (y & 63);
51
52
- case INDEX_op_rotr_i32:
53
- return ror32(x, y & 31);
54
-
55
- case INDEX_op_rotr_i64:
56
+ case INDEX_op_rotr:
57
+ if (type == TCG_TYPE_I32) {
58
+ return ror32(x, y & 31);
59
+ }
60
return ror64(x, y & 63);
61
62
- case INDEX_op_rotl_i32:
63
- return rol32(x, y & 31);
64
-
65
- case INDEX_op_rotl_i64:
66
+ case INDEX_op_rotl:
67
+ if (type == TCG_TYPE_I32) {
68
+ return rol32(x, y & 31);
69
+ }
70
return rol64(x, y & 63);
71
72
case INDEX_op_not:
73
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
case INDEX_op_remu:
75
done = fold_remainder(&ctx, op);
76
break;
77
- CASE_OP_32_64(rotl):
78
- CASE_OP_32_64(rotr):
79
+ case INDEX_op_rotl:
80
+ case INDEX_op_rotr:
81
case INDEX_op_sar:
82
case INDEX_op_shl:
83
case INDEX_op_shr:
84
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/tcg/tcg-op.c
87
+++ b/tcg/tcg-op.c
88
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
89
90
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
91
{
92
- if (tcg_op_supported(INDEX_op_rotl_i32, TCG_TYPE_I32, 0)) {
93
- tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2);
94
- } else if (tcg_op_supported(INDEX_op_rotr_i32, TCG_TYPE_I32, 0)) {
95
+ if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) {
96
+ tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, arg2);
97
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) {
98
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
99
tcg_gen_neg_i32(t0, arg2);
100
- tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, t0);
101
+ tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, t0);
102
tcg_temp_free_i32(t0);
103
} else {
104
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
105
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
106
/* some cases can be optimized here */
107
if (arg2 == 0) {
108
tcg_gen_mov_i32(ret, arg1);
109
- } else if (tcg_op_supported(INDEX_op_rotl_i32, TCG_TYPE_I32, 0)) {
110
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) {
111
TCGv_i32 t0 = tcg_constant_i32(arg2);
112
- tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, t0);
113
- } else if (tcg_op_supported(INDEX_op_rotr_i32, TCG_TYPE_I32, 0)) {
114
+ tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, t0);
115
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) {
116
TCGv_i32 t0 = tcg_constant_i32(32 - arg2);
117
- tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, t0);
118
+ tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, t0);
119
} else {
120
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
121
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
122
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
123
124
void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
125
{
126
- if (tcg_op_supported(INDEX_op_rotr_i32, TCG_TYPE_I32, 0)) {
127
- tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2);
128
- } else if (tcg_op_supported(INDEX_op_rotl_i32, TCG_TYPE_I32, 0)) {
129
+ if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I32, 0)) {
130
+ tcg_gen_op3_i32(INDEX_op_rotr, ret, arg1, arg2);
131
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I32, 0)) {
132
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
133
tcg_gen_neg_i32(t0, arg2);
134
- tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, t0);
135
+ tcg_gen_op3_i32(INDEX_op_rotl, ret, arg1, t0);
136
tcg_temp_free_i32(t0);
137
} else {
138
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
139
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
140
141
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
142
{
143
- if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
144
- tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2);
145
- } else if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
146
+ if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) {
147
+ tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, arg2);
148
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) {
149
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
150
tcg_gen_neg_i64(t0, arg2);
151
- tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, t0);
152
+ tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, t0);
153
tcg_temp_free_i64(t0);
154
} else {
155
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
156
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
157
/* some cases can be optimized here */
158
if (arg2 == 0) {
159
tcg_gen_mov_i64(ret, arg1);
160
- } else if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
161
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) {
162
TCGv_i64 t0 = tcg_constant_i64(arg2);
163
- tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, t0);
164
- } else if (tcg_op_supported(INDEX_op_rotr_i64, TCG_TYPE_I64, 0)) {
165
+ tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, t0);
166
+ } else if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) {
167
TCGv_i64 t0 = tcg_constant_i64(64 - arg2);
168
- tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, t0);
169
+ tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, t0);
170
} else {
171
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
172
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
173
@@ -XXX,XX +XXX,XX @@ void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
174
175
void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
176
{
177
- if (tcg_op_supported(INDEX_op_rotr_i64, TCG_TYPE_I64, 0)) {
178
- tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2);
179
- } else if (tcg_op_supported(INDEX_op_rotl_i64, TCG_TYPE_I64, 0)) {
180
+ if (tcg_op_supported(INDEX_op_rotr, TCG_TYPE_I64, 0)) {
181
+ tcg_gen_op3_i64(INDEX_op_rotr, ret, arg1, arg2);
182
+ } else if (tcg_op_supported(INDEX_op_rotl, TCG_TYPE_I64, 0)) {
183
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
184
tcg_gen_neg_i64(t0, arg2);
185
- tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, t0);
186
+ tcg_gen_op3_i64(INDEX_op_rotl, ret, arg1, t0);
187
tcg_temp_free_i64(t0);
188
} else {
189
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
190
diff --git a/tcg/tcg.c b/tcg/tcg.c
191
index XXXXXXX..XXXXXXX 100644
192
--- a/tcg/tcg.c
193
+++ b/tcg/tcg.c
194
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
195
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
196
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
197
OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
198
- OUTOP(INDEX_op_rotl_i32, TCGOutOpBinary, outop_rotl),
199
- OUTOP(INDEX_op_rotl_i64, TCGOutOpBinary, outop_rotl),
200
- OUTOP(INDEX_op_rotr_i32, TCGOutOpBinary, outop_rotr),
201
- OUTOP(INDEX_op_rotr_i64, TCGOutOpBinary, outop_rotr),
202
+ OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
203
+ OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr),
204
OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
205
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
206
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
207
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
208
case INDEX_op_orc:
209
case INDEX_op_rems:
210
case INDEX_op_remu:
211
- case INDEX_op_rotl_i32:
212
- case INDEX_op_rotl_i64:
213
- case INDEX_op_rotr_i32:
214
- case INDEX_op_rotr_i64:
215
+ case INDEX_op_rotl:
216
+ case INDEX_op_rotr:
217
case INDEX_op_sar:
218
case INDEX_op_shl:
219
case INDEX_op_shr:
220
diff --git a/tcg/tci.c b/tcg/tci.c
221
index XXXXXXX..XXXXXXX 100644
222
--- a/tcg/tci.c
223
+++ b/tcg/tci.c
224
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
225
226
/* Shift/rotate operations (64 bit). */
227
228
- case INDEX_op_rotl_i64:
229
+ case INDEX_op_rotl:
230
tci_args_rrr(insn, &r0, &r1, &r2);
231
regs[r0] = rol64(regs[r1], regs[r2] & 63);
232
break;
233
- case INDEX_op_rotr_i64:
234
+ case INDEX_op_rotr:
235
tci_args_rrr(insn, &r0, &r1, &r2);
236
regs[r0] = ror64(regs[r1], regs[r2] & 63);
237
break;
238
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
239
case INDEX_op_orc:
240
case INDEX_op_rems:
241
case INDEX_op_remu:
242
+ case INDEX_op_rotl:
243
+ case INDEX_op_rotr:
244
case INDEX_op_sar:
245
case INDEX_op_shl:
246
case INDEX_op_shr:
247
case INDEX_op_sub:
248
case INDEX_op_xor:
249
- case INDEX_op_rotl_i64:
250
- case INDEX_op_rotr_i64:
251
case INDEX_op_clz_i32:
252
case INDEX_op_clz_i64:
253
case INDEX_op_ctz_i32:
254
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
255
index XXXXXXX..XXXXXXX 100644
256
--- a/docs/devel/tcg-ops.rst
257
+++ b/docs/devel/tcg-ops.rst
258
@@ -XXX,XX +XXX,XX @@ Shifts/Rotates
259
- | *t0* = *t1* >> *t2* (signed)
260
| Unspecified behavior for negative or out-of-range shifts.
261
262
- * - rotl_i32/i64 *t0*, *t1*, *t2*
263
+ * - rotl *t0*, *t1*, *t2*
264
265
- | Rotation of *t2* bits to the left
266
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
267
+ | Unspecified behavior for negative or out-of-range shifts.
268
269
- * - rotr_i32/i64 *t0*, *t1*, *t2*
270
+ * - rotr *t0*, *t1*, *t2*
271
272
- | Rotation of *t2* bits to the right.
273
- | Unspecified behavior if *t2* < 0 or *t2* >= 32 (resp 64)
274
+ | Unspecified behavior for negative or out-of-range shifts.
275
276
277
Misc
278
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
279
index XXXXXXX..XXXXXXX 100644
280
--- a/tcg/tci/tcg-target.c.inc
281
+++ b/tcg/tci/tcg-target.c.inc
282
@@ -XXX,XX +XXX,XX @@ static void tgen_rotl(TCGContext *s, TCGType type,
283
{
284
TCGOpcode opc = (type == TCG_TYPE_I32
285
? INDEX_op_tci_rotl32
286
- : INDEX_op_rotl_i64);
287
+ : INDEX_op_rotl);
288
tcg_out_op_rrr(s, opc, a0, a1, a2);
289
}
290
291
@@ -XXX,XX +XXX,XX @@ static void tgen_rotr(TCGContext *s, TCGType type,
292
{
293
TCGOpcode opc = (type == TCG_TYPE_I32
294
? INDEX_op_tci_rotr32
295
- : INDEX_op_rotr_i64);
296
+ : INDEX_op_rotr);
297
tcg_out_op_rrr(s, opc, a0, a1, a2);
298
}
299
300
--
301
2.43.0
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/aarch64/tcg-target-has.h | 2 -
5
tcg/arm/tcg-target-has.h | 1 -
6
tcg/i386/tcg-target-has.h | 2 -
7
tcg/loongarch64/tcg-target-has.h | 2 -
8
tcg/mips/tcg-target-has.h | 2 -
9
tcg/ppc/tcg-target-has.h | 2 -
10
tcg/riscv/tcg-target-has.h | 2 -
11
tcg/s390x/tcg-target-has.h | 2 -
12
tcg/sparc64/tcg-target-has.h | 2 -
13
tcg/tcg-has.h | 1 -
14
tcg/tci/tcg-target-has.h | 2 -
15
tcg/tcg-op.c | 108 ++++++++++++++++---------------
16
tcg/tcg.c | 8 +--
17
tcg/tci.c | 8 +--
18
tcg/aarch64/tcg-target.c.inc | 83 +++++++++++++-----------
19
tcg/arm/tcg-target.c.inc | 47 +++++++++-----
20
tcg/i386/tcg-target.c.inc | 72 +++++++++++----------
21
tcg/loongarch64/tcg-target.c.inc | 36 ++++++++---
22
tcg/mips/tcg-target.c.inc | 86 +++++++++++++-----------
23
tcg/ppc/tcg-target.c.inc | 30 ++++++---
24
tcg/riscv/tcg-target.c.inc | 34 +++++++---
25
tcg/s390x/tcg-target.c.inc | 75 +++++++++++++--------
26
tcg/sparc64/tcg-target.c.inc | 4 ++
27
tcg/tci/tcg-target-opc.h.inc | 1 +
28
tcg/tci/tcg-target.c.inc | 17 ++++-
29
25 files changed, 365 insertions(+), 264 deletions(-)
30
1
31
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/aarch64/tcg-target-has.h
34
+++ b/tcg/aarch64/tcg-target-has.h
35
@@ -XXX,XX +XXX,XX @@
36
/* optional instructions */
37
#define TCG_TARGET_HAS_bswap16_i32 1
38
#define TCG_TARGET_HAS_bswap32_i32 1
39
-#define TCG_TARGET_HAS_clz_i32 1
40
#define TCG_TARGET_HAS_ctz_i32 1
41
#define TCG_TARGET_HAS_ctpop_i32 0
42
#define TCG_TARGET_HAS_extract2_i32 1
43
@@ -XXX,XX +XXX,XX @@
44
#define TCG_TARGET_HAS_bswap16_i64 1
45
#define TCG_TARGET_HAS_bswap32_i64 1
46
#define TCG_TARGET_HAS_bswap64_i64 1
47
-#define TCG_TARGET_HAS_clz_i64 1
48
#define TCG_TARGET_HAS_ctz_i64 1
49
#define TCG_TARGET_HAS_ctpop_i64 0
50
#define TCG_TARGET_HAS_extract2_i64 1
51
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tcg/arm/tcg-target-has.h
54
+++ b/tcg/arm/tcg-target-has.h
55
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
56
/* optional instructions */
57
#define TCG_TARGET_HAS_bswap16_i32 1
58
#define TCG_TARGET_HAS_bswap32_i32 1
59
-#define TCG_TARGET_HAS_clz_i32 1
60
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
61
#define TCG_TARGET_HAS_ctpop_i32 0
62
#define TCG_TARGET_HAS_extract2_i32 1
63
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/i386/tcg-target-has.h
66
+++ b/tcg/i386/tcg-target-has.h
67
@@ -XXX,XX +XXX,XX @@
68
/* optional instructions */
69
#define TCG_TARGET_HAS_bswap16_i32 1
70
#define TCG_TARGET_HAS_bswap32_i32 1
71
-#define TCG_TARGET_HAS_clz_i32 1
72
#define TCG_TARGET_HAS_ctz_i32 1
73
#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
74
#define TCG_TARGET_HAS_extract2_i32 1
75
@@ -XXX,XX +XXX,XX @@
76
#define TCG_TARGET_HAS_bswap16_i64 1
77
#define TCG_TARGET_HAS_bswap32_i64 1
78
#define TCG_TARGET_HAS_bswap64_i64 1
79
-#define TCG_TARGET_HAS_clz_i64 1
80
#define TCG_TARGET_HAS_ctz_i64 1
81
#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
82
#define TCG_TARGET_HAS_extract2_i64 1
83
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/loongarch64/tcg-target-has.h
86
+++ b/tcg/loongarch64/tcg-target-has.h
87
@@ -XXX,XX +XXX,XX @@
88
#define TCG_TARGET_HAS_muls2_i32 0
89
#define TCG_TARGET_HAS_bswap16_i32 1
90
#define TCG_TARGET_HAS_bswap32_i32 1
91
-#define TCG_TARGET_HAS_clz_i32 1
92
#define TCG_TARGET_HAS_ctz_i32 1
93
#define TCG_TARGET_HAS_ctpop_i32 0
94
#define TCG_TARGET_HAS_qemu_st8_i32 0
95
@@ -XXX,XX +XXX,XX @@
96
#define TCG_TARGET_HAS_bswap16_i64 1
97
#define TCG_TARGET_HAS_bswap32_i64 1
98
#define TCG_TARGET_HAS_bswap64_i64 1
99
-#define TCG_TARGET_HAS_clz_i64 1
100
#define TCG_TARGET_HAS_ctz_i64 1
101
#define TCG_TARGET_HAS_ctpop_i64 0
102
#define TCG_TARGET_HAS_add2_i64 0
103
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/mips/tcg-target-has.h
106
+++ b/tcg/mips/tcg-target-has.h
107
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
108
109
/* optional instructions detected at runtime */
110
#define TCG_TARGET_HAS_extract2_i32 0
111
-#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
112
#define TCG_TARGET_HAS_ctz_i32 0
113
#define TCG_TARGET_HAS_ctpop_i32 0
114
#define TCG_TARGET_HAS_qemu_st8_i32 0
115
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
116
#define TCG_TARGET_HAS_bswap32_i64 1
117
#define TCG_TARGET_HAS_bswap64_i64 1
118
#define TCG_TARGET_HAS_extract2_i64 0
119
-#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
120
#define TCG_TARGET_HAS_ctz_i64 0
121
#define TCG_TARGET_HAS_ctpop_i64 0
122
#endif
123
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/ppc/tcg-target-has.h
126
+++ b/tcg/ppc/tcg-target-has.h
127
@@ -XXX,XX +XXX,XX @@
128
/* optional instructions */
129
#define TCG_TARGET_HAS_bswap16_i32 1
130
#define TCG_TARGET_HAS_bswap32_i32 1
131
-#define TCG_TARGET_HAS_clz_i32 1
132
#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
133
#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
134
#define TCG_TARGET_HAS_extract2_i32 0
135
@@ -XXX,XX +XXX,XX @@
136
#define TCG_TARGET_HAS_bswap16_i64 1
137
#define TCG_TARGET_HAS_bswap32_i64 1
138
#define TCG_TARGET_HAS_bswap64_i64 1
139
-#define TCG_TARGET_HAS_clz_i64 1
140
#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
141
#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
142
#define TCG_TARGET_HAS_extract2_i64 0
143
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
144
index XXXXXXX..XXXXXXX 100644
145
--- a/tcg/riscv/tcg-target-has.h
146
+++ b/tcg/riscv/tcg-target-has.h
147
@@ -XXX,XX +XXX,XX @@
148
#define TCG_TARGET_HAS_muls2_i32 0
149
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
150
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
151
-#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
152
#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
153
#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
154
#define TCG_TARGET_HAS_qemu_st8_i32 0
155
@@ -XXX,XX +XXX,XX @@
156
#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
157
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
158
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
159
-#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
160
#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
161
#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
162
#define TCG_TARGET_HAS_add2_i64 1
163
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/s390x/tcg-target-has.h
166
+++ b/tcg/s390x/tcg-target-has.h
167
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
168
/* optional instructions */
169
#define TCG_TARGET_HAS_bswap16_i32 1
170
#define TCG_TARGET_HAS_bswap32_i32 1
171
-#define TCG_TARGET_HAS_clz_i32 0
172
#define TCG_TARGET_HAS_ctz_i32 0
173
#define TCG_TARGET_HAS_ctpop_i32 1
174
#define TCG_TARGET_HAS_extract2_i32 0
175
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
176
#define TCG_TARGET_HAS_bswap16_i64 1
177
#define TCG_TARGET_HAS_bswap32_i64 1
178
#define TCG_TARGET_HAS_bswap64_i64 1
179
-#define TCG_TARGET_HAS_clz_i64 1
180
#define TCG_TARGET_HAS_ctz_i64 0
181
#define TCG_TARGET_HAS_ctpop_i64 1
182
#define TCG_TARGET_HAS_extract2_i64 0
183
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
184
index XXXXXXX..XXXXXXX 100644
185
--- a/tcg/sparc64/tcg-target-has.h
186
+++ b/tcg/sparc64/tcg-target-has.h
187
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
188
/* optional instructions */
189
#define TCG_TARGET_HAS_bswap16_i32 0
190
#define TCG_TARGET_HAS_bswap32_i32 0
191
-#define TCG_TARGET_HAS_clz_i32 0
192
#define TCG_TARGET_HAS_ctz_i32 0
193
#define TCG_TARGET_HAS_ctpop_i32 0
194
#define TCG_TARGET_HAS_extract2_i32 0
195
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
196
#define TCG_TARGET_HAS_bswap16_i64 0
197
#define TCG_TARGET_HAS_bswap32_i64 0
198
#define TCG_TARGET_HAS_bswap64_i64 0
199
-#define TCG_TARGET_HAS_clz_i64 0
200
#define TCG_TARGET_HAS_ctz_i64 0
201
#define TCG_TARGET_HAS_ctpop_i64 0
202
#define TCG_TARGET_HAS_extract2_i64 0
203
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
204
index XXXXXXX..XXXXXXX 100644
205
--- a/tcg/tcg-has.h
206
+++ b/tcg/tcg-has.h
207
@@ -XXX,XX +XXX,XX @@
208
#define TCG_TARGET_HAS_bswap16_i64 0
209
#define TCG_TARGET_HAS_bswap32_i64 0
210
#define TCG_TARGET_HAS_bswap64_i64 0
211
-#define TCG_TARGET_HAS_clz_i64 0
212
#define TCG_TARGET_HAS_ctz_i64 0
213
#define TCG_TARGET_HAS_ctpop_i64 0
214
#define TCG_TARGET_HAS_extract2_i64 0
215
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
216
index XXXXXXX..XXXXXXX 100644
217
--- a/tcg/tci/tcg-target-has.h
218
+++ b/tcg/tci/tcg-target-has.h
219
@@ -XXX,XX +XXX,XX @@
220
#define TCG_TARGET_HAS_bswap16_i32 1
221
#define TCG_TARGET_HAS_bswap32_i32 1
222
#define TCG_TARGET_HAS_extract2_i32 0
223
-#define TCG_TARGET_HAS_clz_i32 1
224
#define TCG_TARGET_HAS_ctz_i32 1
225
#define TCG_TARGET_HAS_ctpop_i32 1
226
#define TCG_TARGET_HAS_negsetcond_i32 0
227
@@ -XXX,XX +XXX,XX @@
228
#define TCG_TARGET_HAS_bswap32_i64 1
229
#define TCG_TARGET_HAS_bswap64_i64 1
230
#define TCG_TARGET_HAS_extract2_i64 0
231
-#define TCG_TARGET_HAS_clz_i64 1
232
#define TCG_TARGET_HAS_ctz_i64 1
233
#define TCG_TARGET_HAS_ctpop_i64 1
234
#define TCG_TARGET_HAS_negsetcond_i64 0
235
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
236
index XXXXXXX..XXXXXXX 100644
237
--- a/tcg/tcg-op.c
238
+++ b/tcg/tcg-op.c
239
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
240
241
void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
242
{
243
- if (TCG_TARGET_HAS_clz_i32) {
244
+ if (tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0)) {
245
tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
246
- } else if (TCG_TARGET_HAS_clz_i64) {
247
+ } else if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
248
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
249
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
250
tcg_gen_extu_i32_i64(t1, arg1);
251
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
252
253
void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
254
{
255
+ TCGv_i32 z, t;
256
+
257
if (TCG_TARGET_HAS_ctz_i32) {
258
tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
259
- } else if (TCG_TARGET_HAS_ctz_i64) {
260
+ return;
261
+ }
262
+ if (TCG_TARGET_HAS_ctz_i64) {
263
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
264
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
265
tcg_gen_extu_i32_i64(t1, arg1);
266
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
267
tcg_gen_extrl_i64_i32(ret, t1);
268
tcg_temp_free_i64(t1);
269
tcg_temp_free_i64(t2);
270
- } else if (TCG_TARGET_HAS_ctpop_i32
271
- || TCG_TARGET_HAS_ctpop_i64
272
- || TCG_TARGET_HAS_clz_i32
273
- || TCG_TARGET_HAS_clz_i64) {
274
- TCGv_i32 z, t = tcg_temp_ebb_new_i32();
275
-
276
- if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
277
- tcg_gen_subi_i32(t, arg1, 1);
278
- tcg_gen_andc_i32(t, t, arg1);
279
- tcg_gen_ctpop_i32(t, t);
280
- } else {
281
- /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
282
- tcg_gen_neg_i32(t, arg1);
283
- tcg_gen_and_i32(t, t, arg1);
284
- tcg_gen_clzi_i32(t, t, 32);
285
- tcg_gen_xori_i32(t, t, 31);
286
- }
287
- z = tcg_constant_i32(0);
288
- tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
289
- tcg_temp_free_i32(t);
290
+ return;
291
+ }
292
+ if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
293
+ t = tcg_temp_ebb_new_i32();
294
+ tcg_gen_subi_i32(t, arg1, 1);
295
+ tcg_gen_andc_i32(t, t, arg1);
296
+ tcg_gen_ctpop_i32(t, t);
297
+ } else if (tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0) ||
298
+ tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
299
+ t = tcg_temp_ebb_new_i32();
300
+ tcg_gen_neg_i32(t, arg1);
301
+ tcg_gen_and_i32(t, t, arg1);
302
+ tcg_gen_clzi_i32(t, t, 32);
303
+ tcg_gen_xori_i32(t, t, 31);
304
} else {
305
gen_helper_ctz_i32(ret, arg1, arg2);
306
+ return;
307
}
308
+
309
+ z = tcg_constant_i32(0);
310
+ tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
311
+ tcg_temp_free_i32(t);
312
}
313
314
void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
315
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
316
317
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
318
{
319
- if (TCG_TARGET_HAS_clz_i32) {
320
+ if (tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0) ||
321
+ tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
322
TCGv_i32 t = tcg_temp_ebb_new_i32();
323
tcg_gen_sari_i32(t, arg, 31);
324
tcg_gen_xor_i32(t, t, arg);
325
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
326
327
void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
328
{
329
- if (TCG_TARGET_HAS_clz_i64) {
330
+ if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
331
tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
332
} else {
333
gen_helper_clz_i64(ret, arg1, arg2);
334
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
335
void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
336
{
337
if (TCG_TARGET_REG_BITS == 32
338
- && TCG_TARGET_HAS_clz_i32
339
- && arg2 <= 0xffffffffu) {
340
+ && arg2 <= 0xffffffffu
341
+ && tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0)) {
342
TCGv_i32 t = tcg_temp_ebb_new_i32();
343
tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32);
344
tcg_gen_addi_i32(t, t, 32);
345
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
346
347
void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
348
{
349
+ TCGv_i64 z, t;
350
+
351
if (TCG_TARGET_HAS_ctz_i64) {
352
tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
353
- } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
354
- TCGv_i64 z, t = tcg_temp_ebb_new_i64();
355
-
356
- if (TCG_TARGET_HAS_ctpop_i64) {
357
- tcg_gen_subi_i64(t, arg1, 1);
358
- tcg_gen_andc_i64(t, t, arg1);
359
- tcg_gen_ctpop_i64(t, t);
360
- } else {
361
- /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */
362
- tcg_gen_neg_i64(t, arg1);
363
- tcg_gen_and_i64(t, t, arg1);
364
- tcg_gen_clzi_i64(t, t, 64);
365
- tcg_gen_xori_i64(t, t, 63);
366
- }
367
- z = tcg_constant_i64(0);
368
- tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
369
- tcg_temp_free_i64(t);
370
- tcg_temp_free_i64(z);
371
+ return;
372
+ }
373
+ if (TCG_TARGET_HAS_ctpop_i64) {
374
+ t = tcg_temp_ebb_new_i64();
375
+ tcg_gen_subi_i64(t, arg1, 1);
376
+ tcg_gen_andc_i64(t, t, arg1);
377
+ tcg_gen_ctpop_i64(t, t);
378
+ } else if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
379
+ t = tcg_temp_ebb_new_i64();
380
+ tcg_gen_neg_i64(t, arg1);
381
+ tcg_gen_and_i64(t, t, arg1);
382
+ tcg_gen_clzi_i64(t, t, 64);
383
+ tcg_gen_xori_i64(t, t, 63);
384
} else {
385
gen_helper_ctz_i64(ret, arg1, arg2);
386
+ return;
387
}
388
+
389
+ z = tcg_constant_i64(0);
390
+ tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
391
+ tcg_temp_free_i64(t);
392
}
393
394
void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
395
{
396
if (TCG_TARGET_REG_BITS == 32
397
- && TCG_TARGET_HAS_ctz_i32
398
- && arg2 <= 0xffffffffu) {
399
+ && arg2 <= 0xffffffffu
400
+ && tcg_op_supported(INDEX_op_ctz_i32, TCG_TYPE_I32, 0)) {
401
TCGv_i32 t32 = tcg_temp_ebb_new_i32();
402
tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32);
403
tcg_gen_addi_i32(t32, t32, 32);
404
tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
405
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
406
tcg_temp_free_i32(t32);
407
- } else if (!TCG_TARGET_HAS_ctz_i64
408
- && TCG_TARGET_HAS_ctpop_i64
409
- && arg2 == 64) {
410
+ } else if (arg2 == 64
411
+ && !tcg_op_supported(INDEX_op_ctz_i64, TCG_TYPE_I64, 0)
412
+ && TCG_TARGET_HAS_ctpop_i64) {
413
/* This equivalence has the advantage of not requiring a fixup. */
414
TCGv_i64 t = tcg_temp_ebb_new_i64();
415
tcg_gen_subi_i64(t, arg1, 1);
416
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
417
418
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
419
{
420
- if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
421
+ if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
422
TCGv_i64 t = tcg_temp_ebb_new_i64();
423
tcg_gen_sari_i64(t, arg, 63);
424
tcg_gen_xor_i64(t, t, arg);
425
diff --git a/tcg/tcg.c b/tcg/tcg.c
426
index XXXXXXX..XXXXXXX 100644
427
--- a/tcg/tcg.c
428
+++ b/tcg/tcg.c
429
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
430
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
431
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
432
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
433
+ OUTOP(INDEX_op_clz_i32, TCGOutOpBinary, outop_clz),
434
+ OUTOP(INDEX_op_clz_i64, TCGOutOpBinary, outop_clz),
435
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
436
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
437
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
438
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
439
return TCG_TARGET_HAS_bswap16_i32;
440
case INDEX_op_bswap32_i32:
441
return TCG_TARGET_HAS_bswap32_i32;
442
- case INDEX_op_clz_i32:
443
- return TCG_TARGET_HAS_clz_i32;
444
case INDEX_op_ctz_i32:
445
return TCG_TARGET_HAS_ctz_i32;
446
case INDEX_op_ctpop_i32:
447
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
448
return TCG_TARGET_HAS_bswap32_i64;
449
case INDEX_op_bswap64_i64:
450
return TCG_TARGET_HAS_bswap64_i64;
451
- case INDEX_op_clz_i64:
452
- return TCG_TARGET_HAS_clz_i64;
453
case INDEX_op_ctz_i64:
454
return TCG_TARGET_HAS_ctz_i64;
455
case INDEX_op_ctpop_i64:
456
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
457
case INDEX_op_add:
458
case INDEX_op_and:
459
case INDEX_op_andc:
460
+ case INDEX_op_clz_i32:
461
+ case INDEX_op_clz_i64:
462
case INDEX_op_divs:
463
case INDEX_op_divu:
464
case INDEX_op_eqv:
465
diff --git a/tcg/tci.c b/tcg/tci.c
466
index XXXXXXX..XXXXXXX 100644
467
--- a/tcg/tci.c
468
+++ b/tcg/tci.c
469
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
470
tci_args_rrr(insn, &r0, &r1, &r2);
471
regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
472
break;
473
-#if TCG_TARGET_HAS_clz_i32
474
- case INDEX_op_clz_i32:
475
+ case INDEX_op_tci_clz32:
476
tci_args_rrr(insn, &r0, &r1, &r2);
477
tmp32 = regs[r1];
478
regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
479
break;
480
-#endif
481
#if TCG_TARGET_HAS_ctz_i32
482
case INDEX_op_ctz_i32:
483
tci_args_rrr(insn, &r0, &r1, &r2);
484
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
485
tci_args_rrr(insn, &r0, &r1, &r2);
486
regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
487
break;
488
-#if TCG_TARGET_HAS_clz_i64
489
case INDEX_op_clz_i64:
490
tci_args_rrr(insn, &r0, &r1, &r2);
491
regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
492
break;
493
-#endif
494
#if TCG_TARGET_HAS_ctz_i64
495
case INDEX_op_ctz_i64:
496
tci_args_rrr(insn, &r0, &r1, &r2);
497
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
498
case INDEX_op_shr:
499
case INDEX_op_sub:
500
case INDEX_op_xor:
501
- case INDEX_op_clz_i32:
502
case INDEX_op_clz_i64:
503
case INDEX_op_ctz_i32:
504
case INDEX_op_ctz_i64:
505
+ case INDEX_op_tci_clz32:
506
case INDEX_op_tci_divs32:
507
case INDEX_op_tci_divu32:
508
case INDEX_op_tci_rems32:
509
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
510
index XXXXXXX..XXXXXXX 100644
511
--- a/tcg/aarch64/tcg-target.c.inc
512
+++ b/tcg/aarch64/tcg-target.c.inc
513
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
514
tcg_out32(s, sync[a0 & TCG_MO_ALL]);
515
}
516
517
-static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
518
- TCGReg a0, TCGArg b, bool const_b, bool is_ctz)
519
-{
520
- TCGReg a1 = a0;
521
- if (is_ctz) {
522
- a1 = TCG_REG_TMP0;
523
- tcg_out_insn(s, 3507, RBIT, ext, a1, a0);
524
- }
525
- if (const_b && b == (ext ? 64 : 32)) {
526
- tcg_out_insn(s, 3507, CLZ, ext, d, a1);
527
- } else {
528
- AArch64Insn sel = I3506_CSEL;
529
-
530
- tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1);
531
- tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1);
532
-
533
- if (const_b) {
534
- if (b == -1) {
535
- b = TCG_REG_XZR;
536
- sel = I3506_CSINV;
537
- } else if (b == 0) {
538
- b = TCG_REG_XZR;
539
- } else {
540
- tcg_out_movi(s, ext, d, b);
541
- b = d;
542
- }
543
- }
544
- tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP0, b, TCG_COND_NE);
545
- }
546
-}
547
-
548
typedef struct {
549
TCGReg base;
550
TCGReg index;
551
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
552
.out_rrr = tgen_andc,
553
};
554
555
+static void tgen_clz(TCGContext *s, TCGType type,
556
+ TCGReg a0, TCGReg a1, TCGReg a2)
557
+{
558
+ tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true);
559
+ tcg_out_insn(s, 3507, CLZ, type, TCG_REG_TMP0, a1);
560
+ tcg_out_insn(s, 3506, CSEL, type, a0, TCG_REG_TMP0, a2, TCG_COND_NE);
561
+}
562
+
563
+static void tgen_clzi(TCGContext *s, TCGType type,
564
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
565
+{
566
+ if (a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
567
+ tcg_out_insn(s, 3507, CLZ, type, a0, a1);
568
+ return;
569
+ }
570
+
571
+ tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true);
572
+ tcg_out_insn(s, 3507, CLZ, type, a0, a1);
573
+
574
+ switch (a2) {
575
+ case -1:
576
+ tcg_out_insn(s, 3506, CSINV, type, a0, a0, TCG_REG_XZR, TCG_COND_NE);
577
+ break;
578
+ case 0:
579
+ tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_XZR, TCG_COND_NE);
580
+ break;
581
+ default:
582
+ tcg_out_movi(s, type, TCG_REG_TMP0, a2);
583
+ tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_TMP0, TCG_COND_NE);
584
+ break;
585
+ }
586
+}
587
+
588
+static const TCGOutOpBinary outop_clz = {
589
+ .base.static_constraint = C_O1_I2(r, r, rAL),
590
+ .out_rrr = tgen_clz,
591
+ .out_rri = tgen_clzi,
592
+};
593
+
594
static void tgen_divs(TCGContext *s, TCGType type,
595
TCGReg a0, TCGReg a1, TCGReg a2)
596
{
597
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
598
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
599
break;
600
601
- case INDEX_op_clz_i64:
602
- case INDEX_op_clz_i32:
603
- tcg_out_cltz(s, ext, a0, a1, a2, c2, false);
604
- break;
605
case INDEX_op_ctz_i64:
606
case INDEX_op_ctz_i32:
607
- tcg_out_cltz(s, ext, a0, a1, a2, c2, true);
608
+ tcg_out_insn(s, 3507, RBIT, ext, TCG_REG_TMP0, a1);
609
+ if (c2) {
610
+ tgen_clzi(s, ext, a0, TCG_REG_TMP0, a2);
611
+ } else {
612
+ tgen_clz(s, ext, a0, TCG_REG_TMP0, a2);
613
+ }
614
break;
615
616
case INDEX_op_brcond_i32:
617
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
618
case INDEX_op_negsetcond_i64:
619
return C_O1_I2(r, r, rC);
620
621
- case INDEX_op_clz_i32:
622
case INDEX_op_ctz_i32:
623
- case INDEX_op_clz_i64:
624
case INDEX_op_ctz_i64:
625
return C_O1_I2(r, r, rAL);
626
627
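In tgen_clzi above, the two special cases exploit the zero register: a default of 0 selects XZR via CSEL and a default of -1 selects ~XZR via CSINV (since ~0 == -1), saving the movi. The operation being implemented is just the TCG clz semantics; a hedged C model of the 64-bit flavor:

    /* t0 = t1 ? clz(t1) : t2, as CLZ plus a conditional select. */
    static uint64_t clz_op_model(uint64_t t1, uint64_t t2)
    {
        return t1 ? __builtin_clzll(t1) : t2;
    }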
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
628
index XXXXXXX..XXXXXXX 100644
629
--- a/tcg/arm/tcg-target.c.inc
630
+++ b/tcg/arm/tcg-target.c.inc
631
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
632
.out_rrr = tgen_andc,
633
};
634
635
+static void tgen_clz(TCGContext *s, TCGType type,
636
+ TCGReg a0, TCGReg a1, TCGReg a2)
637
+{
638
+ tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
639
+ tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
640
+ tcg_out_mov_reg(s, COND_EQ, a0, a2);
641
+}
642
+
643
+static void tgen_clzi(TCGContext *s, TCGType type,
644
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
645
+{
646
+ if (a2 == 32) {
647
+ tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
648
+ } else {
649
+ tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
650
+ tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
651
+ tcg_out_movi32(s, COND_EQ, a0, a2);
652
+ }
653
+}
654
+
655
+static const TCGOutOpBinary outop_clz = {
656
+ .base.static_constraint = C_O1_I2(r, r, rIK),
657
+ .out_rrr = tgen_clz,
658
+ .out_rri = tgen_clzi,
659
+};
660
+
661
static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
662
{
663
return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
664
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
665
666
case INDEX_op_ctz_i32:
667
tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
668
- a1 = TCG_REG_TMP;
669
- goto do_clz;
670
-
671
- case INDEX_op_clz_i32:
672
- a1 = args[1];
673
- do_clz:
674
- a0 = args[0];
675
- a2 = args[2];
676
- c = const_args[2];
677
- if (c && a2 == 32) {
678
- tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
679
- break;
680
- }
681
- tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
682
- tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
683
- if (c || a0 != a2) {
684
- tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
685
+ if (const_args[2]) {
686
+ tgen_clzi(s, TCG_TYPE_I32, args[0], TCG_REG_TMP, args[2]);
687
+ } else {
688
+ tgen_clz(s, TCG_TYPE_I32, args[0], TCG_REG_TMP, args[2]);
689
}
690
break;
691
692
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
693
index XXXXXXX..XXXXXXX 100644
694
--- a/tcg/i386/tcg-target.c.inc
695
+++ b/tcg/i386/tcg-target.c.inc
696
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
697
}
698
}
699
700
-static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
701
- TCGArg arg2, bool const_a2)
702
-{
703
- if (have_lzcnt) {
704
- tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
705
- if (const_a2) {
706
- tcg_debug_assert(arg2 == (rexw ? 64 : 32));
707
- } else {
708
- tcg_debug_assert(dest != arg2);
709
- tcg_out_cmov(s, JCC_JB, rexw, dest, arg2);
710
- }
711
- } else {
712
- tcg_debug_assert(!const_a2);
713
- tcg_debug_assert(dest != arg1);
714
- tcg_debug_assert(dest != arg2);
715
-
716
- /* Recall that the output of BSR is the index not the count. */
717
- tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
718
- tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
719
-
720
- /* Since we have destroyed the flags from BSR, we have to re-test. */
721
- int jcc = tcg_out_cmp(s, TCG_COND_EQ, arg1, 0, 1, rexw);
722
- tcg_out_cmov(s, jcc, rexw, dest, arg2);
723
- }
724
-}
725
-
726
static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
727
{
728
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
729
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
730
.out_rrr = tgen_andc,
731
};
732
733
+static void tgen_clz(TCGContext *s, TCGType type,
734
+ TCGReg a0, TCGReg a1, TCGReg a2)
735
+{
736
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
737
+ int jcc;
738
+
739
+ if (have_lzcnt) {
740
+ tcg_out_modrm(s, OPC_LZCNT + rexw, a0, a1);
741
+ jcc = JCC_JB;
742
+ } else {
743
+ /* Recall that the output of BSR is the index not the count. */
744
+ tcg_out_modrm(s, OPC_BSR + rexw, a0, a1);
745
+ tgen_arithi(s, ARITH_XOR + rexw, a0, rexw ? 63 : 31, 0);
746
+
747
+ /* Since we have destroyed the flags from BSR, we have to re-test. */
748
+ jcc = tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, rexw);
749
+ }
750
+ tcg_out_cmov(s, jcc, rexw, a0, a2);
751
+}
752
+
753
+static void tgen_clzi(TCGContext *s, TCGType type,
754
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
755
+{
756
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
757
+ tcg_out_modrm(s, OPC_LZCNT + rexw, a0, a1);
758
+}
759
+
760
+static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags)
761
+{
762
+ return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
763
+}
764
+
765
+static const TCGOutOpBinary outop_clz = {
766
+ .base.static_constraint = C_Dynamic,
767
+ .base.dynamic_constraint = cset_clz,
768
+ .out_rrr = tgen_clz,
769
+ .out_rri = tgen_clzi,
770
+};
771
+
772
static const TCGOutOpBinary outop_divs = {
773
.base.static_constraint = C_NotImplemented,
774
};
775
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
776
OP_32_64(ctz):
777
tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
778
break;
779
- OP_32_64(clz):
780
- tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
781
- break;
782
OP_32_64(ctpop):
783
tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
784
break;
785
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
786
case INDEX_op_ctz_i64:
787
return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
788
789
- case INDEX_op_clz_i32:
790
- case INDEX_op_clz_i64:
791
- return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
792
-
793
case INDEX_op_qemu_ld_i32:
794
return C_O1_I1(r, L);
795
796
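The BSR path in tgen_clz works because BSR yields the bit index rather than the count: for nonzero x, bsr(x) == 63 - clz(x), and subtracting from an all-ones value is the same as XOR, which is what the tgen_arithi call emits. A small check of that identity (a sketch; the builtin stands in for BSR):

    /* clz recovered from a BSR-style highest-set-bit index, x != 0. */
    static unsigned clz64_via_bsr(uint64_t x)
    {
        unsigned idx = 63 - __builtin_clzll(x);  /* what BSR returns */
        return idx ^ 63;                         /* 63 - idx == 63 ^ idx */
    }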
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
797
index XXXXXXX..XXXXXXX 100644
798
--- a/tcg/loongarch64/tcg-target.c.inc
799
+++ b/tcg/loongarch64/tcg-target.c.inc
800
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
801
.out_rrr = tgen_andc,
802
};
803
804
+static void tgen_clzi(TCGContext *s, TCGType type,
805
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
806
+{
807
+ /* a2 is constrained to exactly the type width. */
808
+ if (type == TCG_TYPE_I32) {
809
+ tcg_out_opc_clz_w(s, a0, a1);
810
+ } else {
811
+ tcg_out_opc_clz_d(s, a0, a1);
812
+ }
813
+}
814
+
815
+static void tgen_clz(TCGContext *s, TCGType type,
816
+ TCGReg a0, TCGReg a1, TCGReg a2)
817
+{
818
+ tgen_clzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0);
819
+ /* a0 = a1 ? REG_TMP0 : a2 */
820
+ tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
821
+ tcg_out_opc_masknez(s, a0, a2, a1);
822
+ tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0);
823
+}
824
+
825
+static const TCGOutOpBinary outop_clz = {
826
+ .base.static_constraint = C_O1_I2(r, r, rW),
827
+ .out_rrr = tgen_clz,
828
+ .out_rri = tgen_clzi,
829
+};
830
+
831
static void tgen_divs(TCGContext *s, TCGType type,
832
TCGReg a0, TCGReg a1, TCGReg a2)
833
{
834
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
835
tcg_out_opc_revb_d(s, a0, a1);
836
break;
837
838
- case INDEX_op_clz_i32:
839
- tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
840
- break;
841
- case INDEX_op_clz_i64:
842
- tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
843
- break;
844
-
845
case INDEX_op_ctz_i32:
846
tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
847
break;
848
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
849
case INDEX_op_qemu_ld_i64:
850
return C_O1_I1(r, r);
851
852
- case INDEX_op_clz_i32:
853
- case INDEX_op_clz_i64:
854
case INDEX_op_ctz_i32:
855
case INDEX_op_ctz_i64:
856
return C_O1_I2(r, r, rW);
857
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
858
index XXXXXXX..XXXXXXX 100644
859
--- a/tcg/mips/tcg-target.c.inc
860
+++ b/tcg/mips/tcg-target.c.inc
861
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
862
tcg_out32(s, sync[a0 & TCG_MO_ALL]);
863
}
864
865
-static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
866
- int width, TCGReg a0, TCGReg a1, TCGArg a2)
867
-{
868
- if (use_mips32r6_instructions) {
869
- if (a2 == width) {
870
- tcg_out_opc_reg(s, opcv6, a0, a1, 0);
871
- } else {
872
- tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
873
- tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
874
- }
875
- } else {
876
- if (a2 == width) {
877
- tcg_out_opc_reg(s, opcv2, a0, a1, a1);
878
- } else if (a0 == a2) {
879
- tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
880
- tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
881
- } else if (a0 != a1) {
882
- tcg_out_opc_reg(s, opcv2, a0, a1, a1);
883
- tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
884
- } else {
885
- tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
886
- tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
887
- tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0);
888
- }
889
- }
890
-}
891
-
892
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
893
{
894
TCGReg base = TCG_REG_ZERO;
895
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
896
.base.static_constraint = C_NotImplemented,
897
};
898
899
+static void tgen_clz(TCGContext *s, TCGType type,
900
+ TCGReg a0, TCGReg a1, TCGReg a2)
901
+{
902
+ if (use_mips32r6_instructions) {
903
+ MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6;
904
+ tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
905
+ tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
906
+ } else {
907
+ MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ;
908
+ if (a0 == a2) {
909
+ tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
910
+ tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
911
+ } else if (a0 != a1) {
912
+ tcg_out_opc_reg(s, opcv2, a0, a1, a1);
913
+ tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
914
+ } else {
915
+ tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
916
+ tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
917
+ tcg_out_mov(s, type, a0, TCG_TMP0);
918
+ }
919
+ }
920
+}
921
+
922
+static void tgen_clzi(TCGContext *s, TCGType type,
923
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
924
+{
925
+ if (a2 == 0) {
926
+ tgen_clz(s, type, a0, a1, TCG_REG_ZERO);
927
+ } else if (use_mips32r6_instructions) {
928
+ MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6;
929
+ tcg_out_opc_reg(s, opcv6, a0, a1, 0);
930
+ } else {
931
+ MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ;
932
+ tcg_out_opc_reg(s, opcv2, a0, a1, a1);
933
+ }
934
+}
935
+
936
+static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags)
937
+{
938
+ return use_mips32r2_instructions ? C_O1_I2(r, r, rzW) : C_NotImplemented;
939
+}
940
+
941
+static const TCGOutOpBinary outop_clz = {
942
+ .base.static_constraint = C_Dynamic,
943
+ .base.dynamic_constraint = cset_clz,
944
+ .out_rrr = tgen_clz,
945
+ .out_rri = tgen_clzi,
946
+};
947
+
948
static void tgen_divs(TCGContext *s, TCGType type,
949
TCGReg a0, TCGReg a1, TCGReg a2)
950
{
951
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
952
tcg_out_dsra(s, a0, a1, 32);
953
break;
954
955
- case INDEX_op_clz_i32:
956
- tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
957
- break;
958
- case INDEX_op_clz_i64:
959
- tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2);
960
- break;
961
-
962
case INDEX_op_deposit_i32:
963
tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
964
break;
965
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
966
case INDEX_op_muls2_i64:
967
case INDEX_op_mulu2_i64:
968
return C_O2_I2(r, r, r, r);
969
- case INDEX_op_clz_i32:
970
- case INDEX_op_clz_i64:
971
- return C_O1_I2(r, r, rzW);
972
973
case INDEX_op_deposit_i32:
974
case INDEX_op_deposit_i64:
975
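On pre-R6 MIPS the default value is applied with the MOVZ/MOVN conditional moves, and the three branches in tgen_clz differ only in how they avoid clobbering an input that aliases the output. A hedged C model of the simplest case (a0 distinct from both inputs):

    /* CLZ followed by "MOVZ a0, a2, a1" (move when a1 == 0). */
    static uint32_t mips_clz_model(uint32_t a1, uint32_t a2)
    {
        uint32_t a0 = a1 ? __builtin_clz(a1) : 32;  /* MIPS CLZ gives 32 for 0 */
        if (a1 == 0) {
            a0 = a2;
        }
        return a0;
    }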
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
976
index XXXXXXX..XXXXXXX 100644
977
--- a/tcg/ppc/tcg-target.c.inc
978
+++ b/tcg/ppc/tcg-target.c.inc
979
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
980
.out_rrr = tgen_andc,
981
};
982
983
+static void tgen_clz(TCGContext *s, TCGType type,
984
+ TCGReg a0, TCGReg a1, TCGReg a2)
985
+{
986
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTLZW : CNTLZD;
987
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, false);
988
+}
989
+
990
+static void tgen_clzi(TCGContext *s, TCGType type,
991
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
992
+{
993
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTLZW : CNTLZD;
994
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, true);
995
+}
996
+
997
+static const TCGOutOpBinary outop_clz = {
998
+ .base.static_constraint = C_O1_I2(r, r, rZW),
999
+ .out_rrr = tgen_clz,
1000
+ .out_rri = tgen_clzi,
1001
+};
1002
+
1003
static void tgen_eqv(TCGContext *s, TCGType type,
1004
TCGReg a0, TCGReg a1, TCGReg a2)
1005
{
1006
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1007
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
1008
break;
1009
1010
- case INDEX_op_clz_i32:
1011
- tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
1012
- args[2], const_args[2]);
1013
- break;
1014
case INDEX_op_ctz_i32:
1015
tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
1016
args[2], const_args[2]);
1017
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1018
tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
1019
break;
1020
1021
- case INDEX_op_clz_i64:
1022
- tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
1023
- args[2], const_args[2]);
1024
- break;
1025
case INDEX_op_ctz_i64:
1026
tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
1027
args[2], const_args[2]);
1028
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1029
case INDEX_op_st_i64:
1030
return C_O0_I2(r, r);
1031
1032
- case INDEX_op_clz_i32:
1033
case INDEX_op_ctz_i32:
1034
- case INDEX_op_clz_i64:
1035
case INDEX_op_ctz_i64:
1036
return C_O1_I2(r, r, rZW);
1037
1038
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
1039
index XXXXXXX..XXXXXXX 100644
1040
--- a/tcg/riscv/tcg-target.c.inc
1041
+++ b/tcg/riscv/tcg-target.c.inc
1042
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
1043
.out_rrr = tgen_andc,
1044
};
1045
1046
+static void tgen_clz(TCGContext *s, TCGType type,
1047
+ TCGReg a0, TCGReg a1, TCGReg a2)
1048
+{
1049
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
1050
+ tcg_out_cltz(s, type, insn, a0, a1, a2, false);
1051
+}
1052
+
1053
+static void tgen_clzi(TCGContext *s, TCGType type,
1054
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
1055
+{
1056
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
1057
+ tcg_out_cltz(s, type, insn, a0, a1, a2, true);
1058
+}
1059
+
1060
+static TCGConstraintSetIndex cset_clzctz(TCGType type, unsigned flags)
1061
+{
1062
+ return cpuinfo & CPUINFO_ZBB ? C_N1_I2(r, r, rM) : C_NotImplemented;
1063
+}
1064
+
1065
+static const TCGOutOpBinary outop_clz = {
1066
+ .base.static_constraint = C_Dynamic,
1067
+ .base.dynamic_constraint = cset_clzctz,
1068
+ .out_rrr = tgen_clz,
1069
+ .out_rri = tgen_clzi,
1070
+};
1071
+
1072
static void tgen_divs(TCGContext *s, TCGType type,
1073
TCGReg a0, TCGReg a1, TCGReg a2)
1074
{
1075
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1076
tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
1077
break;
1078
1079
- case INDEX_op_clz_i32:
1080
- tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
1081
- break;
1082
- case INDEX_op_clz_i64:
1083
- tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
1084
- break;
1085
case INDEX_op_ctz_i32:
1086
tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
1087
break;
1088
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1089
case INDEX_op_negsetcond_i64:
1090
return C_O1_I2(r, r, rI);
1091
1092
- case INDEX_op_clz_i32:
1093
- case INDEX_op_clz_i64:
1094
case INDEX_op_ctz_i32:
1095
case INDEX_op_ctz_i64:
1096
return C_N1_I2(r, r, rM);
1097
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
1098
index XXXXXXX..XXXXXXX 100644
1099
--- a/tcg/s390x/tcg-target.c.inc
1100
+++ b/tcg/s390x/tcg-target.c.inc
1101
@@ -XXX,XX +XXX,XX @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1102
tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
1103
}
1104
1105
-static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1106
- TCGArg a2, int a2const)
1107
-{
1108
- /* Since this sets both R and R+1, we have no choice but to store the
1109
- result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1110
- QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1111
- tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1112
-
1113
- if (a2const && a2 == 64) {
1114
- tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1115
- return;
1116
- }
1117
-
1118
- /*
1119
- * Conditions from FLOGR are:
1120
- * 2 -> one bit found
1121
- * 8 -> no one bit found
1122
- */
1123
- tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
1124
-}
1125
-
1126
static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
1127
{
1128
/* With MIE3, and bit 0 of m4 set, we get the complete result. */
1129
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
1130
.out_rrr = tgen_andc,
1131
};
1132
1133
+static void tgen_clz_int(TCGContext *s, TCGReg dest, TCGReg a1,
1134
+ TCGArg a2, int a2const)
1135
+{
1136
+ /*
1137
+ * Since this sets both R and R+1, we have no choice but to store the
1138
+ * result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.
1139
+ */
1140
+ QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1141
+ tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1142
+
1143
+ if (a2const && a2 == 64) {
1144
+ tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1145
+ return;
1146
+ }
1147
+
1148
+ /*
1149
+ * Conditions from FLOGR are:
1150
+ * 2 -> one bit found
1151
+ * 8 -> no one bit found
1152
+ */
1153
+ tgen_movcond_int(s, TCG_TYPE_I64, dest, a2, a2const, TCG_REG_R0, 8, 2);
1154
+}
1155
+
1156
+static void tgen_clz(TCGContext *s, TCGType type,
1157
+ TCGReg a0, TCGReg a1, TCGReg a2)
1158
+{
1159
+ tgen_clz_int(s, a0, a1, a2, false);
1160
+}
1161
+
1162
+static void tgen_clzi(TCGContext *s, TCGType type,
1163
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
1164
+{
1165
+ tgen_clz_int(s, a0, a1, a2, true);
1166
+}
1167
+
1168
+static TCGConstraintSetIndex cset_clz(TCGType type, unsigned flags)
1169
+{
1170
+ return type == TCG_TYPE_I64 ? C_O1_I2(r, r, rI) : C_NotImplemented;
1171
+}
1172
+
1173
+static const TCGOutOpBinary outop_clz = {
1174
+ .base.static_constraint = C_Dynamic,
1175
+ .base.dynamic_constraint = cset_clz,
1176
+ .out_rrr = tgen_clz,
1177
+ .out_rri = tgen_clzi,
1178
+};
1179
+
1180
static const TCGOutOpBinary outop_divs = {
1181
.base.static_constraint = C_NotImplemented,
1182
};
1183
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1184
tgen_sextract(s, args[0], args[1], args[2], args[3]);
1185
break;
1186
1187
- case INDEX_op_clz_i64:
1188
- tgen_clz(s, args[0], args[1], args[2], const_args[2]);
1189
- break;
1190
-
1191
case INDEX_op_ctpop_i32:
1192
tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
1193
break;
1194
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1195
case INDEX_op_negsetcond_i64:
1196
return C_O1_I2(r, r, rC);
1197
1198
- case INDEX_op_clz_i64:
1199
- return C_O1_I2(r, r, rI);
1200
-
1201
case INDEX_op_brcond_i32:
1202
return C_O0_I2(r, ri);
1203
case INDEX_op_brcond_i64:
1204
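FLOGR writes an even/odd register pair, which is why tgen_clz_int pins the count to R0 and allows R1 (== TCG_TMP0) to be clobbered; the movcond then uses the condition-code masks noted in the comment (2 for one bit found, 8 for none) to substitute the caller's default. Roughly, as a C model with the pair's second output ignored:

    /* FLOGR-based clz: count in R0, cc chooses the default. */
    static uint64_t s390x_clz_model(uint64_t a1, uint64_t dflt)
    {
        if (a1 == 0) {
            return dflt;               /* dflt == 64 matches R0 directly */
        }
        return __builtin_clzll(a1);
    }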
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
1205
index XXXXXXX..XXXXXXX 100644
1206
--- a/tcg/sparc64/tcg-target.c.inc
1207
+++ b/tcg/sparc64/tcg-target.c.inc
1208
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
1209
.out_rrr = tgen_andc,
1210
};
1211
1212
+static const TCGOutOpBinary outop_clz = {
1213
+ .base.static_constraint = C_NotImplemented,
1214
+};
1215
+
1216
static void tgen_divs_rJ(TCGContext *s, TCGType type,
1217
TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
1218
{
1219
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
1220
index XXXXXXX..XXXXXXX 100644
1221
--- a/tcg/tci/tcg-target-opc.h.inc
1222
+++ b/tcg/tci/tcg-target-opc.h.inc
1223
@@ -XXX,XX +XXX,XX @@
1224
/* These opcodes for use between the tci generator and interpreter. */
1225
DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
1226
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
1227
+DEF(tci_clz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1228
DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1229
DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1230
DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
1231
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
1232
index XXXXXXX..XXXXXXX 100644
1233
--- a/tcg/tci/tcg-target.c.inc
1234
+++ b/tcg/tci/tcg-target.c.inc
1235
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1236
case INDEX_op_setcond_i64:
1237
case INDEX_op_deposit_i32:
1238
case INDEX_op_deposit_i64:
1239
- case INDEX_op_clz_i32:
1240
- case INDEX_op_clz_i64:
1241
case INDEX_op_ctz_i32:
1242
case INDEX_op_ctz_i64:
1243
return C_O1_I2(r, r, r);
1244
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_andc = {
1245
.out_rrr = tgen_andc,
1246
};
1247
1248
+static void tgen_clz(TCGContext *s, TCGType type,
1249
+ TCGReg a0, TCGReg a1, TCGReg a2)
1250
+{
1251
+ TCGOpcode opc = (type == TCG_TYPE_I32
1252
+ ? INDEX_op_tci_clz32
1253
+ : INDEX_op_clz_i64);
1254
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
1255
+}
1256
+
1257
+static const TCGOutOpBinary outop_clz = {
1258
+ .base.static_constraint = C_O1_I2(r, r, r),
1259
+ .out_rrr = tgen_clz,
1260
+};
1261
+
1262
static void tgen_divs(TCGContext *s, TCGType type,
1263
TCGReg a0, TCGReg a1, TCGReg a2)
1264
{
1265
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1266
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
1267
break;
1268
1269
- CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */
1270
CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
1271
tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
1272
break;
1273
--
1274
2.43.0
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 10 +++++-----
6
tcg/tcg-op.c | 22 ++++++++++------------
7
tcg/tcg.c | 6 ++----
8
tcg/tci.c | 4 ++--
9
docs/devel/tcg-ops.rst | 2 +-
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 22 insertions(+), 27 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
18
DEF(add, 1, 2, 0, TCG_OPF_INT)
19
DEF(and, 1, 2, 0, TCG_OPF_INT)
20
DEF(andc, 1, 2, 0, TCG_OPF_INT)
21
+DEF(clz, 1, 2, 0, TCG_OPF_INT)
22
DEF(divs, 1, 2, 0, TCG_OPF_INT)
23
DEF(divs2, 2, 3, 0, TCG_OPF_INT)
24
DEF(divu, 1, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)
26
27
DEF(bswap16_i32, 1, 1, 1, 0)
28
DEF(bswap32_i32, 1, 1, 1, 0)
29
-DEF(clz_i32, 1, 2, 0, 0)
30
DEF(ctz_i32, 1, 2, 0, 0)
31
DEF(ctpop_i32, 1, 1, 0, 0)
32
33
@@ -XXX,XX +XXX,XX @@ DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
34
DEF(bswap16_i64, 1, 1, 1, 0)
35
DEF(bswap32_i64, 1, 1, 1, 0)
36
DEF(bswap64_i64, 1, 1, 1, 0)
37
-DEF(clz_i64, 1, 2, 0, 0)
38
DEF(ctz_i64, 1, 2, 0, 0)
39
DEF(ctpop_i64, 1, 1, 0, 0)
40
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
46
case INDEX_op_nor_vec:
47
return ~(x | y);
48
49
- case INDEX_op_clz_i32:
50
- return (uint32_t)x ? clz32(x) : y;
51
-
52
- case INDEX_op_clz_i64:
53
+ case INDEX_op_clz:
54
+ if (type == TCG_TYPE_I32) {
55
+ return (uint32_t)x ? clz32(x) : y;
56
+ }
57
return x ? clz64(x) : y;
58
59
case INDEX_op_ctz_i32:
60
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
61
case INDEX_op_bswap64_i64:
62
done = fold_bswap(&ctx, op);
63
break;
64
- CASE_OP_32_64(clz):
65
+ case INDEX_op_clz:
66
CASE_OP_32_64(ctz):
67
done = fold_count_zeros(&ctx, op);
68
break;
69
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/tcg-op.c
72
+++ b/tcg/tcg-op.c
73
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
74
75
void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
76
{
77
- if (tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0)) {
78
- tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
79
- } else if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
80
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I32, 0)) {
81
+ tcg_gen_op3_i32(INDEX_op_clz, ret, arg1, arg2);
82
+ } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
83
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
84
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
85
tcg_gen_extu_i32_i64(t1, arg1);
86
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
87
tcg_gen_subi_i32(t, arg1, 1);
88
tcg_gen_andc_i32(t, t, arg1);
89
tcg_gen_ctpop_i32(t, t);
90
- } else if (tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0) ||
91
- tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
92
+ } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_REG, 0)) {
93
t = tcg_temp_ebb_new_i32();
94
tcg_gen_neg_i32(t, arg1);
95
tcg_gen_and_i32(t, t, arg1);
96
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
97
98
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
99
{
100
- if (tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0) ||
101
- tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
102
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_REG, 0)) {
103
TCGv_i32 t = tcg_temp_ebb_new_i32();
104
tcg_gen_sari_i32(t, arg, 31);
105
tcg_gen_xor_i32(t, t, arg);
106
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
107
108
void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
109
{
110
- if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
111
- tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
112
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
113
+ tcg_gen_op3_i64(INDEX_op_clz, ret, arg1, arg2);
114
} else {
115
gen_helper_clz_i64(ret, arg1, arg2);
116
}
117
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
118
{
119
if (TCG_TARGET_REG_BITS == 32
120
&& arg2 <= 0xffffffffu
121
- && tcg_op_supported(INDEX_op_clz_i32, TCG_TYPE_I32, 0)) {
122
+ && tcg_op_supported(INDEX_op_clz, TCG_TYPE_I32, 0)) {
123
TCGv_i32 t = tcg_temp_ebb_new_i32();
124
tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32);
125
tcg_gen_addi_i32(t, t, 32);
126
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
127
tcg_gen_subi_i64(t, arg1, 1);
128
tcg_gen_andc_i64(t, t, arg1);
129
tcg_gen_ctpop_i64(t, t);
130
- } else if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
131
+ } else if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
132
t = tcg_temp_ebb_new_i64();
133
tcg_gen_neg_i64(t, arg1);
134
tcg_gen_and_i64(t, t, arg1);
135
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
136
137
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
138
{
139
- if (tcg_op_supported(INDEX_op_clz_i64, TCG_TYPE_I64, 0)) {
140
+ if (tcg_op_supported(INDEX_op_clz, TCG_TYPE_I64, 0)) {
141
TCGv_i64 t = tcg_temp_ebb_new_i64();
142
tcg_gen_sari_i64(t, arg, 63);
143
tcg_gen_xor_i64(t, t, arg);
144
diff --git a/tcg/tcg.c b/tcg/tcg.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/tcg/tcg.c
147
+++ b/tcg/tcg.c
148
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
149
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
150
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
151
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
152
- OUTOP(INDEX_op_clz_i32, TCGOutOpBinary, outop_clz),
153
- OUTOP(INDEX_op_clz_i64, TCGOutOpBinary, outop_clz),
154
+ OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
155
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
156
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
157
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
158
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
159
case INDEX_op_add:
160
case INDEX_op_and:
161
case INDEX_op_andc:
162
- case INDEX_op_clz_i32:
163
- case INDEX_op_clz_i64:
164
+ case INDEX_op_clz:
165
case INDEX_op_divs:
166
case INDEX_op_divu:
167
case INDEX_op_eqv:
168
diff --git a/tcg/tci.c b/tcg/tci.c
169
index XXXXXXX..XXXXXXX 100644
170
--- a/tcg/tci.c
171
+++ b/tcg/tci.c
172
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
173
tci_args_rrr(insn, &r0, &r1, &r2);
174
regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
175
break;
176
- case INDEX_op_clz_i64:
177
+ case INDEX_op_clz:
178
tci_args_rrr(insn, &r0, &r1, &r2);
179
regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
180
break;
181
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
182
case INDEX_op_add:
183
case INDEX_op_and:
184
case INDEX_op_andc:
185
+ case INDEX_op_clz:
186
case INDEX_op_divs:
187
case INDEX_op_divu:
188
case INDEX_op_eqv:
189
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
190
case INDEX_op_shr:
191
case INDEX_op_sub:
192
case INDEX_op_xor:
193
- case INDEX_op_clz_i64:
194
case INDEX_op_ctz_i32:
195
case INDEX_op_ctz_i64:
196
case INDEX_op_tci_clz32:
197
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
198
index XXXXXXX..XXXXXXX 100644
199
--- a/docs/devel/tcg-ops.rst
200
+++ b/docs/devel/tcg-ops.rst
201
@@ -XXX,XX +XXX,XX @@ Logical
202
203
- | *t0* = *t1* | ~\ *t2*
204
205
- * - clz_i32/i64 *t0*, *t1*, *t2*
206
+ * - clz *t0*, *t1*, *t2*
207
208
- | *t0* = *t1* ? clz(*t1*) : *t2*
209
210
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
211
index XXXXXXX..XXXXXXX 100644
212
--- a/tcg/tci/tcg-target.c.inc
213
+++ b/tcg/tci/tcg-target.c.inc
214
@@ -XXX,XX +XXX,XX @@ static void tgen_clz(TCGContext *s, TCGType type,
215
{
216
TCGOpcode opc = (type == TCG_TYPE_I32
217
? INDEX_op_tci_clz32
218
- : INDEX_op_clz_i64);
219
+ : INDEX_op_clz);
220
tcg_out_op_rrr(s, opc, a0, a1, a2);
221
}
222
223
--
224
2.43.0
225
226
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/aarch64/tcg-target-has.h | 2 --
5
tcg/arm/tcg-target-has.h | 1 -
6
tcg/i386/tcg-target-has.h | 2 --
7
tcg/loongarch64/tcg-target-has.h | 2 --
8
tcg/mips/tcg-target-has.h | 2 --
9
tcg/ppc/tcg-target-has.h | 2 --
10
tcg/riscv/tcg-target-has.h | 2 --
11
tcg/s390x/tcg-target-has.h | 2 --
12
tcg/sparc64/tcg-target-has.h | 2 --
13
tcg/tcg-has.h | 1 -
14
tcg/tci/tcg-target-has.h | 2 --
15
tcg/tcg-op.c | 9 ++---
16
tcg/tcg.c | 8 ++---
17
tcg/tci.c | 8 ++---
18
tcg/aarch64/tcg-target.c.inc | 34 ++++++++++--------
19
tcg/arm/tcg-target.c.inc | 39 ++++++++++++++-------
20
tcg/i386/tcg-target.c.inc | 60 +++++++++++++++++++-------------
21
tcg/loongarch64/tcg-target.c.inc | 60 ++++++++++++++------------------
22
tcg/mips/tcg-target.c.inc | 4 +++
23
tcg/ppc/tcg-target.c.inc | 39 ++++++++++++++-------
24
tcg/riscv/tcg-target.c.inc | 32 +++++++++++------
25
tcg/s390x/tcg-target.c.inc | 4 +++
26
tcg/sparc64/tcg-target.c.inc | 4 +++
27
tcg/tci/tcg-target-opc.h.inc | 1 +
28
tcg/tci/tcg-target.c.inc | 20 +++++++----
29
25 files changed, 193 insertions(+), 149 deletions(-)
30
1
31
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/aarch64/tcg-target-has.h
34
+++ b/tcg/aarch64/tcg-target-has.h
35
@@ -XXX,XX +XXX,XX @@
36
/* optional instructions */
37
#define TCG_TARGET_HAS_bswap16_i32 1
38
#define TCG_TARGET_HAS_bswap32_i32 1
39
-#define TCG_TARGET_HAS_ctz_i32 1
40
#define TCG_TARGET_HAS_ctpop_i32 0
41
#define TCG_TARGET_HAS_extract2_i32 1
42
#define TCG_TARGET_HAS_negsetcond_i32 1
43
@@ -XXX,XX +XXX,XX @@
44
#define TCG_TARGET_HAS_bswap16_i64 1
45
#define TCG_TARGET_HAS_bswap32_i64 1
46
#define TCG_TARGET_HAS_bswap64_i64 1
47
-#define TCG_TARGET_HAS_ctz_i64 1
48
#define TCG_TARGET_HAS_ctpop_i64 0
49
#define TCG_TARGET_HAS_extract2_i64 1
50
#define TCG_TARGET_HAS_negsetcond_i64 1
51
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tcg/arm/tcg-target-has.h
54
+++ b/tcg/arm/tcg-target-has.h
55
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
56
/* optional instructions */
57
#define TCG_TARGET_HAS_bswap16_i32 1
58
#define TCG_TARGET_HAS_bswap32_i32 1
59
-#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
60
#define TCG_TARGET_HAS_ctpop_i32 0
61
#define TCG_TARGET_HAS_extract2_i32 1
62
#define TCG_TARGET_HAS_negsetcond_i32 1
63
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/i386/tcg-target-has.h
66
+++ b/tcg/i386/tcg-target-has.h
67
@@ -XXX,XX +XXX,XX @@
68
/* optional instructions */
69
#define TCG_TARGET_HAS_bswap16_i32 1
70
#define TCG_TARGET_HAS_bswap32_i32 1
71
-#define TCG_TARGET_HAS_ctz_i32 1
72
#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
73
#define TCG_TARGET_HAS_extract2_i32 1
74
#define TCG_TARGET_HAS_negsetcond_i32 1
75
@@ -XXX,XX +XXX,XX @@
76
#define TCG_TARGET_HAS_bswap16_i64 1
77
#define TCG_TARGET_HAS_bswap32_i64 1
78
#define TCG_TARGET_HAS_bswap64_i64 1
79
-#define TCG_TARGET_HAS_ctz_i64 1
80
#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
81
#define TCG_TARGET_HAS_extract2_i64 1
82
#define TCG_TARGET_HAS_negsetcond_i64 1
83
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/loongarch64/tcg-target-has.h
86
+++ b/tcg/loongarch64/tcg-target-has.h
87
@@ -XXX,XX +XXX,XX @@
88
#define TCG_TARGET_HAS_muls2_i32 0
89
#define TCG_TARGET_HAS_bswap16_i32 1
90
#define TCG_TARGET_HAS_bswap32_i32 1
91
-#define TCG_TARGET_HAS_ctz_i32 1
92
#define TCG_TARGET_HAS_ctpop_i32 0
93
#define TCG_TARGET_HAS_qemu_st8_i32 0
94
95
@@ -XXX,XX +XXX,XX @@
96
#define TCG_TARGET_HAS_bswap16_i64 1
97
#define TCG_TARGET_HAS_bswap32_i64 1
98
#define TCG_TARGET_HAS_bswap64_i64 1
99
-#define TCG_TARGET_HAS_ctz_i64 1
100
#define TCG_TARGET_HAS_ctpop_i64 0
101
#define TCG_TARGET_HAS_add2_i64 0
102
#define TCG_TARGET_HAS_sub2_i64 0
103
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/mips/tcg-target-has.h
106
+++ b/tcg/mips/tcg-target-has.h
107
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
108
109
/* optional instructions detected at runtime */
110
#define TCG_TARGET_HAS_extract2_i32 0
111
-#define TCG_TARGET_HAS_ctz_i32 0
112
#define TCG_TARGET_HAS_ctpop_i32 0
113
#define TCG_TARGET_HAS_qemu_st8_i32 0
114
115
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
116
#define TCG_TARGET_HAS_bswap32_i64 1
117
#define TCG_TARGET_HAS_bswap64_i64 1
118
#define TCG_TARGET_HAS_extract2_i64 0
119
-#define TCG_TARGET_HAS_ctz_i64 0
120
#define TCG_TARGET_HAS_ctpop_i64 0
121
#endif
122
123
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/ppc/tcg-target-has.h
126
+++ b/tcg/ppc/tcg-target-has.h
127
@@ -XXX,XX +XXX,XX @@
128
/* optional instructions */
129
#define TCG_TARGET_HAS_bswap16_i32 1
130
#define TCG_TARGET_HAS_bswap32_i32 1
131
-#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
132
#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
133
#define TCG_TARGET_HAS_extract2_i32 0
134
#define TCG_TARGET_HAS_negsetcond_i32 1
135
@@ -XXX,XX +XXX,XX @@
136
#define TCG_TARGET_HAS_bswap16_i64 1
137
#define TCG_TARGET_HAS_bswap32_i64 1
138
#define TCG_TARGET_HAS_bswap64_i64 1
139
-#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
140
#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
141
#define TCG_TARGET_HAS_extract2_i64 0
142
#define TCG_TARGET_HAS_negsetcond_i64 1
143
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
144
index XXXXXXX..XXXXXXX 100644
145
--- a/tcg/riscv/tcg-target-has.h
146
+++ b/tcg/riscv/tcg-target-has.h
147
@@ -XXX,XX +XXX,XX @@
148
#define TCG_TARGET_HAS_muls2_i32 0
149
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
150
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
151
-#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
152
#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
153
#define TCG_TARGET_HAS_qemu_st8_i32 0
154
155
@@ -XXX,XX +XXX,XX @@
156
#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
157
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
158
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
159
-#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
160
#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
161
#define TCG_TARGET_HAS_add2_i64 1
162
#define TCG_TARGET_HAS_sub2_i64 1
163
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/s390x/tcg-target-has.h
166
+++ b/tcg/s390x/tcg-target-has.h
167
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
168
/* optional instructions */
169
#define TCG_TARGET_HAS_bswap16_i32 1
170
#define TCG_TARGET_HAS_bswap32_i32 1
171
-#define TCG_TARGET_HAS_ctz_i32 0
172
#define TCG_TARGET_HAS_ctpop_i32 1
173
#define TCG_TARGET_HAS_extract2_i32 0
174
#define TCG_TARGET_HAS_negsetcond_i32 1
175
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
176
#define TCG_TARGET_HAS_bswap16_i64 1
177
#define TCG_TARGET_HAS_bswap32_i64 1
178
#define TCG_TARGET_HAS_bswap64_i64 1
179
-#define TCG_TARGET_HAS_ctz_i64 0
180
#define TCG_TARGET_HAS_ctpop_i64 1
181
#define TCG_TARGET_HAS_extract2_i64 0
182
#define TCG_TARGET_HAS_negsetcond_i64 1
183
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
184
index XXXXXXX..XXXXXXX 100644
185
--- a/tcg/sparc64/tcg-target-has.h
186
+++ b/tcg/sparc64/tcg-target-has.h
187
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
188
/* optional instructions */
189
#define TCG_TARGET_HAS_bswap16_i32 0
190
#define TCG_TARGET_HAS_bswap32_i32 0
191
-#define TCG_TARGET_HAS_ctz_i32 0
192
#define TCG_TARGET_HAS_ctpop_i32 0
193
#define TCG_TARGET_HAS_extract2_i32 0
194
#define TCG_TARGET_HAS_negsetcond_i32 1
195
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
196
#define TCG_TARGET_HAS_bswap16_i64 0
197
#define TCG_TARGET_HAS_bswap32_i64 0
198
#define TCG_TARGET_HAS_bswap64_i64 0
199
-#define TCG_TARGET_HAS_ctz_i64 0
200
#define TCG_TARGET_HAS_ctpop_i64 0
201
#define TCG_TARGET_HAS_extract2_i64 0
202
#define TCG_TARGET_HAS_negsetcond_i64 1
203
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
204
index XXXXXXX..XXXXXXX 100644
205
--- a/tcg/tcg-has.h
206
+++ b/tcg/tcg-has.h
207
@@ -XXX,XX +XXX,XX @@
208
#define TCG_TARGET_HAS_bswap16_i64 0
209
#define TCG_TARGET_HAS_bswap32_i64 0
210
#define TCG_TARGET_HAS_bswap64_i64 0
211
-#define TCG_TARGET_HAS_ctz_i64 0
212
#define TCG_TARGET_HAS_ctpop_i64 0
213
#define TCG_TARGET_HAS_extract2_i64 0
214
#define TCG_TARGET_HAS_negsetcond_i64 0
215
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
216
index XXXXXXX..XXXXXXX 100644
217
--- a/tcg/tci/tcg-target-has.h
218
+++ b/tcg/tci/tcg-target-has.h
219
@@ -XXX,XX +XXX,XX @@
220
#define TCG_TARGET_HAS_bswap16_i32 1
221
#define TCG_TARGET_HAS_bswap32_i32 1
222
#define TCG_TARGET_HAS_extract2_i32 0
223
-#define TCG_TARGET_HAS_ctz_i32 1
224
#define TCG_TARGET_HAS_ctpop_i32 1
225
#define TCG_TARGET_HAS_negsetcond_i32 0
226
#define TCG_TARGET_HAS_muls2_i32 1
227
@@ -XXX,XX +XXX,XX @@
228
#define TCG_TARGET_HAS_bswap32_i64 1
229
#define TCG_TARGET_HAS_bswap64_i64 1
230
#define TCG_TARGET_HAS_extract2_i64 0
231
-#define TCG_TARGET_HAS_ctz_i64 1
232
#define TCG_TARGET_HAS_ctpop_i64 1
233
#define TCG_TARGET_HAS_negsetcond_i64 0
234
#define TCG_TARGET_HAS_muls2_i64 1
235
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
236
index XXXXXXX..XXXXXXX 100644
237
--- a/tcg/tcg-op.c
238
+++ b/tcg/tcg-op.c
239
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
240
{
241
TCGv_i32 z, t;
242
243
- if (TCG_TARGET_HAS_ctz_i32) {
244
+ if (tcg_op_supported(INDEX_op_ctz_i32, TCG_TYPE_I32, 0)) {
245
tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
246
return;
247
}
248
- if (TCG_TARGET_HAS_ctz_i64) {
249
+ if (tcg_op_supported(INDEX_op_ctz_i64, TCG_TYPE_I64, 0)) {
250
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
251
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
252
tcg_gen_extu_i32_i64(t1, arg1);
253
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
254
255
void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
256
{
257
- if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
258
+ if (!tcg_op_supported(INDEX_op_ctz_i32, TCG_TYPE_I32, 0)
259
+ && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
260
/* This equivalence has the advantage of not requiring a fixup. */
261
TCGv_i32 t = tcg_temp_ebb_new_i32();
262
tcg_gen_subi_i32(t, arg1, 1);
263
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
264
{
265
TCGv_i64 z, t;
266
267
- if (TCG_TARGET_HAS_ctz_i64) {
268
+ if (tcg_op_supported(INDEX_op_ctz_i64, TCG_TYPE_I64, 0)) {
269
tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
270
return;
271
}
272
diff --git a/tcg/tcg.c b/tcg/tcg.c
273
index XXXXXXX..XXXXXXX 100644
274
--- a/tcg/tcg.c
275
+++ b/tcg/tcg.c
276
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
277
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
278
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
279
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
280
+ OUTOP(INDEX_op_ctz_i32, TCGOutOpBinary, outop_ctz),
281
+ OUTOP(INDEX_op_ctz_i64, TCGOutOpBinary, outop_ctz),
282
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
283
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
284
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
285
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
286
return TCG_TARGET_HAS_bswap16_i32;
287
case INDEX_op_bswap32_i32:
288
return TCG_TARGET_HAS_bswap32_i32;
289
- case INDEX_op_ctz_i32:
290
- return TCG_TARGET_HAS_ctz_i32;
291
case INDEX_op_ctpop_i32:
292
return TCG_TARGET_HAS_ctpop_i32;
293
294
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
295
return TCG_TARGET_HAS_bswap32_i64;
296
case INDEX_op_bswap64_i64:
297
return TCG_TARGET_HAS_bswap64_i64;
298
- case INDEX_op_ctz_i64:
299
- return TCG_TARGET_HAS_ctz_i64;
300
case INDEX_op_ctpop_i64:
301
return TCG_TARGET_HAS_ctpop_i64;
302
case INDEX_op_add2_i64:
303
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
304
case INDEX_op_and:
305
case INDEX_op_andc:
306
case INDEX_op_clz:
307
+ case INDEX_op_ctz_i32:
308
+ case INDEX_op_ctz_i64:
309
case INDEX_op_divs:
310
case INDEX_op_divu:
311
case INDEX_op_eqv:
312
diff --git a/tcg/tci.c b/tcg/tci.c
313
index XXXXXXX..XXXXXXX 100644
314
--- a/tcg/tci.c
315
+++ b/tcg/tci.c
316
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
317
tmp32 = regs[r1];
318
regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
319
break;
320
-#if TCG_TARGET_HAS_ctz_i32
321
- case INDEX_op_ctz_i32:
322
+ case INDEX_op_tci_ctz32:
323
tci_args_rrr(insn, &r0, &r1, &r2);
324
tmp32 = regs[r1];
325
regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
326
break;
327
-#endif
328
#if TCG_TARGET_HAS_ctpop_i32
329
case INDEX_op_ctpop_i32:
330
tci_args_rr(insn, &r0, &r1);
331
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
332
tci_args_rrr(insn, &r0, &r1, &r2);
333
regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
334
break;
335
-#if TCG_TARGET_HAS_ctz_i64
336
case INDEX_op_ctz_i64:
337
tci_args_rrr(insn, &r0, &r1, &r2);
338
regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
339
break;
340
-#endif
341
#if TCG_TARGET_HAS_ctpop_i64
342
case INDEX_op_ctpop_i64:
343
tci_args_rr(insn, &r0, &r1);
344
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
345
case INDEX_op_shr:
346
case INDEX_op_sub:
347
case INDEX_op_xor:
348
- case INDEX_op_ctz_i32:
349
case INDEX_op_ctz_i64:
350
+ case INDEX_op_tci_ctz32:
351
case INDEX_op_tci_clz32:
352
case INDEX_op_tci_divs32:
353
case INDEX_op_tci_divu32:
354
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
355
index XXXXXXX..XXXXXXX 100644
356
--- a/tcg/aarch64/tcg-target.c.inc
357
+++ b/tcg/aarch64/tcg-target.c.inc
358
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
359
.out_rri = tgen_clzi,
360
};
361
362
+static void tgen_ctz(TCGContext *s, TCGType type,
363
+ TCGReg a0, TCGReg a1, TCGReg a2)
364
+{
365
+ tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1);
366
+ tgen_clz(s, type, a0, TCG_REG_TMP0, a2);
367
+}
368
+
369
+static void tgen_ctzi(TCGContext *s, TCGType type,
370
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
371
+{
372
+ tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1);
373
+ tgen_clzi(s, type, a0, TCG_REG_TMP0, a2);
374
+}
375
+
376
+static const TCGOutOpBinary outop_ctz = {
377
+ .base.static_constraint = C_O1_I2(r, r, rAL),
378
+ .out_rrr = tgen_ctz,
379
+ .out_rri = tgen_ctzi,
380
+};
381
+
382
static void tgen_divs(TCGContext *s, TCGType type,
383
TCGReg a0, TCGReg a1, TCGReg a2)
384
{
385
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
386
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
387
break;
388
389
- case INDEX_op_ctz_i64:
390
- case INDEX_op_ctz_i32:
391
- tcg_out_insn(s, 3507, RBIT, ext, TCG_REG_TMP0, a1);
392
- if (c2) {
393
- tgen_clzi(s, ext, a0, TCG_REG_TMP0, a2);
394
- } else {
395
- tgen_clz(s, ext, a0, TCG_REG_TMP0, a2);
396
- }
397
- break;
398
-
399
case INDEX_op_brcond_i32:
400
a1 = (int32_t)a1;
401
/* FALLTHRU */
402
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
403
case INDEX_op_negsetcond_i64:
404
return C_O1_I2(r, r, rC);
405
406
- case INDEX_op_ctz_i32:
407
- case INDEX_op_ctz_i64:
408
- return C_O1_I2(r, r, rAL);
409
-
410
case INDEX_op_brcond_i32:
411
case INDEX_op_brcond_i64:
412
return C_O0_I2(r, rC);
413
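tgen_ctz above reduces ctz to the existing clz helpers by bit-reversing the input first, since reversing turns trailing zeros into leading zeros. A portable check of the identity (the loop stands in for RBIT; x != 0):

    /* ctz(x) == clz(reverse_bits(x)). */
    static unsigned ctz64_via_rbit(uint64_t x)
    {
        uint64_t r = 0;
        for (int i = 0; i < 64; i++) {
            r |= ((x >> i) & 1ULL) << (63 - i);
        }
        return __builtin_clzll(r);
    }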
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
414
index XXXXXXX..XXXXXXX 100644
415
--- a/tcg/arm/tcg-target.c.inc
416
+++ b/tcg/arm/tcg-target.c.inc
417
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
418
.out_rri = tgen_clzi,
419
};
420
421
+static void tgen_ctz(TCGContext *s, TCGType type,
422
+ TCGReg a0, TCGReg a1, TCGReg a2)
423
+{
424
+ tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
425
+ tgen_clz(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
426
+}
427
+
428
+static void tgen_ctzi(TCGContext *s, TCGType type,
429
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
430
+{
431
+ tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
432
+ tgen_clzi(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
433
+}
434
+
435
+static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
436
+{
437
+ return use_armv7_instructions ? C_O1_I2(r, r, rIK) : C_NotImplemented;
438
+}
439
+
440
+static const TCGOutOpBinary outop_ctz = {
441
+ .base.static_constraint = C_Dynamic,
442
+ .base.dynamic_constraint = cset_ctz,
443
+ .out_rrr = tgen_ctz,
444
+ .out_rri = tgen_ctzi,
445
+};
446
+
447
static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
448
{
449
return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
450
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
451
tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
452
break;
453
454
- case INDEX_op_ctz_i32:
455
- tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
456
- if (const_args[2]) {
457
- tgen_clzi(s, TCG_TYPE_I32, args[0], TCG_REG_TMP, args[2]);
458
- } else {
459
- tgen_clz(s, TCG_TYPE_I32, args[0], TCG_REG_TMP, args[2]);
460
- }
461
- break;
462
-
463
case INDEX_op_brcond_i32:
464
c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
465
tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i32:
return C_O1_I2(r, r, rIN);

- case INDEX_op_clz_i32:
- case INDEX_op_ctz_i32:
- return C_O1_I2(r, r, rIK);
-
case INDEX_op_mulu2_i32:
case INDEX_op_muls2_i32:
return C_O2_I2(r, r, r, r);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
tcg_out_cmov(s, jcc, rexw, dest, v1);
}

-static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
- TCGArg arg2, bool const_a2)
-{
- if (have_bmi1) {
- tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
- if (const_a2) {
- tcg_debug_assert(arg2 == (rexw ? 64 : 32));
- } else {
- tcg_debug_assert(dest != arg2);
- tcg_out_cmov(s, JCC_JB, rexw, dest, arg2);
- }
- } else {
- tcg_debug_assert(dest != arg2);
- tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
- tcg_out_cmov(s, JCC_JE, rexw, dest, arg2);
- }
-}
-
static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
{
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ int jcc;
+
+ if (have_bmi1) {
+ tcg_out_modrm(s, OPC_TZCNT + rexw, a0, a1);
+ jcc = JCC_JB;
+ } else {
+ tcg_out_modrm(s, OPC_BSF + rexw, a0, a1);
+ jcc = JCC_JE;
+ }
+ tcg_out_cmov(s, jcc, rexw, a0, a2);
+}
+
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_TZCNT + rexw, a0, a1);
+}
+
+static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
+{
+ return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
+
static const TCGOutOpBinary outop_divs = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(ctz):
- tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
- break;
OP_32_64(ctpop):
tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub2_i64:
return C_N1_O1_I4(r, r, 0, 1, re, re);

- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
-
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, L);

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
tcg_out_ext32s(s, ret, arg);
}

-static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
- TCGReg a0, TCGReg a1, TCGReg a2,
- bool c2, bool is_32bit)
-{
- if (c2) {
- /*
- * Fast path: semantics already satisfied due to constraint and
- * insn behavior, single instruction is enough.
- */
- tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
- /* all clz/ctz insns belong to DJ-format */
- tcg_out32(s, encode_dj_insn(opc, a0, a1));
- return;
- }
-
- tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
- /* a0 = a1 ? REG_TMP0 : a2 */
- tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
- tcg_out_opc_masknez(s, a0, a2, a1);
- tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
-}
-
#define SETCOND_INV TCG_TARGET_NB_REGS
#define SETCOND_NEZ (SETCOND_INV << 1)
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* a2 is constrained to exactly the type width. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_ctz_w(s, a0, a1);
+ } else {
+ tcg_out_opc_ctz_d(s, a0, a1);
+ }
+}
+
+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_ctzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0);
+ /* a0 = a1 ? REG_TMP0 : a2 */
+ tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
+ tcg_out_opc_masknez(s, a0, a2, a1);
+ tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0);
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_O1_I2(r, r, rW),
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
+
static void tgen_divs(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_revb_d(s, a0, a1);
break;

- case INDEX_op_ctz_i32:
- tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
- break;
- case INDEX_op_ctz_i64:
- tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
- break;
-
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
tcg_out_setcond(s, args[3], a0, a1, a2, c2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);

- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return C_O1_I2(r, r, rW);
-
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
/* Must deposit into the same register as input */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_divs(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTTZW : CNTTZD;
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, false);
+}
+
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTTZW : CNTTZD;
+ tcg_out_cntxz(s, type, insn, a0, a1, a2, true);
+}
+
+static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
+{
+ return have_isa_3_00 ? C_O1_I2(r, r, rZW) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
+
static void tgen_eqv(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;

- case INDEX_op_ctz_i32:
- tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
- args[2], const_args[2]);
- break;
case INDEX_op_ctpop_i32:
tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
break;
-
- case INDEX_op_ctz_i64:
- tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
- args[2], const_args[2]);
- break;
case INDEX_op_ctpop_i64:
tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return C_O1_I2(r, r, rZW);
-
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return C_O0_I2(r, rC);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
+ tcg_out_cltz(s, type, insn, a0, a1, a2, false);
+}
+
+static void tgen_ctzi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
+ tcg_out_cltz(s, type, insn, a0, a1, a2, true);
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_clzctz,
+ .out_rrr = tgen_ctz,
+ .out_rri = tgen_ctzi,
+};
+
static void tgen_divs(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
break;

- case INDEX_op_ctz_i32:
- tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
- break;
- case INDEX_op_ctz_i64:
- tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
- break;
-
case INDEX_op_add2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, rI);

- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
- return C_N1_I2(r, r, rM);
-
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return C_O0_I2(rz, rz);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_divs = {
.base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_divs_rJ(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-opc.h.inc
+++ b/tcg/tci/tcg-target-opc.h.inc
@@ -XXX,XX +XXX,XX @@
DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(tci_clz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_ctz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_divu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond_i64:
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
return C_O1_I2(r, r, r);

case INDEX_op_brcond_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rrr = tgen_clz,
};

+static void tgen_ctz(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_tci_ctz32
+ : INDEX_op_ctz_i64);
+ tcg_out_op_rrr(s, opc, a0, a1, a2);
+}
+
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_ctz,
+};
+
static void tgen_divs(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
break;

- CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */
- tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
- break;
-
CASE_32_64(deposit)
tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
break;
--
2.43.0
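
Across all of the backends above, the conversion lands on the same shape: the
backend publishes a TCGOutOpBinary descriptor whose constraint set is either
fixed, or C_Dynamic and resolved from host CPU features at runtime. What
follows is an illustrative sketch of that shape only, not any single backend
from this series; have_hw_ctz and tgen_hw_ctz are hypothetical stand-ins for
real target details.

/*
 * Sketch of the descriptor pattern; have_hw_ctz and tgen_hw_ctz
 * are hypothetical stand-ins, not names from the patches above.
 */
static void tgen_ctz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* Count trailing zeros of a1 into a0; a2 supplies the a1 == 0 result. */
    tgen_hw_ctz(s, type, a0, a1, a2);
}

static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
{
    /* Only advertise the op when the host ISA actually provides it. */
    return have_hw_ctz ? C_O1_I2(r, r, r) : C_NotImplemented;
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctz,
    .out_rrr = tgen_ctz,
};

With the descriptor in place, common code can look the hook up in a table
instead of dispatching through a switch in tcg_out_op(), which is what lets
the per-opcode cases and tcg_target_op_def() entries above be deleted.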
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 10 +++++-----
tcg/tcg-op.c | 16 ++++++++--------
tcg/tcg.c | 6 ++----
tcg/tci.c | 4 ++--
docs/devel/tcg-ops.rst | 2 +-
tcg/tci/tcg-target.c.inc | 2 +-
7 files changed, 20 insertions(+), 23 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
+DEF(ctz, 1, 2, 0, TCG_OPF_INT)
DEF(divs, 1, 2, 0, TCG_OPF_INT)
DEF(divs2, 2, 3, 0, TCG_OPF_INT)
DEF(divu, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)

DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
-DEF(ctz_i32, 1, 2, 0, 0)
DEF(ctpop_i32, 1, 1, 0, 0)

DEF(setcond_i64, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(bswap16_i64, 1, 1, 1, 0)
DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
-DEF(ctz_i64, 1, 2, 0, 0)
DEF(ctpop_i64, 1, 1, 0, 0)

DEF(add2_i64, 2, 4, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
}
return x ? clz64(x) : y;

- case INDEX_op_ctz_i32:
- return (uint32_t)x ? ctz32(x) : y;
-
- case INDEX_op_ctz_i64:
+ case INDEX_op_ctz:
+ if (type == TCG_TYPE_I32) {
+ return (uint32_t)x ? ctz32(x) : y;
+ }
return x ? ctz64(x) : y;

case INDEX_op_ctpop_i32:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
done = fold_bswap(&ctx, op);
break;
case INDEX_op_clz:
- CASE_OP_32_64(ctz):
+ case INDEX_op_ctz:
done = fold_count_zeros(&ctx, op);
break;
CASE_OP_32_64(ctpop):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGv_i32 z, t;

- if (tcg_op_supported(INDEX_op_ctz_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3_i32(INDEX_op_ctz, ret, arg1, arg2);
return;
}
- if (tcg_op_supported(INDEX_op_ctz_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)) {
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
TCGv_i64 t2 = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t1, arg1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
- if (!tcg_op_supported(INDEX_op_ctz_i32, TCG_TYPE_I32, 0)
+ if (!tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)
&& TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
TCGv_i64 z, t;

- if (tcg_op_supported(INDEX_op_ctz_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3_i64(INDEX_op_ctz, ret, arg1, arg2);
return;
}
if (TCG_TARGET_HAS_ctpop_i64) {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
{
if (TCG_TARGET_REG_BITS == 32
&& arg2 <= 0xffffffffu
- && tcg_op_supported(INDEX_op_ctz_i32, TCG_TYPE_I32, 0)) {
+ && tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)) {
TCGv_i32 t32 = tcg_temp_ebb_new_i32();
tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32);
tcg_gen_addi_i32(t32, t32, 32);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
tcg_temp_free_i32(t32);
} else if (arg2 == 64
- && !tcg_op_supported(INDEX_op_ctz_i64, TCG_TYPE_I64, 0)
+ && !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)
&& TCG_TARGET_HAS_ctpop_i64) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i64 t = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
- OUTOP(INDEX_op_ctz_i32, TCGOutOpBinary, outop_ctz),
- OUTOP(INDEX_op_ctz_i64, TCGOutOpBinary, outop_ctz),
+ OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_and:
case INDEX_op_andc:
case INDEX_op_clz:
- case INDEX_op_ctz_i32:
- case INDEX_op_ctz_i64:
+ case INDEX_op_ctz:
case INDEX_op_divs:
case INDEX_op_divu:
case INDEX_op_eqv:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
break;
- case INDEX_op_ctz_i64:
+ case INDEX_op_ctz:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_and:
case INDEX_op_andc:
case INDEX_op_clz:
+ case INDEX_op_ctz:
case INDEX_op_divs:
case INDEX_op_divu:
case INDEX_op_eqv:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_shr:
case INDEX_op_sub:
case INDEX_op_xor:
- case INDEX_op_ctz_i64:
case INDEX_op_tci_ctz32:
case INDEX_op_tci_clz32:
case INDEX_op_tci_divs32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

- | *t0* = *t1* ? clz(*t1*) : *t2*

- * - ctz_i32/i64 *t0*, *t1*, *t2*
+ * - ctz *t0*, *t1*, *t2*

- | *t0* = *t1* ? ctz(*t1*) : *t2*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_ctz(TCGContext *s, TCGType type,
{
TCGOpcode opc = (type == TCG_TYPE_I32
? INDEX_op_tci_ctz32
- : INDEX_op_ctz_i64);
+ : INDEX_op_ctz);
tcg_out_op_rrr(s, opc, a0, a1, a2);
}

--
2.43.0
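
With the _i32/_i64 variants folded into a single INDEX_op_ctz, the operation's
width is carried by the TCGType operand rather than the opcode. A plain C
model of the semantics that the constant folder and the TCI interpreter above
both implement (ctz32/ctz64 are the helpers from
include/qemu/host-utils.h; the function name ref_ctz is mine, for
illustration only):

/* Reference model of "ctz t0, t1, t2": t0 = t1 ? ctz(t1) : t2. */
static uint64_t ref_ctz(TCGType type, uint64_t t1, uint64_t t2)
{
    if (type == TCG_TYPE_I32) {
        return (uint32_t)t1 ? ctz32(t1) : t2;
    }
    return t1 ? ctz64(t1) : t2;
}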
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 2 -
tcg/arm/tcg-target-has.h | 1 -
tcg/i386/tcg-target-has.h | 2 -
tcg/loongarch64/tcg-target-has.h | 2 -
tcg/mips/tcg-target-has.h | 2 -
tcg/ppc/tcg-target-has.h | 2 -
tcg/riscv/tcg-target-has.h | 2 -
tcg/s390x/tcg-target-has.h | 2 -
tcg/sparc64/tcg-target-has.h | 2 -
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 2 -
tcg/tcg-op.c | 37 ++++++++++--------
tcg/tcg.c | 8 ++--
tcg/tci.c | 19 ++++-----
tcg/aarch64/tcg-target.c.inc | 4 ++
tcg/arm/tcg-target.c.inc | 4 ++
tcg/i386/tcg-target.c.inc | 23 ++++++++---
tcg/loongarch64/tcg-target.c.inc | 4 ++
tcg/mips/tcg-target.c.inc | 4 ++
tcg/ppc/tcg-target.c.inc | 26 ++++++++-----
tcg/riscv/tcg-target.c.inc | 26 ++++++++-----
tcg/s390x/tcg-target.c.inc | 66 +++++++++++++++-----------------
tcg/sparc64/tcg-target.c.inc | 4 ++
tcg/tci/tcg-target.c.inc | 19 +++++++--
24 files changed, 151 insertions(+), 113 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
/* optional instructions */
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_extract2_i64 1
#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
/* optional instructions */
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
/* optional instructions */
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
#define TCG_TARGET_HAS_extract2_i64 1
#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

/* 64-bit operations */
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;

/* optional instructions detected at runtime */
#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
#endif

#define TCG_TARGET_HAS_qemu_ldst_i128 0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
/* optional instructions */
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_negsetcond_i64 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
-#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
/* optional instructions */
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_ctpop_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_ctpop_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
/* optional instructions */
#define TCG_TARGET_HAS_bswap16_i32 0
#define TCG_TARGET_HAS_bswap32_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_negsetcond_i64 0
#define TCG_TARGET_HAS_add2_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 0
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 1
#define TCG_TARGET_HAS_negsetcond_i64 0
#define TCG_TARGET_HAS_muls2_i64 1
#define TCG_TARGET_HAS_add2_i32 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_temp_free_i64(t2);
return;
}
- if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
+ if (tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0) ||
+ tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
t = tcg_temp_ebb_new_i32();
tcg_gen_subi_i32(t, arg1, 1);
tcg_gen_andc_i32(t, t, arg1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
- if (!tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)
- && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
+ if (arg2 == 32
+ && !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)
+ && tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0)) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_subi_i32(t, arg1, 1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)

void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
- if (TCG_TARGET_HAS_ctpop_i32) {
+ if (tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0)) {
tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
- } else if (TCG_TARGET_HAS_ctpop_i64) {
+ } else if (tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t, arg1);
tcg_gen_ctpop_i64(t, t);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_ctz, ret, arg1, arg2);
return;
}
- if (TCG_TARGET_HAS_ctpop_i64) {
+ if (tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
t = tcg_temp_ebb_new_i64();
tcg_gen_subi_i64(t, arg1, 1);
tcg_gen_andc_i64(t, t, arg1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
tcg_temp_free_i32(t32);
} else if (arg2 == 64
&& !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)
- && TCG_TARGET_HAS_ctpop_i64) {
+ && tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_subi_i64(t, arg1, 1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)

void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
{
- if (TCG_TARGET_HAS_ctpop_i64) {
- tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
- } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
- tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
- tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
- tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
- tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
+ tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
+ return;
+ }
} else {
- gen_helper_ctpop_i64(ret, arg1);
+ if (tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0)) {
+ tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
+ tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ return;
+ }
}
+ gen_helper_ctpop_i64(ret, arg1);
}

void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
+ OUTOP(INDEX_op_ctpop_i32, TCGOutOpUnary, outop_ctpop),
+ OUTOP(INDEX_op_ctpop_i64, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap16_i32;
case INDEX_op_bswap32_i32:
return TCG_TARGET_HAS_bswap32_i32;
- case INDEX_op_ctpop_i32:
- return TCG_TARGET_HAS_ctpop_i32;

case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_bswap32_i64;
case INDEX_op_bswap64_i64:
return TCG_TARGET_HAS_bswap64_i64;
- case INDEX_op_ctpop_i64:
- return TCG_TARGET_HAS_ctpop_i64;
case INDEX_op_add2_i64:
return TCG_TARGET_HAS_add2_i64;
case INDEX_op_sub2_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_ctpop_i32:
+ case INDEX_op_ctpop_i64:
case INDEX_op_neg:
case INDEX_op_not:
{
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@
#include <ffi.h>


+#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
+
/*
* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
* Without assertions, the interpreter runs much faster.
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = ~regs[r1];
break;
+ case INDEX_op_ctpop_i32:
+ case INDEX_op_ctpop_i64:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ctpop_tr(regs[r1]);
+ break;

/* Arithmetic operations (32 bit). */

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tmp32 = regs[r1];
regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
break;
-#if TCG_TARGET_HAS_ctpop_i32
- case INDEX_op_ctpop_i32:
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = ctpop32(regs[r1]);
- break;
-#endif

/* Shift/rotate operations. */

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
break;
-#if TCG_TARGET_HAS_ctpop_i64
- case INDEX_op_ctpop_i64:
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = ctpop64(regs[r1]);
- break;
-#endif
#if TCG_TARGET_HAS_mulu2_i64
case INDEX_op_mulu2_i64:
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_ctz(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_ctz(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return have_popcnt ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
+
static void tgen_ctz(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(ctpop):
- tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
- break;
-
OP_32_64(brcond):
tcg_out_brcond(s, rexw, a2, a0, a1, const_args[1],
arg_label(args[3]), 0);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
return C_O1_I1(r, r);

case INDEX_op_extract2_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_ctzi(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, tcg_target_long a2)
{
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_ctz = {
.base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? CNTPOPW : CNTPOPD;
+ tcg_out32(s, insn | SAB(a1, a0, 0));
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return have_isa_2_06 ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
+
static void tgen_ctz(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;

- case INDEX_op_ctpop_i32:
- tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
- break;
- case INDEX_op_ctpop_i64:
- tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
- break;
-
case INDEX_op_brcond_i32:
tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
arg_label(args[3]), TCG_TYPE_I32);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_ctpop_i32:
case INDEX_op_bswap16_i32:
case INDEX_op_bswap32_i32:
case INDEX_op_extract_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
- case INDEX_op_ctpop_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_bswap16_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CPOPW : OPC_CPOP;
+ tcg_out_opc_imm(s, insn, a0, a1, 0);
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
+
static void tgen_ctz(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_ctpop_i32:
- tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
- break;
- case INDEX_op_ctpop_i64:
- tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
- break;
-
case INDEX_op_add2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_bswap16_i64:
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
}

-static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
-{
- /* With MIE3, and bit 0 of m4 set, we get the complete result. */
- if (HAVE_FACILITY(MISC_INSN_EXT3)) {
- if (type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, dest, src);
- src = dest;
- }
- tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
- return;
- }
-
- /* Without MIE3, each byte gets the count of bits for the byte. */
- tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);
-
- /* Multiply to sum each byte at the top of the word. */
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
- tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
- } else {
- tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
- tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
- tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
- }
-}
-
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
int ofs, int len, int z)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.out_rri = tgen_clzi,
};

+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
+{
+ /* With MIE3, and bit 0 of m4 set, we get the complete result. */
+ if (HAVE_FACILITY(MISC_INSN_EXT3)) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_ext32u(s, dest, src);
+ src = dest;
+ }
+ tcg_out_insn(s, RRFc, POPCNT, dest, src, 8);
+ return;
+ }
+
+ /* Without MIE3, each byte gets the count of bits for the byte. */
+ tcg_out_insn(s, RRFc, POPCNT, dest, src, 0);
+
+ /* Multiply to sum each byte at the top of the word. */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, MSFI, dest, 0x01010101);
+ tcg_out_sh32(s, RS_SRL, dest, TCG_REG_NONE, 24);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0x0101010101010101ull);
+ tcg_out_insn(s, RRE, MSGR, dest, TCG_TMP0);
+ tcg_out_sh64(s, RSY_SRLG, dest, dest, TCG_REG_NONE, 56);
+ }
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_ctpop,
+};
+
static const TCGOutOpBinary outop_ctz = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tgen_sextract(s, args[0], args[1], args[2], args[3]);
break;

- case INDEX_op_ctpop_i32:
- tgen_ctpop(s, TCG_TYPE_I32, args[0], args[1]);
- break;
- case INDEX_op_ctpop_i64:
- tgen_ctpop(s, TCG_TYPE_I64, args[0], args[1]);
- break;
-
case INDEX_op_mb:
/* The host memory model is quite strong, we simply need to
serialize the instruction stream. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
return C_O1_I1(r, r);

case INDEX_op_qemu_ld_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_clz = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const TCGOutOpBinary outop_ctz = {
.base.static_constraint = C_NotImplemented,
};
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
.out_rrr = tgen_xor,
};

+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_op_rr(s, glue(INDEX_op_ctpop_i,TCG_TARGET_REG_BITS), a0, a1);
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
break;

- CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */
case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
tcg_out_op_rr(s, opc, args[0], args[1]);
--
2.43.0
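
The s390x tgen_ctpop moved above is worth unpacking: without the MIE3
facility, POPCNT only yields a per-byte bit count, so the multiply by
0x01..01 accumulates every byte's count into the most significant byte,
which the final shift extracts. Below is a standalone C model of the 32-bit
case for illustration only; __builtin_popcount and the function name
ctpop32_bytewise are stand-ins of mine, not part of the patch.

/* Model of the non-MIE3 sequence: per-byte counts, then multiply-sum. */
static uint32_t ctpop32_bytewise(uint32_t x)
{
    uint32_t per_byte = 0;

    /* What POPCNT with m4 = 0 leaves behind: one count per byte lane. */
    for (int i = 0; i < 4; i++) {
        per_byte |= (uint32_t)__builtin_popcount((x >> (i * 8)) & 0xff)
                    << (i * 8);
    }
    /*
     * Multiplying by 0x01010101 sums all four lanes into the top byte;
     * the total is at most 32, so no lane can carry into its neighbor.
     * The right shift by 24 extracts that sum.
     */
    return (per_byte * 0x01010101u) >> 24;
}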
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 9 +++------
tcg/tcg-op.c | 21 ++++++++++-----------
tcg/tcg.c | 6 ++----
tcg/tci.c | 6 ++----
docs/devel/tcg-ops.rst | 6 +++---
tcg/tci/tcg-target.c.inc | 2 +-
7 files changed, 22 insertions(+), 31 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
+DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
DEF(ctz, 1, 2, 0, TCG_OPF_INT)
DEF(divs, 1, 2, 0, TCG_OPF_INT)
DEF(divs2, 2, 3, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)

DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
-DEF(ctpop_i32, 1, 1, 0, 0)

DEF(setcond_i64, 1, 2, 1, 0)
DEF(negsetcond_i64, 1, 2, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(bswap16_i64, 1, 1, 1, 0)
DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
-DEF(ctpop_i64, 1, 1, 0, 0)

DEF(add2_i64, 2, 4, 0, 0)
DEF(sub2_i64, 2, 4, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
}
return x ? ctz64(x) : y;

- case INDEX_op_ctpop_i32:
- return ctpop32(x);
-
- case INDEX_op_ctpop_i64:
- return ctpop64(x);
+ case INDEX_op_ctpop:
+ return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);

CASE_OP_32_64(bswap16):
x = bswap16(x);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_ctz:
done = fold_count_zeros(&ctx, op);
break;
- CASE_OP_32_64(ctpop):
+ case INDEX_op_ctpop:
done = fold_ctpop(&ctx, op);
break;
CASE_OP_32_64(deposit):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_temp_free_i64(t2);
return;
}
- if (tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0) ||
- tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_REG, 0)) {
t = tcg_temp_ebb_new_i32();
tcg_gen_subi_i32(t, arg1, 1);
tcg_gen_andc_i32(t, t, arg1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
{
if (arg2 == 32
&& !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I32, 0)
- && tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0)) {
+ && tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_REG, 0)) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_subi_i32(t, arg1, 1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)

void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
{
- if (tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
- } else if (tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I32, 0)) {
+ tcg_gen_op2_i32(INDEX_op_ctpop, ret, arg1);
+ } else if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_extu_i32_i64(t, arg1);
tcg_gen_ctpop_i64(t, t);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_ctz, ret, arg1, arg2);
return;
}
- if (tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
t = tcg_temp_ebb_new_i64();
tcg_gen_subi_i64(t, arg1, 1);
tcg_gen_andc_i64(t, t, arg1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
tcg_temp_free_i32(t32);
} else if (arg2 == 64
&& !tcg_op_supported(INDEX_op_ctz, TCG_TYPE_I64, 0)
- && tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
+ && tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
/* This equivalence has the advantage of not requiring a fixup. */
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_subi_i64(t, arg1, 1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
{
if (TCG_TARGET_REG_BITS == 64) {
- if (tcg_op_supported(INDEX_op_ctpop_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I64, 0)) {
+ tcg_gen_op2_i64(INDEX_op_ctpop, ret, arg1);
return;
}
} else {
- if (tcg_op_supported(INDEX_op_ctpop_i32, TCG_TYPE_I32, 0)) {
+ if (tcg_op_supported(INDEX_op_ctpop, TCG_TYPE_I32, 0)) {
tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
- OUTOP(INDEX_op_ctpop_i32, TCGOutOpUnary, outop_ctpop),
- OUTOP(INDEX_op_ctpop_i64, TCGOutOpUnary, outop_ctpop),
+ OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
+ case INDEX_op_ctpop:
case INDEX_op_neg:
case INDEX_op_not:
{
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = ~regs[r1];
break;
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
+ case INDEX_op_ctpop:
tci_args_rr(insn, &r0, &r1);
regs[r0] = ctpop_tr(regs[r1]);
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1), s2);
break;

+ case INDEX_op_ctpop:
case INDEX_op_mov:
case INDEX_op_neg:
case INDEX_op_not:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_bswap32_i32:
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
- case INDEX_op_ctpop_i32:
- case INDEX_op_ctpop_i64:
tci_args_rr(insn, &r0, &r1);
info->fprintf_func(info->stream, "%-12s %s, %s",
op_name, str_r(r0), str_r(r1));
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Logical

- | *t0* = *t1* ? ctz(*t1*) : *t2*

- * - ctpop_i32/i64 *t0*, *t1*
+ * - ctpop *t0*, *t1*

- | *t0* = number of bits set in *t1*
|
- | With *ctpop* short for "count population", matching
- | the function name used in ``include/qemu/host-utils.h``.
+ | The name *ctpop* is short for "count population", and matches
+ the function name used in ``include/qemu/host-utils.h``.


Shifts/Rotates

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
- tcg_out_op_rr(s, glue(INDEX_op_ctpop_i,TCG_TARGET_REG_BITS), a0, a1);
+ tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}

static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
--
2.43.0
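
The tcg-op.c fallback retained above leans on the identity
ctz(x) == ctpop((x - 1) & ~x): subtracting one turns the trailing zeros of x
into ones, the andc masks off everything else, and for x == 0 the mask is
all-ones, yielding the full width with no fixup. A standalone check of the
identity, for illustration only; __builtin_popcount and the function name
ctz32_via_ctpop are stand-ins of mine, not part of the patch.

#include <assert.h>
#include <stdint.h>

/* ctz32 via ctpop, mirroring the subi/andc/ctpop sequence above. */
static unsigned ctz32_via_ctpop(uint32_t x)
{
    return __builtin_popcount((x - 1) & ~x);  /* yields 32 for x == 0 */
}

int main(void)
{
    assert(ctz32_via_ctpop(0) == 32);
    assert(ctz32_via_ctpop(1) == 0);
    assert(ctz32_via_ctpop(0x80000000u) == 31);
    assert(ctz32_via_ctpop(0x0000000cu) == 2);
    return 0;
}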
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 2 --
tcg/arm/tcg-target-has.h | 1 -
tcg/i386/tcg-target-has.h | 2 --
tcg/loongarch64/tcg-target-has.h | 2 --
tcg/mips/tcg-target-has.h | 2 --
tcg/ppc/tcg-target-has.h | 2 --
tcg/riscv/tcg-target-has.h | 2 --
tcg/s390x/tcg-target-has.h | 2 --
tcg/sparc64/tcg-target-con-set.h | 1 +
tcg/sparc64/tcg-target-has.h | 2 --
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 2 --
tcg/tcg-op.c | 4 ++--
tcg/tcg.c | 26 ++++++++++++++++++++++----
tcg/tci.c | 23 ++++++++++-------------
tcg/aarch64/tcg-target.c.inc | 4 ++++
tcg/arm/tcg-target.c.inc | 25 +++++++++++++------------
tcg/i386/tcg-target.c.inc | 17 ++++++++++++-----
tcg/loongarch64/tcg-target.c.inc | 4 ++++
tcg/mips/tcg-target.c.inc | 28 ++++++++++++++++++++--------
tcg/ppc/tcg-target.c.inc | 4 ++++
tcg/riscv/tcg-target.c.inc | 4 ++++
tcg/s390x/tcg-target.c.inc | 27 ++++++++++++++++++++-------
tcg/sparc64/tcg-target.c.inc | 29 ++++++++++++++++++++++-----
tcg/tci/tcg-target.c.inc | 21 ++++++++++++++++++---
25 files changed, 158 insertions(+), 79 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0

/*
* Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_qemu_ldst_i128 0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1

#if TCG_TARGET_REG_BITS == 64
/* Keep 32-bit values zero-extended in a register. */
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muls2_i64 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
#else
#define TCG_TARGET_HAS_qemu_st8_i32 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0

#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)

diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;

/* optional instructions */
#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_negsetcond_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
-#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_ext32s_i64 1
#define TCG_TARGET_HAS_ext32u_i64 1
#define TCG_TARGET_HAS_negsetcond_i64 0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
#endif

#define TCG_TARGET_HAS_qemu_ldst_i128 \
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0

#define TCG_TARGET_HAS_qemu_ldst_i128 0

diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muls2_i64 HAVE_FACILITY(MISC_INSN_EXT2)

#define TCG_TARGET_HAS_qemu_ldst_i128 1

diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, r)
C_O1_I2(r, r, rJ)
C_O1_I2(r, rz, rJ)
C_O1_I4(r, rz, rJ, rI, 0)
+C_O2_I2(r, r, r, r)
C_O2_I2(r, r, rz, rJ)
C_O2_I4(r, r, rz, rz, rJ, rJ)
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extr_i64_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0

#define TCG_TARGET_HAS_qemu_ldst_i128 0

diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
/* Turn some undef macros into true macros. */
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_negsetcond_i32 0
-#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_negsetcond_i64 0
-#define TCG_TARGET_HAS_muls2_i64 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (TCG_TARGET_HAS_muls2_i32) {
+ if (tcg_op_supported(INDEX_op_muls2_i32, TCG_TYPE_I32, 0)) {
tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
} else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I32, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)

void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (TCG_TARGET_HAS_muls2_i64) {
+ if (tcg_op_supported(INDEX_op_muls2_i64, TCG_TYPE_I64, 0)) {
tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
} else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpDivRem {
TCGReg a0, TCGReg a1, TCGReg a4);
} TCGOutOpDivRem;

+typedef struct TCGOutOpMul2 {
+ TCGOutOp base;
+ void (*out_rrrr)(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3);
+} TCGOutOpMul2;
+
typedef struct TCGOutOpUnary {
TCGOutOp base;
void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
+ OUTOP(INDEX_op_muls2_i32, TCGOutOpMul2, outop_muls2),
+ OUTOP(INDEX_op_muls2_i64, TCGOutOpMul2, outop_muls2),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_sub2_i32;
case INDEX_op_mulu2_i32:
return TCG_TARGET_HAS_mulu2_i32;
- case INDEX_op_muls2_i32:
- return TCG_TARGET_HAS_muls2_i32;
case INDEX_op_bswap16_i32:
return TCG_TARGET_HAS_bswap16_i32;
case INDEX_op_bswap32_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_sub2_i64;
case INDEX_op_mulu2_i64:
return TCG_TARGET_HAS_mulu2_i64;
- case INDEX_op_muls2_i64:
- return TCG_TARGET_HAS_muls2_i64;

case INDEX_op_mov_vec:
case INDEX_op_dup_vec:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_muls2_i32:
+ case INDEX_op_muls2_i64:
+ {
+ const TCGOutOpMul2 *out =
+ container_of(all_outop[op->opc], TCGOutOpMul2, base);
+
+ tcg_debug_assert(!const_args[2]);
+ tcg_debug_assert(!const_args[3]);
+ out->out_rrrr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3]);
+ }
+ break;
+
+
default:
if (def->flags & TCG_OPF_VECTOR) {
tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = ctpop_tr(regs[r1]);
break;
+ case INDEX_op_muls2_i32:
+ case INDEX_op_muls2_i64:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+#if TCG_TARGET_REG_BITS == 32
+ tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
+ tci_write_reg64(regs, r1, r0, tmp64);
+#else
+ muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
+#endif
+ break;

/* Arithmetic operations (32 bit). */

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_write_reg64(regs, r1, r0, tmp64);
break;
#endif
-#if TCG_TARGET_HAS_muls2_i32
- case INDEX_op_muls2_i32:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
- tci_write_reg64(regs, r1, r0, tmp64);
- break;
-#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
CASE_32_64(bswap16)
tci_args_rr(insn, &r0, &r1);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
break;
#endif
-#if TCG_TARGET_HAS_muls2_i64
- case INDEX_op_muls2_i64:
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
- break;
-#endif
#if TCG_TARGET_HAS_add2_i64
case INDEX_op_add2_i64:
tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
{
return type == TCG_TYPE_I64 ? C_O1_I2(r, r, r) : C_NotImplemented;
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

-static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
- TCGReg rd1, TCGReg rn, TCGReg rm)
-{
- /* smull */
- tcg_out32(s, (cond << 28) | 0x00c00090 |
- (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
-}
-
static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
/* sxtb */
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
+{
+ /* smull */
+ tcg_out32(s, (COND_AL << 28) | 0x00c00090 |
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_O2_I2(r, r, r, r),
+ .out_rrrr = tgen_muls2,
+};
+
static const TCGOutOpBinary outop_mulsh = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_mulu2_i32:
tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
break;
- case INDEX_op_muls2_i32:
- tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;

case INDEX_op_brcond_i32:
c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
return C_O1_I2(r, r, rIN);

case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i32:
return C_O2_I2(r, r, r, r);

case INDEX_op_brcond_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, a3);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_O2_I2(a, d, a, r),
+ .out_rrrr = tgen_muls2,
+};
+
static const TCGOutOpBinary outop_mulsh = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
OP_32_64(mulu2):
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
break;
- OP_32_64(muls2):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
- break;
OP_32_64(add2):
if (const_args[4]) {
tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_mulu2_i32:
case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
return C_O2_I2(a, d, a, r);

case INDEX_op_add2_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_mulsh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULT : OPC_DMULT;
+ tcg_out_opc_reg(s, insn, 0, a2, a3);
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
+ tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
+}
+
+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return use_mips32r6_instructions ? C_NotImplemented : C_O2_I2(r, r, r, r);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
static void tgen_mulsh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_muls2_i32:
- i1 = OPC_MULT;
- goto do_hilo2;
case INDEX_op_mulu2_i32:
i1 = OPC_MULTU;
goto do_hilo2;
- case INDEX_op_muls2_i64:
- i1 = OPC_DMULT;
- goto do_hilo2;
case INDEX_op_mulu2_i64:
i1 = OPC_DMULTU;
do_hilo2:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
return C_O1_I2(r, rz, rz);
- case INDEX_op_muls2_i32:
case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i64:
case INDEX_op_mulu2_i64:
return C_O2_I2(r, r, r, r);

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_mulsh(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
{
return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_debug_assert((a1 & 1) == 0);
+ tcg_debug_assert(a0 == a1 + 1);
+ tcg_out_insn(s, RRFa, MGRK, a1, a2, a3);
+}
+
+static TCGConstraintSetIndex cset_muls2(TCGType type, unsigned flags)
+{
+ return (type == TCG_TYPE_I64 && HAVE_FACILITY(MISC_INSN_EXT2)
+ ? C_O2_I2(o, m, r, r) : C_NotImplemented);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_muls2,
+ .out_rrrr = tgen_muls2,
+};
+
static const TCGOutOpBinary outop_mulsh = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_debug_assert(args[0] == args[1] + 1);
tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
break;
- case INDEX_op_muls2_i64:
- tcg_debug_assert((args[1] & 1) == 0);
- tcg_debug_assert(args[0] == args[1] + 1);
- tcg_out_insn(s, RRFa, MGRK, args[1], args[2], args[3]);
- break;

case INDEX_op_add2_i64:
if (const_args[4]) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_mulu2_i64:
return C_O2_I2(o, m, 0, r);
- case INDEX_op_muls2_i64:
- return C_O2_I2(o, m, r, r);

case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rri = tgen_muli,
};

+/*
+ * The 32-bit multiply insns produce a full 64-bit result.
+ * Supporting 32-bit mul[us]2 opcodes avoids sign/zero-extensions
+ * before the actual multiply; we only need extract the high part
+ * into the separate operand.
+ */
+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_I32 ? C_O2_I2(r, r, r, r) : C_NotImplemented;
+}
+
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_arith(s, a0, a2, a3, ARITH_SMUL);
+ tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
static const TCGOutOpBinary outop_mulsh = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;
case INDEX_op_mulu2_i32:
c = ARITH_UMUL;
- goto do_mul2;
- case INDEX_op_muls2_i32:
- c = ARITH_SMUL;
- do_mul2:
/* The 32-bit multiply insns produce a full 64-bit result. */
tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_sub2_i64:
return C_O2_I4(r, r, rz, rz, rJ, rJ);
case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i32:
return C_O2_I2(r, r, rz, rJ);

default:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_mulu2_i32:
case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
return C_O2_I2(r, r, r, r);

case INDEX_op_movcond_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mul = {
.out_rrr = tgen_mul,
};

+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
+}
+
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_op_rrrr(s, glue(INDEX_op_muls2_i,TCG_TARGET_REG_BITS),
+ a0, a1, a2, a3);
+}
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
static const TCGOutOpBinary outop_mulsh = {
.base.static_constraint = C_NotImplemented,
};
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
#endif

CASE_32_64(mulu2)
- CASE_32_64(muls2)
tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
break;

--
2.43.0
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 17 +++++++++--------
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 9 +++------
tcg/tci.c | 6 ++----
docs/devel/tcg-ops.rst | 2 +-
tcg/tci/tcg-target.c.inc | 3 +--
7 files changed, 21 insertions(+), 27 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(divu, 1, 2, 0, TCG_OPF_INT)
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
+DEF(muls2, 2, 2, 0, TCG_OPF_INT)
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
DEF(muluh, 1, 2, 0, TCG_OPF_INT)
DEF(nand, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(add2_i32, 2, 4, 0, 0)
DEF(sub2_i32, 2, 4, 0, 0)
DEF(mulu2_i32, 2, 2, 0, 0)
-DEF(muls2_i32, 2, 2, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)

@@ -XXX,XX +XXX,XX @@ DEF(bswap64_i64, 1, 1, 1, 0)
DEF(add2_i64, 2, 4, 0, 0)
DEF(sub2_i64, 2, 4, 0, 0)
DEF(mulu2_i64, 2, 2, 0, 0)
-DEF(muls2_i64, 2, 2, 0, 0)

#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
h = (int32_t)(l >> 32);
l = (int32_t)l;
break;
- case INDEX_op_muls2_i32:
- l = (int64_t)(int32_t)a * (int32_t)b;
- h = l >> 32;
- l = (int32_t)l;
- break;
case INDEX_op_mulu2_i64:
mulu64(&l, &h, a, b);
break;
- case INDEX_op_muls2_i64:
- muls64(&l, &h, a, b);
+ case INDEX_op_muls2:
+ if (ctx->type == TCG_TYPE_I32) {
+ l = (int64_t)(int32_t)a * (int32_t)b;
+ h = l >> 32;
+ l = (int32_t)l;
+ } else {
+ muls64(&l, &h, a, b);
+ }
break;
default:
g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_muluh:
done = fold_mul_highpart(&ctx, op);
break;
- CASE_OP_32_64(muls2):
+ case INDEX_op_muls2:
CASE_OP_32_64(mulu2):
done = fold_multiply2(&ctx, op);
break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)

void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (tcg_op_supported(INDEX_op_muls2_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_muls2, TCG_TYPE_I32, 0)) {
+ tcg_gen_op4_i32(INDEX_op_muls2, rl, rh, arg1, arg2);
} else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I32, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)

void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (tcg_op_supported(INDEX_op_muls2_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_muls2, TCG_TYPE_I64, 0)) {
+ tcg_gen_op4_i64(INDEX_op_muls2, rl, rh, arg1, arg2);
} else if (tcg_op_supported(INDEX_op_mulsh, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
- OUTOP(INDEX_op_muls2_i32, TCGOutOpMul2, outop_muls2),
- OUTOP(INDEX_op_muls2_i64, TCGOutOpMul2, outop_muls2),
+ OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
}
goto do_not_remove;

- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
+ case INDEX_op_muls2:
opc_new = INDEX_op_mul;
opc_new2 = INDEX_op_mulsh;
goto do_mul2;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
+ case INDEX_op_muls2:
{
const TCGOutOpMul2 *out =
container_of(all_outop[op->opc], TCGOutOpMul2, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = ctpop_tr(regs[r1]);
break;
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
+ case INDEX_op_muls2:
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
#if TCG_TARGET_REG_BITS == 32
tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
str_r(r3), str_r(r4), str_c(c));
break;

+ case INDEX_op_muls2:
case INDEX_op_mulu2_i32:
case INDEX_op_mulu2_i64:
- case INDEX_op_muls2_i32:
- case INDEX_op_muls2_i64:
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
op_name, str_r(r0), str_r(r1),
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Multiword arithmetic support
- | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
double-word product *t0*. The latter is returned in two single-word outputs.

- * - muls2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
+ * - muls2 *t0_low*, *t0_high*, *t1*, *t2*

- | Similar to mulu2, except the two inputs *t1* and *t2* are signed.

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
static void tgen_muls2(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
- tcg_out_op_rrrr(s, glue(INDEX_op_muls2_i,TCG_TARGET_REG_BITS),
- a0, a1, a2, a3);
+ tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
--
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/aarch64/tcg-target-has.h | 2 --
5
tcg/arm/tcg-target-has.h | 1 -
6
tcg/i386/tcg-target-has.h | 2 --
7
tcg/loongarch64/tcg-target-has.h | 2 --
8
tcg/mips/tcg-target-has.h | 2 --
9
tcg/ppc/tcg-target-has.h | 2 --
10
tcg/riscv/tcg-target-has.h | 2 --
11
tcg/s390x/tcg-target-has.h | 2 --
12
tcg/sparc64/tcg-target-con-set.h | 1 -
13
tcg/sparc64/tcg-target-has.h | 2 --
14
tcg/tcg-has.h | 1 -
15
tcg/tci/tcg-target-has.h | 4 ----
16
tcg/tcg-op.c | 8 ++++----
17
tcg/tcg.c | 8 ++++----
18
tcg/tci.c | 23 ++++++++++-------------
19
tcg/aarch64/tcg-target.c.inc | 4 ++++
20
tcg/arm/tcg-target.c.inc | 27 +++++++++++++--------------
21
tcg/i386/tcg-target.c.inc | 19 ++++++++++++-------
22
tcg/loongarch64/tcg-target.c.inc | 4 ++++
23
tcg/mips/tcg-target.c.inc | 29 +++++++++++++++--------------
24
tcg/ppc/tcg-target.c.inc | 4 ++++
25
tcg/riscv/tcg-target.c.inc | 4 ++++
26
tcg/s390x/tcg-target.c.inc | 31 +++++++++++++++++++++----------
27
tcg/sparc64/tcg-target.c.inc | 23 ++++++++++++++---------
28
tcg/tci/tcg-target.c.inc | 21 +++++++++++++--------
29
25 files changed, 122 insertions(+), 106 deletions(-)
30
1
31
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/aarch64/tcg-target-has.h
34
+++ b/tcg/aarch64/tcg-target-has.h
35
@@ -XXX,XX +XXX,XX @@
36
#define TCG_TARGET_HAS_negsetcond_i32 1
37
#define TCG_TARGET_HAS_add2_i32 1
38
#define TCG_TARGET_HAS_sub2_i32 1
39
-#define TCG_TARGET_HAS_mulu2_i32 0
40
#define TCG_TARGET_HAS_extr_i64_i32 0
41
#define TCG_TARGET_HAS_qemu_st8_i32 0
42
43
@@ -XXX,XX +XXX,XX @@
44
#define TCG_TARGET_HAS_negsetcond_i64 1
45
#define TCG_TARGET_HAS_add2_i64 1
46
#define TCG_TARGET_HAS_sub2_i64 1
47
-#define TCG_TARGET_HAS_mulu2_i64 0
48
49
/*
50
* Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
51
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tcg/arm/tcg-target-has.h
54
+++ b/tcg/arm/tcg-target-has.h
55
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
56
#define TCG_TARGET_HAS_bswap32_i32 1
57
#define TCG_TARGET_HAS_extract2_i32 1
58
#define TCG_TARGET_HAS_negsetcond_i32 1
59
-#define TCG_TARGET_HAS_mulu2_i32 1
60
#define TCG_TARGET_HAS_qemu_st8_i32 0
61
62
#define TCG_TARGET_HAS_qemu_ldst_i128 0
63
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/i386/tcg-target-has.h
66
+++ b/tcg/i386/tcg-target-has.h
67
@@ -XXX,XX +XXX,XX @@
68
#define TCG_TARGET_HAS_negsetcond_i32 1
69
#define TCG_TARGET_HAS_add2_i32 1
70
#define TCG_TARGET_HAS_sub2_i32 1
71
-#define TCG_TARGET_HAS_mulu2_i32 1
72
73
#if TCG_TARGET_REG_BITS == 64
74
/* Keep 32-bit values zero-extended in a register. */
75
@@ -XXX,XX +XXX,XX @@
76
#define TCG_TARGET_HAS_negsetcond_i64 1
77
#define TCG_TARGET_HAS_add2_i64 1
78
#define TCG_TARGET_HAS_sub2_i64 1
79
-#define TCG_TARGET_HAS_mulu2_i64 1
80
#define TCG_TARGET_HAS_qemu_st8_i32 0
81
#else
82
#define TCG_TARGET_HAS_qemu_st8_i32 1
83
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/loongarch64/tcg-target-has.h
86
+++ b/tcg/loongarch64/tcg-target-has.h
87
@@ -XXX,XX +XXX,XX @@
88
#define TCG_TARGET_HAS_extract2_i32 0
89
#define TCG_TARGET_HAS_add2_i32 0
90
#define TCG_TARGET_HAS_sub2_i32 0
91
-#define TCG_TARGET_HAS_mulu2_i32 0
92
#define TCG_TARGET_HAS_bswap16_i32 1
93
#define TCG_TARGET_HAS_bswap32_i32 1
94
#define TCG_TARGET_HAS_qemu_st8_i32 0
95
@@ -XXX,XX +XXX,XX @@
96
#define TCG_TARGET_HAS_bswap64_i64 1
97
#define TCG_TARGET_HAS_add2_i64 0
98
#define TCG_TARGET_HAS_sub2_i64 0
99
-#define TCG_TARGET_HAS_mulu2_i64 0
100
101
#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
102
103
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/mips/tcg-target-has.h
106
+++ b/tcg/mips/tcg-target-has.h
107
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
108
#endif
109
110
/* optional instructions */
111
-#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
112
#define TCG_TARGET_HAS_bswap16_i32 1
113
#define TCG_TARGET_HAS_bswap32_i32 1
114
#define TCG_TARGET_HAS_negsetcond_i32 0
115
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
116
#define TCG_TARGET_HAS_extr_i64_i32 1
117
#define TCG_TARGET_HAS_add2_i64 0
118
#define TCG_TARGET_HAS_sub2_i64 0
119
-#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
120
#define TCG_TARGET_HAS_ext32s_i64 1
121
#define TCG_TARGET_HAS_ext32u_i64 1
122
#define TCG_TARGET_HAS_negsetcond_i64 0
123
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/ppc/tcg-target-has.h
126
+++ b/tcg/ppc/tcg-target-has.h
127
@@ -XXX,XX +XXX,XX @@
128
#define TCG_TARGET_HAS_bswap32_i32 1
129
#define TCG_TARGET_HAS_extract2_i32 0
130
#define TCG_TARGET_HAS_negsetcond_i32 1
131
-#define TCG_TARGET_HAS_mulu2_i32 0
132
#define TCG_TARGET_HAS_qemu_st8_i32 0
133
134
#if TCG_TARGET_REG_BITS == 64
135
@@ -XXX,XX +XXX,XX @@
136
#define TCG_TARGET_HAS_negsetcond_i64 1
137
#define TCG_TARGET_HAS_add2_i64 1
138
#define TCG_TARGET_HAS_sub2_i64 1
139
-#define TCG_TARGET_HAS_mulu2_i64 0
140
#endif
141
142
#define TCG_TARGET_HAS_qemu_ldst_i128 \
143
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
144
index XXXXXXX..XXXXXXX 100644
145
--- a/tcg/riscv/tcg-target-has.h
146
+++ b/tcg/riscv/tcg-target-has.h
147
@@ -XXX,XX +XXX,XX @@
148
#define TCG_TARGET_HAS_extract2_i32 0
149
#define TCG_TARGET_HAS_add2_i32 1
150
#define TCG_TARGET_HAS_sub2_i32 1
151
-#define TCG_TARGET_HAS_mulu2_i32 0
152
#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
153
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
154
#define TCG_TARGET_HAS_qemu_st8_i32 0
155
@@ -XXX,XX +XXX,XX @@
156
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
157
#define TCG_TARGET_HAS_add2_i64 1
158
#define TCG_TARGET_HAS_sub2_i64 1
159
-#define TCG_TARGET_HAS_mulu2_i64 0
160
161
#define TCG_TARGET_HAS_qemu_ldst_i128 0
162
163
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/s390x/tcg-target-has.h
166
+++ b/tcg/s390x/tcg-target-has.h
167
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
168
#define TCG_TARGET_HAS_negsetcond_i32 1
169
#define TCG_TARGET_HAS_add2_i32 1
170
#define TCG_TARGET_HAS_sub2_i32 1
171
-#define TCG_TARGET_HAS_mulu2_i32 0
172
#define TCG_TARGET_HAS_extr_i64_i32 0
173
#define TCG_TARGET_HAS_qemu_st8_i32 0
174
175
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
176
#define TCG_TARGET_HAS_negsetcond_i64 1
177
#define TCG_TARGET_HAS_add2_i64 1
178
#define TCG_TARGET_HAS_sub2_i64 1
179
-#define TCG_TARGET_HAS_mulu2_i64 1
180
181
#define TCG_TARGET_HAS_qemu_ldst_i128 1
182
183
diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
184
index XXXXXXX..XXXXXXX 100644
185
--- a/tcg/sparc64/tcg-target-con-set.h
186
+++ b/tcg/sparc64/tcg-target-con-set.h
187
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rJ)
188
C_O1_I2(r, rz, rJ)
189
C_O1_I4(r, rz, rJ, rI, 0)
190
C_O2_I2(r, r, r, r)
191
-C_O2_I2(r, r, rz, rJ)
192
C_O2_I4(r, r, rz, rz, rJ, rJ)
193
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
194
index XXXXXXX..XXXXXXX 100644
195
--- a/tcg/sparc64/tcg-target-has.h
196
+++ b/tcg/sparc64/tcg-target-has.h
197
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
198
#define TCG_TARGET_HAS_negsetcond_i32 1
199
#define TCG_TARGET_HAS_add2_i32 1
200
#define TCG_TARGET_HAS_sub2_i32 1
201
-#define TCG_TARGET_HAS_mulu2_i32 1
202
#define TCG_TARGET_HAS_qemu_st8_i32 0
203
204
#define TCG_TARGET_HAS_extr_i64_i32 0
205
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
206
#define TCG_TARGET_HAS_negsetcond_i64 1
207
#define TCG_TARGET_HAS_add2_i64 1
208
#define TCG_TARGET_HAS_sub2_i64 1
209
-#define TCG_TARGET_HAS_mulu2_i64 0
210
211
#define TCG_TARGET_HAS_qemu_ldst_i128 0
212
213
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
214
index XXXXXXX..XXXXXXX 100644
215
--- a/tcg/tcg-has.h
216
+++ b/tcg/tcg-has.h
217
@@ -XXX,XX +XXX,XX @@
218
#define TCG_TARGET_HAS_negsetcond_i64 0
219
#define TCG_TARGET_HAS_add2_i64 0
220
#define TCG_TARGET_HAS_sub2_i64 0
221
-#define TCG_TARGET_HAS_mulu2_i64 0
222
/* Turn some undef macros into true macros. */
223
#define TCG_TARGET_HAS_add2_i32 1
224
#define TCG_TARGET_HAS_sub2_i32 1
225
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/tcg/tci/tcg-target-has.h
228
+++ b/tcg/tci/tcg-target-has.h
229
@@ -XXX,XX +XXX,XX @@
230
#define TCG_TARGET_HAS_negsetcond_i64 0
231
#define TCG_TARGET_HAS_add2_i32 1
232
#define TCG_TARGET_HAS_sub2_i32 1
233
-#define TCG_TARGET_HAS_mulu2_i32 1
234
#define TCG_TARGET_HAS_add2_i64 1
235
#define TCG_TARGET_HAS_sub2_i64 1
236
-#define TCG_TARGET_HAS_mulu2_i64 1
237
-#else
238
-#define TCG_TARGET_HAS_mulu2_i32 1
239
#endif /* TCG_TARGET_REG_BITS == 64 */
240
241
#define TCG_TARGET_HAS_qemu_ldst_i128 0
242
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
243
index XXXXXXX..XXXXXXX 100644
244
--- a/tcg/tcg-op.c
245
+++ b/tcg/tcg-op.c
246
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
247
248
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
249
{
250
- if (TCG_TARGET_HAS_mulu2_i32) {
251
+ if (tcg_op_supported(INDEX_op_mulu2_i32, TCG_TYPE_I32, 0)) {
252
tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
253
} else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) {
254
TCGv_i32 t = tcg_temp_ebb_new_i32();
255
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
256
tcg_temp_free_i64(t0);
257
tcg_temp_free_i64(t1);
258
} else {
259
- qemu_build_not_reached();
260
+ g_assert_not_reached();
261
}
262
}
263
264
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
265
266
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
267
{
268
- if (TCG_TARGET_HAS_mulu2_i64) {
269
+ if (tcg_op_supported(INDEX_op_mulu2_i64, TCG_TYPE_I64, 0)) {
270
tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
271
} else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
272
TCGv_i64 t = tcg_temp_ebb_new_i64();
273
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
274
tcg_gen_op3_i64(INDEX_op_mulsh, rh, arg1, arg2);
275
tcg_gen_mov_i64(rl, t);
276
tcg_temp_free_i64(t);
277
- } else if (TCG_TARGET_HAS_mulu2_i64 ||
278
+ } else if (tcg_op_supported(INDEX_op_mulu2_i64, TCG_TYPE_I64, 0) ||
279
tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
280
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
281
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
282
diff --git a/tcg/tcg.c b/tcg/tcg.c
283
index XXXXXXX..XXXXXXX 100644
284
--- a/tcg/tcg.c
285
+++ b/tcg/tcg.c
286
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
287
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
288
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
289
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
290
+ OUTOP(INDEX_op_mulu2_i32, TCGOutOpMul2, outop_mulu2),
291
+ OUTOP(INDEX_op_mulu2_i64, TCGOutOpMul2, outop_mulu2),
292
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
293
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
294
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
295
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
296
return TCG_TARGET_HAS_add2_i32;
297
case INDEX_op_sub2_i32:
298
return TCG_TARGET_HAS_sub2_i32;
299
- case INDEX_op_mulu2_i32:
300
- return TCG_TARGET_HAS_mulu2_i32;
301
case INDEX_op_bswap16_i32:
302
return TCG_TARGET_HAS_bswap16_i32;
303
case INDEX_op_bswap32_i32:
304
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
305
return TCG_TARGET_HAS_add2_i64;
306
case INDEX_op_sub2_i64:
307
return TCG_TARGET_HAS_sub2_i64;
308
- case INDEX_op_mulu2_i64:
309
- return TCG_TARGET_HAS_mulu2_i64;
310
311
case INDEX_op_mov_vec:
312
case INDEX_op_dup_vec:
313
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
314
break;
315
316
case INDEX_op_muls2:
317
+ case INDEX_op_mulu2_i32:
318
+ case INDEX_op_mulu2_i64:
319
{
320
const TCGOutOpMul2 *out =
321
container_of(all_outop[op->opc], TCGOutOpMul2, base);
322
diff --git a/tcg/tci.c b/tcg/tci.c
323
index XXXXXXX..XXXXXXX 100644
324
--- a/tcg/tci.c
325
+++ b/tcg/tci.c
326
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
327
tci_write_reg64(regs, r1, r0, tmp64);
328
#else
329
muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
330
+#endif
331
+ break;
332
+ case INDEX_op_mulu2_i32:
333
+ case INDEX_op_mulu2_i64:
334
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
335
+#if TCG_TARGET_REG_BITS == 32
336
+ tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
337
+ tci_write_reg64(regs, r1, r0, tmp64);
338
+#else
339
+ mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
340
#endif
341
break;
342
343
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
344
tci_write_reg64(regs, r1, r0, T1 - T2);
345
break;
346
#endif
347
-#if TCG_TARGET_HAS_mulu2_i32
348
- case INDEX_op_mulu2_i32:
349
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
350
- tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
351
- tci_write_reg64(regs, r1, r0, tmp64);
352
- break;
353
-#endif
354
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
355
CASE_32_64(bswap16)
356
tci_args_rr(insn, &r0, &r1);
357
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
358
tci_args_rrr(insn, &r0, &r1, &r2);
359
regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
360
break;
361
-#if TCG_TARGET_HAS_mulu2_i64
362
- case INDEX_op_mulu2_i64:
363
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
364
- mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
365
- break;
366
-#endif
367
#if TCG_TARGET_HAS_add2_i64
368
case INDEX_op_add2_i64:
369
tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
370
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
371
index XXXXXXX..XXXXXXX 100644
372
--- a/tcg/aarch64/tcg-target.c.inc
373
+++ b/tcg/aarch64/tcg-target.c.inc
374
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
375
.out_rrr = tgen_mulsh,
376
};
377
378
+static const TCGOutOpMul2 outop_mulu2 = {
379
+ .base.static_constraint = C_NotImplemented,
380
+};
381
+
382
static void tgen_muluh(TCGContext *s, TCGType type,
383
TCGReg a0, TCGReg a1, TCGReg a2)
384
{
385
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
386
index XXXXXXX..XXXXXXX 100644
387
--- a/tcg/arm/tcg-target.c.inc
388
+++ b/tcg/arm/tcg-target.c.inc
389
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
390
}
391
}
392
393
-static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
394
- TCGReg rd1, TCGReg rn, TCGReg rm)
395
-{
396
- /* umull */
397
- tcg_out32(s, (cond << 28) | 0x00800090 |
398
- (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
399
-}
400
-
401
static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
402
{
403
/* sxtb */
404
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
405
.base.static_constraint = C_NotImplemented,
406
};
407
408
+static void tgen_mulu2(TCGContext *s, TCGType type,
409
+ TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
410
+{
411
+ /* umull */
412
+ tcg_out32(s, (COND_AL << 28) | 0x00800090 |
413
+ (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
414
+}
415
+
416
+static const TCGOutOpMul2 outop_mulu2 = {
417
+ .base.static_constraint = C_O2_I2(r, r, r, r),
418
+ .out_rrrr = tgen_mulu2,
419
+};
420
+
421
static const TCGOutOpBinary outop_muluh = {
422
.base.static_constraint = C_NotImplemented,
423
};
424
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
425
}
426
tcg_out_mov_reg(s, COND_AL, args[0], a0);
427
break;
428
- case INDEX_op_mulu2_i32:
429
- tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
430
- break;
431
432
case INDEX_op_brcond_i32:
433
c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
434
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
435
case INDEX_op_negsetcond_i32:
436
return C_O1_I2(r, r, rIN);
437
438
- case INDEX_op_mulu2_i32:
439
- return C_O2_I2(r, r, r, r);
440
-
441
case INDEX_op_brcond_i32:
442
return C_O0_I2(r, rIN);
443
case INDEX_op_deposit_i32:
444
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
445
index XXXXXXX..XXXXXXX 100644
446
--- a/tcg/i386/tcg-target.c.inc
447
+++ b/tcg/i386/tcg-target.c.inc
448
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_muluh = {
449
.base.static_constraint = C_NotImplemented,
450
};
451
452
+static void tgen_mulu2(TCGContext *s, TCGType type,
453
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
454
+{
455
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
456
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, a3);
457
+}
458
+
459
+static const TCGOutOpMul2 outop_mulu2 = {
460
+ .base.static_constraint = C_O2_I2(a, d, a, r),
461
+ .out_rrrr = tgen_mulu2,
462
+};
463
+
464
static const TCGOutOpBinary outop_nand = {
465
.base.static_constraint = C_NotImplemented,
466
};
467
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
468
tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
469
break;
470
471
- OP_32_64(mulu2):
472
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
473
- break;
474
OP_32_64(add2):
475
if (const_args[4]) {
476
tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
477
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
478
case INDEX_op_movcond_i64:
479
return C_O1_I4(r, r, reT, r, 0);
480
481
- case INDEX_op_mulu2_i32:
482
- case INDEX_op_mulu2_i64:
483
- return C_O2_I2(a, d, a, r);
484
-
485
case INDEX_op_add2_i32:
486
case INDEX_op_add2_i64:
487
case INDEX_op_sub2_i32:
488
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
489
index XXXXXXX..XXXXXXX 100644
490
--- a/tcg/loongarch64/tcg-target.c.inc
491
+++ b/tcg/loongarch64/tcg-target.c.inc
492
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
493
.out_rrr = tgen_mulsh,
494
};
495
496
+static const TCGOutOpMul2 outop_mulu2 = {
497
+ .base.static_constraint = C_NotImplemented,
498
+};
499
+
500
static void tgen_muluh(TCGContext *s, TCGType type,
501
TCGReg a0, TCGReg a1, TCGReg a2)
502
{
503
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
504
index XXXXXXX..XXXXXXX 100644
505
--- a/tcg/mips/tcg-target.c.inc
506
+++ b/tcg/mips/tcg-target.c.inc
507
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
508
.out_rrr = tgen_mulsh,
509
};
510
511
+static void tgen_mulu2(TCGContext *s, TCGType type,
512
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
513
+{
514
+ MIPSInsn insn = type == TCG_TYPE_I32 ? OPC_MULTU : OPC_DMULTU;
515
+ tcg_out_opc_reg(s, insn, 0, a2, a3);
516
+ tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
517
+ tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
518
+}
519
+
520
+static const TCGOutOpMul2 outop_mulu2 = {
521
+ .base.static_constraint = C_Dynamic,
522
+ .base.dynamic_constraint = cset_mul2,
523
+ .out_rrrr = tgen_mulu2,
524
+};
525
+
526
static void tgen_muluh(TCGContext *s, TCGType type,
527
TCGReg a0, TCGReg a1, TCGReg a2)
528
{
529
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
530
tcg_out_ldst(s, i1, a0, a1, a2);
531
break;
532
533
- case INDEX_op_mulu2_i32:
534
- i1 = OPC_MULTU;
535
- goto do_hilo2;
536
- case INDEX_op_mulu2_i64:
537
- i1 = OPC_DMULTU;
538
- do_hilo2:
539
- tcg_out_opc_reg(s, i1, 0, a2, args[3]);
540
- tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
541
- tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0);
542
- break;
543
-
544
case INDEX_op_bswap16_i32:
545
case INDEX_op_bswap16_i64:
546
tcg_out_bswap16(s, a0, a1, a2);
547
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
548
case INDEX_op_setcond_i32:
549
case INDEX_op_setcond_i64:
550
return C_O1_I2(r, rz, rz);
551
- case INDEX_op_mulu2_i32:
552
- case INDEX_op_mulu2_i64:
553
- return C_O2_I2(r, r, r, r);
554
555
case INDEX_op_deposit_i32:
556
case INDEX_op_deposit_i64:
557
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
558
index XXXXXXX..XXXXXXX 100644
559
--- a/tcg/ppc/tcg-target.c.inc
560
+++ b/tcg/ppc/tcg-target.c.inc
561
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
562
.out_rrr = tgen_mulsh,
563
};
564
565
+static const TCGOutOpMul2 outop_mulu2 = {
566
+ .base.static_constraint = C_NotImplemented,
567
+};
568
+
569
static void tgen_muluh(TCGContext *s, TCGType type,
570
TCGReg a0, TCGReg a1, TCGReg a2)
571
{
572
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
573
index XXXXXXX..XXXXXXX 100644
574
--- a/tcg/riscv/tcg-target.c.inc
575
+++ b/tcg/riscv/tcg-target.c.inc
576
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
577
.out_rrr = tgen_mulsh,
578
};
579
580
+static const TCGOutOpMul2 outop_mulu2 = {
581
+ .base.static_constraint = C_NotImplemented,
582
+};
583
+
584
static void tgen_muluh(TCGContext *s, TCGType type,
585
TCGReg a0, TCGReg a1, TCGReg a2)
586
{
587
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
588
index XXXXXXX..XXXXXXX 100644
589
--- a/tcg/s390x/tcg-target.c.inc
590
+++ b/tcg/s390x/tcg-target.c.inc
591
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
592
.base.static_constraint = C_NotImplemented,
593
};
594
595
+static void tgen_mulu2(TCGContext *s, TCGType type,
596
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
597
+{
598
+ tcg_debug_assert(a0 == a2);
599
+ tcg_debug_assert((a1 & 1) == 0);
600
+ tcg_debug_assert(a0 == a1 + 1);
601
+ tcg_out_insn(s, RRE, MLGR, a1, a3);
602
+}
603
+
604
+static TCGConstraintSetIndex cset_mulu2(TCGType type, unsigned flags)
605
+{
606
+ return (type == TCG_TYPE_I64 && HAVE_FACILITY(MISC_INSN_EXT2)
607
+ ? C_O2_I2(o, m, 0, r) : C_NotImplemented);
608
+}
609
+
610
+static const TCGOutOpMul2 outop_mulu2 = {
611
+ .base.static_constraint = C_Dynamic,
612
+ .base.dynamic_constraint = cset_mulu2,
613
+ .out_rrrr = tgen_mulu2,
614
+};
615
+
616
static const TCGOutOpBinary outop_muluh = {
617
.base.static_constraint = C_NotImplemented,
618
};
619
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
620
tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
621
break;
622
623
- case INDEX_op_mulu2_i64:
624
- tcg_debug_assert(args[0] == args[2]);
625
- tcg_debug_assert((args[1] & 1) == 0);
626
- tcg_debug_assert(args[0] == args[1] + 1);
627
- tcg_out_insn(s, RRE, MLGR, args[1], args[3]);
628
- break;
629
-
630
case INDEX_op_add2_i64:
631
if (const_args[4]) {
632
if ((int64_t)args[4] >= 0) {
633
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
634
case INDEX_op_movcond_i64:
635
return C_O1_I4(r, r, rC, rI, r);
636
637
- case INDEX_op_mulu2_i64:
638
- return C_O2_I2(o, m, 0, r);
639
-
640
case INDEX_op_add2_i32:
641
case INDEX_op_sub2_i32:
642
return C_N1_O1_I4(r, r, 0, 1, ri, r);
643
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
644
index XXXXXXX..XXXXXXX 100644
645
--- a/tcg/sparc64/tcg-target.c.inc
646
+++ b/tcg/sparc64/tcg-target.c.inc
647
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
648
.base.static_constraint = C_NotImplemented,
649
};
650
651
+static void tgen_mulu2(TCGContext *s, TCGType type,
652
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
653
+{
654
+ tcg_out_arith(s, a0, a2, a3, ARITH_UMUL);
655
+ tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
656
+}
657
+
658
+static const TCGOutOpMul2 outop_mulu2 = {
659
+ .base.static_constraint = C_Dynamic,
660
+ .base.dynamic_constraint = cset_mul2,
661
+ .out_rrrr = tgen_mulu2,
662
+};
663
+
664
static void tgen_muluh(TCGContext *s, TCGType type,
665
TCGReg a0, TCGReg a1, TCGReg a2)
666
{
667
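tgen_mulu2 above leans on the fact, noted in the comment being deleted
below, that SPARC v9's 32-bit UMUL writes the full 64-bit product into one
64-bit register, so the high word is one right shift away. In C terms (a
sketch; cset_mul2 is defined with the muls2 conversion, not shown here, and
is assumed to restrict this to TCG_TYPE_I32):

    #include <stdint.h>

    static void sparc_mulu2_model(uint32_t *lo, uint32_t *hi,
                                  uint32_t a, uint32_t b)
    {
        uint64_t p = (uint64_t)a * b;   /* umul: full product into a0    */
        *lo = (uint32_t)p;              /* i32 result reads the low half */
        *hi = (uint32_t)(p >> 32);      /* srlx a1, a0, 32               */
    }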
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
668
const int const_args[TCG_MAX_OP_ARGS])
669
{
670
TCGArg a0, a1, a2;
671
- int c, c2;
672
+ int c2;
673
674
/* Hoist the loads of the most common arguments. */
675
a0 = args[0];
676
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
677
args[4], const_args[4], args[5], const_args[5],
678
ARITH_SUBCC, ARITH_SUBC);
679
break;
680
- case INDEX_op_mulu2_i32:
681
- c = ARITH_UMUL;
682
- /* The 32-bit multiply insns produce a full 64-bit result. */
683
- tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
684
- tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
685
- break;
686
687
case INDEX_op_qemu_ld_i32:
688
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
689
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
690
case INDEX_op_sub2_i32:
691
case INDEX_op_sub2_i64:
692
return C_O2_I4(r, r, rz, rz, rJ, rJ);
693
- case INDEX_op_mulu2_i32:
694
- return C_O2_I2(r, r, rz, rJ);
695
696
default:
697
return C_NotImplemented;
698
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
699
index XXXXXXX..XXXXXXX 100644
700
--- a/tcg/tci/tcg-target.c.inc
701
+++ b/tcg/tci/tcg-target.c.inc
702
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
703
return C_O0_I4(r, r, r, r);
704
#endif
705
706
- case INDEX_op_mulu2_i32:
707
- case INDEX_op_mulu2_i64:
708
- return C_O2_I2(r, r, r, r);
709
-
710
case INDEX_op_movcond_i32:
711
case INDEX_op_movcond_i64:
712
case INDEX_op_setcond2_i32:
713
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
714
.base.static_constraint = C_NotImplemented,
715
};
716
717
+static void tgen_mulu2(TCGContext *s, TCGType type,
718
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
719
+{
720
+ tcg_out_op_rrrr(s, glue(INDEX_op_mulu2_i,TCG_TARGET_REG_BITS),
721
+ a0, a1, a2, a3);
722
+}
723
+
724
+static const TCGOutOpMul2 outop_mulu2 = {
725
+ .base.static_constraint = C_Dynamic,
726
+ .base.dynamic_constraint = cset_mul2,
727
+ .out_rrrr = tgen_mulu2,
728
+};
729
+
730
static const TCGOutOpBinary outop_muluh = {
731
.base.static_constraint = C_NotImplemented,
732
};
733
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
734
break;
735
#endif
736
737
- CASE_32_64(mulu2)
738
- tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
739
- break;
740
-
741
case INDEX_op_qemu_ld_i64:
742
case INDEX_op_qemu_st_i64:
743
if (TCG_TARGET_REG_BITS == 32) {
744
--
745
2.43.0
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 17 +++++++++--------
6
tcg/tcg-op.c | 10 +++++-----
7
tcg/tcg.c | 9 +++------
8
tcg/tci.c | 6 ++----
9
docs/devel/tcg-ops.rst | 2 +-
10
tcg/tci/tcg-target.c.inc | 3 +--
11
7 files changed, 22 insertions(+), 28 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(eqv, 1, 2, 0, TCG_OPF_INT)
18
DEF(mul, 1, 2, 0, TCG_OPF_INT)
19
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
20
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
21
+DEF(mulu2, 2, 2, 0, TCG_OPF_INT)
22
DEF(muluh, 1, 2, 0, TCG_OPF_INT)
23
DEF(nand, 1, 2, 0, TCG_OPF_INT)
24
DEF(neg, 1, 1, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
26
27
DEF(add2_i32, 2, 4, 0, 0)
28
DEF(sub2_i32, 2, 4, 0, 0)
29
-DEF(mulu2_i32, 2, 2, 0, 0)
30
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
31
DEF(setcond2_i32, 1, 4, 1, 0)
32
33
@@ -XXX,XX +XXX,XX @@ DEF(bswap64_i64, 1, 1, 1, 0)
34
35
DEF(add2_i64, 2, 4, 0, 0)
36
DEF(sub2_i64, 2, 4, 0, 0)
37
-DEF(mulu2_i64, 2, 2, 0, 0)
38
39
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
40
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
46
TCGOp *op2;
47
48
switch (op->opc) {
49
- case INDEX_op_mulu2_i32:
50
- l = (uint64_t)(uint32_t)a * (uint32_t)b;
51
- h = (int32_t)(l >> 32);
52
- l = (int32_t)l;
53
- break;
54
- case INDEX_op_mulu2_i64:
55
- mulu64(&l, &h, a, b);
56
+ case INDEX_op_mulu2:
57
+ if (ctx->type == TCG_TYPE_I32) {
58
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
59
+ h = (int32_t)(l >> 32);
60
+ l = (int32_t)l;
61
+ } else {
62
+ mulu64(&l, &h, a, b);
63
+ }
64
break;
65
case INDEX_op_muls2:
66
if (ctx->type == TCG_TYPE_I32) {
67
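Note the i32 branch stores both halves sign-extended, matching how TCG
keeps 32-bit constants in 64-bit slots: for a = 0xffffffff, b = 2 the
product is 0x1fffffffe, so h becomes 1 and l becomes (int32_t)0xfffffffe,
i.e. -2. A standalone sketch of that branch (mulu64() covers the i64 case):

    #include <stdint.h>

    static void fold_mulu2_i32(uint64_t *l, uint64_t *h,
                               uint64_t a, uint64_t b)
    {
        uint64_t p = (uint64_t)(uint32_t)a * (uint32_t)b;
        *h = (int32_t)(p >> 32);  /* sign-extend, per TCG's i32 convention */
        *l = (int32_t)p;
    }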
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
68
done = fold_mul_highpart(&ctx, op);
69
break;
70
case INDEX_op_muls2:
71
- CASE_OP_32_64(mulu2):
72
+ case INDEX_op_mulu2:
73
done = fold_multiply2(&ctx, op);
74
break;
75
case INDEX_op_nand:
76
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tcg/tcg-op.c
79
+++ b/tcg/tcg-op.c
80
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
81
82
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
83
{
84
- if (tcg_op_supported(INDEX_op_mulu2_i32, TCG_TYPE_I32, 0)) {
85
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
86
+ if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I32, 0)) {
87
+ tcg_gen_op4_i32(INDEX_op_mulu2, rl, rh, arg1, arg2);
88
} else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) {
89
TCGv_i32 t = tcg_temp_ebb_new_i32();
90
tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
91
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
92
93
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
94
{
95
- if (tcg_op_supported(INDEX_op_mulu2_i64, TCG_TYPE_I64, 0)) {
96
- tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
97
+ if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0)) {
98
+ tcg_gen_op4_i64(INDEX_op_mulu2, rl, rh, arg1, arg2);
99
} else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
100
TCGv_i64 t = tcg_temp_ebb_new_i64();
101
tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
102
@@ -XXX,XX +XXX,XX @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
103
tcg_gen_op3_i64(INDEX_op_mulsh, rh, arg1, arg2);
104
tcg_gen_mov_i64(rl, t);
105
tcg_temp_free_i64(t);
106
- } else if (tcg_op_supported(INDEX_op_mulu2_i64, TCG_TYPE_I64, 0) ||
107
+ } else if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0) ||
108
tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
109
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
110
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
111
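The fallback quoted above composes the double-word product from two
single-word ops when the backend lacks mulu2, going through a temporary so
that *rl* may alias an input. What the mul + muluh pair computes, as a
model (not QEMU code; __int128 is a GCC/Clang extension):

    #include <stdint.h>

    static void mulu2_model(uint64_t *rl, uint64_t *rh,
                            uint64_t a, uint64_t b)
    {
        unsigned __int128 p = (unsigned __int128)a * b;
        *rl = (uint64_t)p;          /* INDEX_op_mul   */
        *rh = (uint64_t)(p >> 64);  /* INDEX_op_muluh */
    }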
diff --git a/tcg/tcg.c b/tcg/tcg.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/tcg/tcg.c
114
+++ b/tcg/tcg.c
115
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
116
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
117
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
118
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
119
- OUTOP(INDEX_op_mulu2_i32, TCGOutOpMul2, outop_mulu2),
120
- OUTOP(INDEX_op_mulu2_i64, TCGOutOpMul2, outop_mulu2),
121
+ OUTOP(INDEX_op_mulu2, TCGOutOpMul2, outop_mulu2),
122
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
123
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
124
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
125
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
126
opc_new = INDEX_op_mul;
127
opc_new2 = INDEX_op_mulsh;
128
goto do_mul2;
129
- case INDEX_op_mulu2_i32:
130
- case INDEX_op_mulu2_i64:
131
+ case INDEX_op_mulu2:
132
opc_new = INDEX_op_mul;
133
opc_new2 = INDEX_op_muluh;
134
do_mul2:
135
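For context, the do_mul2 tail (unchanged, so not quoted here) uses this
opc_new/opc_new2 pairing to narrow a double-word multiply when only one
output is live. A simplified sketch, assumed from the pairing above:

    /* Sketch only: the real code also rewrites args and drops dead ops. */
    if (high_output_dead) {
        op->opc = opc_new;      /* plain mul: keep only the low half */
    } else if (low_output_dead) {
        op->opc = opc_new2;     /* muluh: keep only the high half    */
    }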
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
136
break;
137
138
case INDEX_op_muls2:
139
- case INDEX_op_mulu2_i32:
140
- case INDEX_op_mulu2_i64:
141
+ case INDEX_op_mulu2:
142
{
143
const TCGOutOpMul2 *out =
144
container_of(all_outop[op->opc], TCGOutOpMul2, base);
145
diff --git a/tcg/tci.c b/tcg/tci.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/tcg/tci.c
148
+++ b/tcg/tci.c
149
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
150
muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
151
#endif
152
break;
153
- case INDEX_op_mulu2_i32:
154
- case INDEX_op_mulu2_i64:
155
+ case INDEX_op_mulu2:
156
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
157
#if TCG_TARGET_REG_BITS == 32
158
tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
159
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
160
break;
161
162
case INDEX_op_muls2:
163
- case INDEX_op_mulu2_i32:
164
- case INDEX_op_mulu2_i64:
165
+ case INDEX_op_mulu2:
166
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
167
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
168
op_name, str_r(r0), str_r(r1),
169
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
170
index XXXXXXX..XXXXXXX 100644
171
--- a/docs/devel/tcg-ops.rst
172
+++ b/docs/devel/tcg-ops.rst
173
@@ -XXX,XX +XXX,XX @@ Multiword arithmetic support
174
formed from two single-word arguments, and the double-word output *t0*
175
is returned in two single-word outputs.
176
177
- * - mulu2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
178
+ * - mulu2 *t0_low*, *t0_high*, *t1*, *t2*
179
180
- | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
181
double-word product *t0*. The latter is returned in two single-word outputs.
182
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
183
index XXXXXXX..XXXXXXX 100644
184
--- a/tcg/tci/tcg-target.c.inc
185
+++ b/tcg/tci/tcg-target.c.inc
186
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_mulsh = {
187
static void tgen_mulu2(TCGContext *s, TCGType type,
188
TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
189
{
190
- tcg_out_op_rrrr(s, glue(INDEX_op_mulu2_i,TCG_TARGET_REG_BITS),
191
- a0, a1, a2, a3);
192
+ tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
193
}
194
195
static const TCGOutOpMul2 outop_mulu2 = {
196
--
197
2.43.0
198
199
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/loongarch64/tcg-target-con-set.h | 2 --
5
tcg/loongarch64/tcg-target-has.h | 4 ++--
6
tcg/loongarch64/tcg-target.c.inc | 34 ++++++++++++++++++++++------
7
3 files changed, 29 insertions(+), 11 deletions(-)
8
1
9
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/loongarch64/tcg-target-con-set.h
12
+++ b/tcg/loongarch64/tcg-target-con-set.h
13
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rJ)
14
C_O1_I2(r, r, rU)
15
C_O1_I2(r, r, rW)
16
C_O1_I2(r, 0, rz)
17
-C_O1_I2(r, rz, ri)
18
-C_O1_I2(r, rz, rJ)
19
C_O1_I2(w, w, w)
20
C_O1_I2(w, w, wM)
21
C_O1_I2(w, w, wA)
22
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/loongarch64/tcg-target-has.h
25
+++ b/tcg/loongarch64/tcg-target-has.h
26
@@ -XXX,XX +XXX,XX @@
27
#include "host/cpuinfo.h"
28
29
/* optional instructions */
30
-#define TCG_TARGET_HAS_negsetcond_i32 0
31
+#define TCG_TARGET_HAS_negsetcond_i32 1
32
#define TCG_TARGET_HAS_extract2_i32 0
33
#define TCG_TARGET_HAS_add2_i32 0
34
#define TCG_TARGET_HAS_sub2_i32 0
35
@@ -XXX,XX +XXX,XX @@
36
#define TCG_TARGET_HAS_qemu_st8_i32 0
37
38
/* 64-bit operations */
39
-#define TCG_TARGET_HAS_negsetcond_i64 0
40
+#define TCG_TARGET_HAS_negsetcond_i64 1
41
#define TCG_TARGET_HAS_extract2_i64 0
42
#define TCG_TARGET_HAS_extr_i64_i32 1
43
#define TCG_TARGET_HAS_bswap16_i64 1
44
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/loongarch64/tcg-target.c.inc
47
+++ b/tcg/loongarch64/tcg-target.c.inc
48
@@ -XXX,XX +XXX,XX @@ static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
49
}
50
51
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
52
- TCGReg arg1, tcg_target_long arg2, bool c2)
53
+ TCGReg arg1, tcg_target_long arg2,
54
+ bool c2, bool neg)
55
{
56
int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
57
+ TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
58
59
- if (tmpflags != ret) {
60
- TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
61
-
62
+ if (neg) {
63
+ /* If intermediate result is zero/non-zero: test != 0. */
64
+ if (tmpflags & SETCOND_NEZ) {
65
+ tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
66
+ tmp = ret;
67
+ }
68
+ /* Produce the 0/-1 result. */
69
+ if (tmpflags & SETCOND_INV) {
70
+ tcg_out_opc_addi_d(s, ret, tmp, -1);
71
+ } else {
72
+ tcg_out_opc_sub_d(s, ret, TCG_REG_ZERO, tmp);
73
+ }
74
+ } else {
75
switch (tmpflags & SETCOND_FLAGS) {
76
+ case 0:
77
+ tcg_debug_assert(tmp == ret);
78
+ break;
79
case SETCOND_INV:
80
/* Intermediate result is boolean: simply invert. */
81
tcg_out_opc_xori(s, ret, tmp, 1);
82
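The neg path above turns the 0/1 (or zero/non-zero) intermediate into the
0/-1 result negsetcond requires: sltu first normalizes zero/non-zero to
0/1, then either an addi of -1 (when the intermediate holds the inverted
test) or a subtract from zero produces the mask. As a value-flow sketch
(hypothetical helper):

    #include <stdbool.h>
    #include <stdint.h>

    static int64_t negsetcond_tail(int64_t tmp, bool inv)
    {
        /* tmp is already a 0/1 boolean here. */
        return inv ? tmp - 1    /* addi -1:        1 -> 0,  0 -> -1 */
                   : -tmp;      /* sub from zero:  1 -> -1, 0 -> 0  */
    }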
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
83
84
case INDEX_op_setcond_i32:
85
case INDEX_op_setcond_i64:
86
- tcg_out_setcond(s, args[3], a0, a1, a2, c2);
87
+ tcg_out_setcond(s, args[3], a0, a1, a2, c2, false);
88
+ break;
89
+ case INDEX_op_negsetcond_i32:
90
+ case INDEX_op_negsetcond_i64:
91
+ tcg_out_setcond(s, args[3], a0, a1, a2, c2, true);
92
break;
93
94
case INDEX_op_movcond_i32:
95
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
96
return C_O1_I2(r, 0, rz);
97
98
case INDEX_op_setcond_i32:
99
- return C_O1_I2(r, rz, ri);
100
case INDEX_op_setcond_i64:
101
- return C_O1_I2(r, rz, rJ);
102
+ case INDEX_op_negsetcond_i32:
103
+ case INDEX_op_negsetcond_i64:
104
+ return C_O1_I2(r, r, rJ);
105
106
case INDEX_op_movcond_i32:
107
case INDEX_op_movcond_i64:
108
--
109
2.43.0
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/mips/tcg-target-has.h | 4 ++--
5
tcg/mips/tcg-target.c.inc | 25 +++++++++++++++++++++++++
6
2 files changed, 27 insertions(+), 2 deletions(-)
7
1
8
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/mips/tcg-target-has.h
11
+++ b/tcg/mips/tcg-target-has.h
12
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
13
/* optional instructions */
14
#define TCG_TARGET_HAS_bswap16_i32 1
15
#define TCG_TARGET_HAS_bswap32_i32 1
16
-#define TCG_TARGET_HAS_negsetcond_i32 0
17
+#define TCG_TARGET_HAS_negsetcond_i32 1
18
19
#if TCG_TARGET_REG_BITS == 64
20
#define TCG_TARGET_HAS_add2_i32 0
21
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
22
#define TCG_TARGET_HAS_sub2_i64 0
23
#define TCG_TARGET_HAS_ext32s_i64 1
24
#define TCG_TARGET_HAS_ext32u_i64 1
25
-#define TCG_TARGET_HAS_negsetcond_i64 0
26
+#define TCG_TARGET_HAS_negsetcond_i64 1
27
#endif
28
29
/* optional instructions detected at runtime */
30
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/mips/tcg-target.c.inc
33
+++ b/tcg/mips/tcg-target.c.inc
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
35
tcg_out_setcond_end(s, ret, tmpflags);
36
}
37
38
+static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
39
+ TCGReg arg1, TCGReg arg2)
40
+{
41
+ int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2);
42
+ TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
43
+
44
+ /* If intermediate result is zero/non-zero: test != 0. */
45
+ if (tmpflags & SETCOND_NEZ) {
46
+ tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
47
+ tmp = ret;
48
+ }
49
+ /* Produce the 0/-1 result. */
50
+ if (tmpflags & SETCOND_INV) {
51
+ tcg_out_opc_imm(s, OPC_ADDIU, ret, tmp, -1);
52
+ } else {
53
+ tcg_out_opc_reg(s, OPC_SUBU, ret, TCG_REG_ZERO, tmp);
54
+ }
55
+}
56
+
57
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
58
TCGReg arg2, TCGLabel *l)
59
{
60
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
61
case INDEX_op_setcond_i64:
62
tcg_out_setcond(s, args[3], a0, a1, a2);
63
break;
64
+ case INDEX_op_negsetcond_i32:
65
+ case INDEX_op_negsetcond_i64:
66
+ tcg_out_negsetcond(s, args[3], a0, a1, a2);
67
+ break;
68
case INDEX_op_setcond2_i32:
69
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
70
break;
71
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
72
73
case INDEX_op_setcond_i32:
74
case INDEX_op_setcond_i64:
75
+ case INDEX_op_negsetcond_i32:
76
+ case INDEX_op_negsetcond_i64:
77
return C_O1_I2(r, rz, rz);
78
79
case INDEX_op_deposit_i32:
80
--
81
2.43.0
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/tci/tcg-target-has.h | 4 ++--
6
tcg/tci/tcg-target.c.inc | 13 +++++++++++++
7
2 files changed, 15 insertions(+), 2 deletions(-)
8
1
9
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tci/tcg-target-has.h
12
+++ b/tcg/tci/tcg-target-has.h
13
@@ -XXX,XX +XXX,XX @@
14
#define TCG_TARGET_HAS_bswap16_i32 1
15
#define TCG_TARGET_HAS_bswap32_i32 1
16
#define TCG_TARGET_HAS_extract2_i32 0
17
-#define TCG_TARGET_HAS_negsetcond_i32 0
18
+#define TCG_TARGET_HAS_negsetcond_i32 1
19
#define TCG_TARGET_HAS_qemu_st8_i32 0
20
21
#if TCG_TARGET_REG_BITS == 64
22
@@ -XXX,XX +XXX,XX @@
23
#define TCG_TARGET_HAS_bswap32_i64 1
24
#define TCG_TARGET_HAS_bswap64_i64 1
25
#define TCG_TARGET_HAS_extract2_i64 0
26
-#define TCG_TARGET_HAS_negsetcond_i64 0
27
+#define TCG_TARGET_HAS_negsetcond_i64 1
28
#define TCG_TARGET_HAS_add2_i32 1
29
#define TCG_TARGET_HAS_sub2_i32 1
30
#define TCG_TARGET_HAS_add2_i64 1
31
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/tci/tcg-target.c.inc
34
+++ b/tcg/tci/tcg-target.c.inc
35
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
36
37
case INDEX_op_setcond_i32:
38
case INDEX_op_setcond_i64:
39
+ case INDEX_op_negsetcond_i32:
40
+ case INDEX_op_negsetcond_i64:
41
case INDEX_op_deposit_i32:
42
case INDEX_op_deposit_i64:
43
return C_O1_I2(r, r, r);
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
45
args[3], args[4], args[5]);
46
break;
47
48
+ case INDEX_op_negsetcond_i32:
49
+ tcg_out_op_rrrc(s, INDEX_op_setcond_i32,
50
+ args[0], args[1], args[2], args[3]);
51
+ tcg_out_op_rr(s, INDEX_op_neg, args[0], args[0]);
52
+ break;
53
+ case INDEX_op_negsetcond_i64:
54
+ tcg_out_op_rrrc(s, INDEX_op_setcond_i64,
55
+ args[0], args[1], args[2], args[3]);
56
+ tcg_out_op_rr(s, INDEX_op_neg, args[0], args[0]);
57
+ break;
58
+
59
CASE_32_64(ld8u)
60
CASE_32_64(ld8s)
61
CASE_32_64(ld16u)
62
--
63
2.43.0
64
65
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/mips/tcg-target-con-set.h | 2 +-
5
tcg/sparc64/tcg-target-con-set.h | 1 -
6
tcg/tcg.c | 31 ++++++++
7
tcg/aarch64/tcg-target.c.inc | 121 ++++++++++++++++++++-----------
8
tcg/arm/tcg-target.c.inc | 117 +++++++++++++++++++++---------
9
tcg/i386/tcg-target.c.inc | 57 +++++++++++----
10
tcg/loongarch64/tcg-target.c.inc | 51 +++++++++----
11
tcg/mips/tcg-target.c.inc | 39 +++++-----
12
tcg/ppc/tcg-target.c.inc | 61 ++++++++++------
13
tcg/riscv/tcg-target.c.inc | 52 +++++++++----
14
tcg/s390x/tcg-target.c.inc | 64 +++++++++-------
15
tcg/sparc64/tcg-target.c.inc | 69 +++++++++++++-----
16
tcg/tci/tcg-target.c.inc | 49 +++++++------
17
13 files changed, 477 insertions(+), 237 deletions(-)
18
1
19
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/mips/tcg-target-con-set.h
22
+++ b/tcg/mips/tcg-target-con-set.h
23
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, ri)
24
C_O1_I2(r, r, rI)
25
C_O1_I2(r, r, rIK)
26
C_O1_I2(r, r, rJ)
27
+C_O1_I2(r, r, rz)
28
C_O1_I2(r, r, rzW)
29
-C_O1_I2(r, rz, rz)
30
C_O1_I4(r, rz, rz, rz, 0)
31
C_O1_I4(r, rz, rz, rz, rz)
32
C_O2_I1(r, r, r)
33
diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/sparc64/tcg-target-con-set.h
36
+++ b/tcg/sparc64/tcg-target-con-set.h
37
@@ -XXX,XX +XXX,XX @@ C_O0_I2(rz, rJ)
38
C_O1_I1(r, r)
39
C_O1_I2(r, r, r)
40
C_O1_I2(r, r, rJ)
41
-C_O1_I2(r, rz, rJ)
42
C_O1_I4(r, rz, rJ, rI, 0)
43
C_O2_I2(r, r, r, r)
44
C_O2_I4(r, r, rz, rz, rJ, rJ)
45
diff --git a/tcg/tcg.c b/tcg/tcg.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/tcg/tcg.c
48
+++ b/tcg/tcg.c
49
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpUnary {
50
void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
51
} TCGOutOpUnary;
52
53
+typedef struct TCGOutOpSetcond {
54
+ TCGOutOp base;
55
+ void (*out_rrr)(TCGContext *s, TCGType type, TCGCond cond,
56
+ TCGReg ret, TCGReg a1, TCGReg a2);
57
+ void (*out_rri)(TCGContext *s, TCGType type, TCGCond cond,
58
+ TCGReg ret, TCGReg a1, tcg_target_long a2);
59
+} TCGOutOpSetcond;
60
+
61
typedef struct TCGOutOpSubtract {
62
TCGOutOp base;
63
void (*out_rrr)(TCGContext *s, TCGType type,
64
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
65
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
66
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
67
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
68
+ OUTOP(INDEX_op_negsetcond_i32, TCGOutOpSetcond, outop_negsetcond),
69
+ OUTOP(INDEX_op_negsetcond_i64, TCGOutOpSetcond, outop_negsetcond),
70
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
71
OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
72
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
73
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
74
OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
75
OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr),
76
OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
77
+ OUTOP(INDEX_op_setcond_i32, TCGOutOpSetcond, outop_setcond),
78
+ OUTOP(INDEX_op_setcond_i64, TCGOutOpSetcond, outop_setcond),
79
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
80
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
81
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
82
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
83
}
84
break;
85
86
+ case INDEX_op_setcond_i32:
87
+ case INDEX_op_setcond_i64:
88
+ case INDEX_op_negsetcond_i32:
89
+ case INDEX_op_negsetcond_i64:
90
+ {
91
+ const TCGOutOpSetcond *out =
92
+ container_of(all_outop[op->opc], TCGOutOpSetcond, base);
93
+ TCGCond cond = new_args[3];
94
+
95
+ tcg_debug_assert(!const_args[1]);
96
+ if (const_args[2]) {
97
+ out->out_rri(s, type, cond,
98
+ new_args[0], new_args[1], new_args[2]);
99
+ } else {
100
+ out->out_rrr(s, type, cond,
101
+ new_args[0], new_args[1], new_args[2]);
102
+ }
103
+ }
104
+ break;
105
106
default:
107
if (def->flags & TCG_OPF_VECTOR) {
108
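With this dispatch in place, each backend supplies its hooks through a
named structure, and the per-target diffs below all follow the same shape.
A skeleton of the minimum a port provides, modeled on the tci version at
the end of this patch (illustrative; a port without an out_rri hook simply
uses constraints that never allow a constant second input):

    static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                             TCGReg a0, TCGReg a1, TCGReg a2)
    {
        /* emit: compare a1 against a2, materialize 0/1 into a0 */
    }

    static const TCGOutOpSetcond outop_setcond = {
        .base.static_constraint = C_O1_I2(r, r, r),
        .out_rrr = tgen_setcond,
        /* no .out_rri: the 'r' constraint keeps arg2 in a register */
    };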
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
109
index XXXXXXX..XXXXXXX 100644
110
--- a/tcg/aarch64/tcg-target.c.inc
111
+++ b/tcg/aarch64/tcg-target.c.inc
112
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
113
tcg_out_bfm(s, ext, rd, rn, a, b);
114
}
115
116
+static void tgen_cmp(TCGContext *s, TCGType ext, TCGCond cond,
117
+ TCGReg a, TCGReg b)
118
+{
119
+ if (is_tst_cond(cond)) {
120
+ tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
121
+ } else {
122
+ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
123
+ }
124
+}
125
+
126
+static void tgen_cmpi(TCGContext *s, TCGType ext, TCGCond cond,
127
+ TCGReg a, tcg_target_long b)
128
+{
129
+ if (is_tst_cond(cond)) {
130
+ tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b);
131
+ } else if (b >= 0) {
132
+ tcg_debug_assert(is_aimm(b));
133
+ tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
134
+ } else {
135
+ tcg_debug_assert(is_aimm(-b));
136
+ tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
137
+ }
138
+}
139
+
140
static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a,
141
tcg_target_long b, bool const_b)
142
{
143
- if (is_tst_cond(cond)) {
144
- if (!const_b) {
145
- tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
146
- } else {
147
- tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b);
148
- }
149
+ if (const_b) {
150
+ tgen_cmpi(s, ext, cond, a, b);
151
} else {
152
- if (!const_b) {
153
- tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
154
- } else if (b >= 0) {
155
- tcg_debug_assert(is_aimm(b));
156
- tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
157
- } else {
158
- tcg_debug_assert(is_aimm(-b));
159
- tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
160
- }
161
+ tgen_cmp(s, ext, cond, a, b);
162
}
163
}
164
165
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
166
.out_rr = tgen_not,
167
};
168
169
+static void tgen_cset(TCGContext *s, TCGCond cond, TCGReg ret)
170
+{
171
+ /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
172
+ tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, ret, TCG_REG_XZR,
173
+ TCG_REG_XZR, tcg_invert_cond(cond));
174
+}
175
+
176
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
177
+ TCGReg a0, TCGReg a1, TCGReg a2)
178
+{
179
+ tgen_cmp(s, type, cond, a1, a2);
180
+ tgen_cset(s, cond, a0);
181
+}
182
+
183
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
184
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
185
+{
186
+ tgen_cmpi(s, type, cond, a1, a2);
187
+ tgen_cset(s, cond, a0);
188
+}
189
+
190
+static const TCGOutOpSetcond outop_setcond = {
191
+ .base.static_constraint = C_O1_I2(r, r, rC),
192
+ .out_rrr = tgen_setcond,
193
+ .out_rri = tgen_setcondi,
194
+};
195
+
196
+static void tgen_csetm(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret)
197
+{
198
+ /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
199
+ tcg_out_insn(s, 3506, CSINV, ext, ret, TCG_REG_XZR,
200
+ TCG_REG_XZR, tcg_invert_cond(cond));
201
+}
202
+
203
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
204
+ TCGReg a0, TCGReg a1, TCGReg a2)
205
+{
206
+ tgen_cmp(s, type, cond, a1, a2);
207
+ tgen_csetm(s, type, cond, a0);
208
+}
209
+
210
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
211
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
212
+{
213
+ tgen_cmpi(s, type, cond, a1, a2);
214
+ tgen_csetm(s, type, cond, a0);
215
+}
216
+
217
+static const TCGOutOpSetcond outop_negsetcond = {
218
+ .base.static_constraint = C_O1_I2(r, r, rC),
219
+ .out_rrr = tgen_negsetcond,
220
+ .out_rri = tgen_negsetcondi,
221
+};
222
223
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
224
const TCGArg args[TCG_MAX_OP_ARGS],
225
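The two aliases used above differ only in how the "true" value is
produced. Behaviour of the underlying conditional-select instructions, as
runnable C (sketch):

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t csinc(bool c, uint64_t n, uint64_t m) { return c ? n : m + 1; }
    static uint64_t csinv(bool c, uint64_t n, uint64_t m) { return c ? n : ~m; }
    /* cset:  csinc(!cond, 0, 0) == (cond ? 1 : 0)            */
    /* csetm: csinv(!cond, 0, 0) == (cond ? (uint64_t)-1 : 0) */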
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
226
tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
227
break;
228
229
- case INDEX_op_setcond_i32:
230
- a2 = (int32_t)a2;
231
- /* FALLTHRU */
232
- case INDEX_op_setcond_i64:
233
- tcg_out_cmp(s, ext, args[3], a1, a2, c2);
234
- /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
235
- tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
236
- TCG_REG_XZR, tcg_invert_cond(args[3]));
237
- break;
238
-
239
- case INDEX_op_negsetcond_i32:
240
- a2 = (int32_t)a2;
241
- /* FALLTHRU */
242
- case INDEX_op_negsetcond_i64:
243
- tcg_out_cmp(s, ext, args[3], a1, a2, c2);
244
- /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
245
- tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
246
- TCG_REG_XZR, tcg_invert_cond(args[3]));
247
- break;
248
-
249
case INDEX_op_movcond_i32:
250
a2 = (int32_t)a2;
251
/* FALLTHRU */
252
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
253
case INDEX_op_st_i64:
254
return C_O0_I2(rz, r);
255
256
- case INDEX_op_setcond_i32:
257
- case INDEX_op_setcond_i64:
258
- case INDEX_op_negsetcond_i32:
259
- case INDEX_op_negsetcond_i64:
260
- return C_O1_I2(r, r, rC);
261
-
262
case INDEX_op_brcond_i32:
263
case INDEX_op_brcond_i64:
264
return C_O0_I2(r, rC);
265
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
266
index XXXXXXX..XXXXXXX 100644
267
--- a/tcg/arm/tcg-target.c.inc
268
+++ b/tcg/arm/tcg-target.c.inc
269
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
270
}
271
}
272
273
-static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
274
- TCGArg b, int b_const)
275
+static TCGCond tgen_cmp(TCGContext *s, TCGCond cond, TCGReg a, TCGReg b)
276
{
277
+ if (is_tst_cond(cond)) {
278
+ tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
279
+ return tcg_tst_eqne_cond(cond);
280
+ }
281
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, a, b, SHIFT_IMM_LSL(0));
282
+ return cond;
283
+}
284
+
285
+static TCGCond tgen_cmpi(TCGContext *s, TCGCond cond, TCGReg a, TCGArg b)
286
+{
287
+ int imm12;
288
+
289
if (!is_tst_cond(cond)) {
290
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
291
+ tcg_out_dat_IN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b);
292
return cond;
293
}
294
295
- cond = tcg_tst_eqne_cond(cond);
296
- if (b_const) {
297
- int imm12 = encode_imm(b);
298
-
299
- /*
300
- * The compare constraints allow rIN, but TST does not support N.
301
- * Be prepared to load the constant into a scratch register.
302
- */
303
- if (imm12 >= 0) {
304
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
305
- return cond;
306
- }
307
+ /*
308
+ * The compare constraints allow rIN, but TST does not support N.
309
+ * Be prepared to load the constant into a scratch register.
310
+ */
311
+ imm12 = encode_imm(b);
312
+ if (imm12 >= 0) {
313
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
314
+ } else {
315
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
316
- b = TCG_REG_TMP;
317
+ tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0,
318
+ a, TCG_REG_TMP, SHIFT_IMM_LSL(0));
319
+ }
320
+ return tcg_tst_eqne_cond(cond);
321
+}
322
+
323
+static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
324
+ TCGArg b, int b_const)
325
+{
326
+ if (b_const) {
327
+ return tgen_cmpi(s, cond, a, b);
328
+ } else {
329
+ return tgen_cmp(s, cond, a, b);
330
}
331
- tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
332
- return cond;
333
}
334
335
static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
336
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
337
.out_rr = tgen_not,
338
};
339
340
+static void finish_setcond(TCGContext *s, TCGCond cond, TCGReg ret, bool neg)
341
+{
342
+ tcg_out_movi32(s, tcg_cond_to_arm_cond[tcg_invert_cond(cond)], ret, 0);
343
+ tcg_out_movi32(s, tcg_cond_to_arm_cond[cond], ret, neg ? -1 : 1);
344
+}
345
+
346
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
347
+ TCGReg a0, TCGReg a1, TCGReg a2)
348
+{
349
+ cond = tgen_cmp(s, cond, a1, a2);
350
+ finish_setcond(s, cond, a0, false);
351
+}
352
+
353
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
354
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
355
+{
356
+ cond = tgen_cmpi(s, cond, a1, a2);
357
+ finish_setcond(s, cond, a0, false);
358
+}
359
+
360
+static const TCGOutOpSetcond outop_setcond = {
361
+ .base.static_constraint = C_O1_I2(r, r, rIN),
362
+ .out_rrr = tgen_setcond,
363
+ .out_rri = tgen_setcondi,
364
+};
365
+
366
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
367
+ TCGReg a0, TCGReg a1, TCGReg a2)
368
+{
369
+ cond = tgen_cmp(s, cond, a1, a2);
370
+ finish_setcond(s, cond, a0, true);
371
+}
372
+
373
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
374
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
375
+{
376
+ cond = tgen_cmpi(s, cond, a1, a2);
377
+ finish_setcond(s, cond, a0, true);
378
+}
379
+
380
+static const TCGOutOpSetcond outop_negsetcond = {
381
+ .base.static_constraint = C_O1_I2(r, r, rIN),
382
+ .out_rrr = tgen_negsetcond,
383
+ .out_rri = tgen_negsetcondi,
384
+};
385
+
386
387
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
388
const TCGArg args[TCG_MAX_OP_ARGS],
389
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
390
c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
391
tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
392
break;
393
- case INDEX_op_setcond_i32:
394
- c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
395
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
396
- ARITH_MOV, args[0], 0, 1);
397
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
398
- ARITH_MOV, args[0], 0, 0);
399
- break;
400
- case INDEX_op_negsetcond_i32:
401
- c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
402
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
403
- ARITH_MVN, args[0], 0, 0);
404
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
405
- ARITH_MOV, args[0], 0, 0);
406
- break;
407
408
case INDEX_op_brcond2_i32:
409
c = tcg_out_cmp2(s, args, const_args);
410
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
411
case INDEX_op_st_i32:
412
return C_O0_I2(r, r);
413
414
- case INDEX_op_setcond_i32:
415
- case INDEX_op_negsetcond_i32:
416
- return C_O1_I2(r, r, rIN);
417
-
418
case INDEX_op_brcond_i32:
419
return C_O0_I2(r, rIN);
420
case INDEX_op_deposit_i32:
421
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
422
index XXXXXXX..XXXXXXX 100644
423
--- a/tcg/i386/tcg-target.c.inc
424
+++ b/tcg/i386/tcg-target.c.inc
425
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
426
}
427
#endif
428
429
-static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
430
- TCGArg dest, TCGArg arg1, TCGArg arg2,
431
- int const_arg2, bool neg)
432
+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
433
+ TCGReg dest, TCGReg arg1, TCGArg arg2,
434
+ bool const_arg2, bool neg)
435
{
436
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
437
int cmp_rexw = rexw;
438
bool inv = false;
439
bool cleared;
440
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
441
case TCG_COND_LT:
442
/* If arg2 is 0, extract the sign bit. */
443
if (const_arg2 && arg2 == 0) {
444
- tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, dest, arg1);
445
+ tcg_out_mov(s, type, dest, arg1);
446
if (inv) {
447
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, dest);
448
}
449
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
450
}
451
}
452
453
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
454
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
455
+{
456
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
457
+}
458
+
459
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
460
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
461
+{
462
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
463
+}
464
+
465
+static const TCGOutOpSetcond outop_setcond = {
466
+ .base.static_constraint = C_O1_I2(q, r, reT),
467
+ .out_rrr = tgen_setcond,
468
+ .out_rri = tgen_setcondi,
469
+};
470
+
471
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
472
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
473
+{
474
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
475
+}
476
+
477
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
478
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
479
+{
480
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
481
+}
482
+
483
+static const TCGOutOpSetcond outop_negsetcond = {
484
+ .base.static_constraint = C_O1_I2(q, r, reT),
485
+ .out_rrr = tgen_negsetcond,
486
+ .out_rri = tgen_negsetcondi,
487
+};
488
+
489
#if TCG_TARGET_REG_BITS == 32
490
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
491
const int *const_args)
492
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
493
tcg_out_brcond(s, rexw, a2, a0, a1, const_args[1],
494
arg_label(args[3]), 0);
495
break;
496
- OP_32_64(setcond):
497
- tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, false);
498
- break;
499
- OP_32_64(negsetcond):
500
- tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, true);
501
- break;
502
OP_32_64(movcond):
503
tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
504
break;
505
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
506
case INDEX_op_deposit_i64:
507
return C_O1_I2(q, 0, qi);
508
509
- case INDEX_op_setcond_i32:
510
- case INDEX_op_setcond_i64:
511
- case INDEX_op_negsetcond_i32:
512
- case INDEX_op_negsetcond_i64:
513
- return C_O1_I2(q, r, reT);
514
-
515
case INDEX_op_movcond_i32:
516
case INDEX_op_movcond_i64:
517
return C_O1_I4(r, r, reT, r, 0);
518
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
519
index XXXXXXX..XXXXXXX 100644
520
--- a/tcg/loongarch64/tcg-target.c.inc
521
+++ b/tcg/loongarch64/tcg-target.c.inc
522
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
523
}
524
}
525
526
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
527
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
528
+{
529
+ tcg_out_setcond(s, cond, dest, arg1, arg2, false, false);
530
+}
531
+
532
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
533
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
534
+{
535
+ tcg_out_setcond(s, cond, dest, arg1, arg2, true, false);
536
+}
537
+
538
+static const TCGOutOpSetcond outop_setcond = {
539
+ .base.static_constraint = C_O1_I2(r, r, rJ),
540
+ .out_rrr = tgen_setcond,
541
+ .out_rri = tgen_setcondi,
542
+};
543
+
544
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
545
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
546
+{
547
+ tcg_out_setcond(s, cond, dest, arg1, arg2, false, true);
548
+}
549
+
550
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
551
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
552
+{
553
+ tcg_out_setcond(s, cond, dest, arg1, arg2, true, true);
554
+}
555
+
556
+static const TCGOutOpSetcond outop_negsetcond = {
557
+ .base.static_constraint = C_O1_I2(r, r, rJ),
558
+ .out_rrr = tgen_negsetcond,
559
+ .out_rri = tgen_negsetcondi,
560
+};
561
+
562
static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
563
TCGReg c1, tcg_target_long c2, bool const2,
564
TCGReg v1, TCGReg v2)
565
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
566
tcg_out_opc_revb_d(s, a0, a1);
567
break;
568
569
- case INDEX_op_setcond_i32:
570
- case INDEX_op_setcond_i64:
571
- tcg_out_setcond(s, args[3], a0, a1, a2, c2, false);
572
- break;
573
- case INDEX_op_negsetcond_i32:
574
- case INDEX_op_negsetcond_i64:
575
- tcg_out_setcond(s, args[3], a0, a1, a2, c2, true);
576
- break;
577
-
578
case INDEX_op_movcond_i32:
579
case INDEX_op_movcond_i64:
580
tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
581
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
582
/* Must deposit into the same register as input */
583
return C_O1_I2(r, 0, rz);
584
585
- case INDEX_op_setcond_i32:
586
- case INDEX_op_setcond_i64:
587
- case INDEX_op_negsetcond_i32:
588
- case INDEX_op_negsetcond_i64:
589
- return C_O1_I2(r, r, rJ);
590
-
591
case INDEX_op_movcond_i32:
592
case INDEX_op_movcond_i64:
593
return C_O1_I4(r, rz, rJ, rz, rz);
594
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
595
index XXXXXXX..XXXXXXX 100644
596
--- a/tcg/mips/tcg-target.c.inc
597
+++ b/tcg/mips/tcg-target.c.inc
598
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_end(TCGContext *s, TCGReg ret, int tmpflags)
599
}
600
}
601
602
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
603
- TCGReg arg1, TCGReg arg2)
604
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
605
+ TCGReg ret, TCGReg arg1, TCGReg arg2)
606
{
607
int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2);
608
tcg_out_setcond_end(s, ret, tmpflags);
609
}
610
611
-static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
612
- TCGReg arg1, TCGReg arg2)
613
+static const TCGOutOpSetcond outop_setcond = {
614
+ .base.static_constraint = C_O1_I2(r, r, rz),
615
+ .out_rrr = tgen_setcond,
616
+};
617
+
618
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
619
+ TCGReg ret, TCGReg arg1, TCGReg arg2)
620
{
621
int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2);
622
TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
623
@@ -XXX,XX +XXX,XX @@ static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
624
}
625
}
626
627
+static const TCGOutOpSetcond outop_negsetcond = {
628
+ .base.static_constraint = C_O1_I2(r, r, rz),
629
+ .out_rrr = tgen_negsetcond,
630
+};
631
+
632
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
633
TCGReg arg2, TCGLabel *l)
634
{
635
@@ -XXX,XX +XXX,XX @@ static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret,
636
break;
637
638
default:
639
- tcg_out_setcond(s, TCG_COND_EQ, TCG_TMP0, ah, bh);
640
- tcg_out_setcond(s, tcg_unsigned_cond(cond), TCG_TMP1, al, bl);
641
+ tgen_setcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_TMP0, ah, bh);
642
+ tgen_setcond(s, TCG_TYPE_I32, tcg_unsigned_cond(cond),
643
+ TCG_TMP1, al, bl);
644
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP0);
645
- tcg_out_setcond(s, tcg_high_cond(cond), TCG_TMP0, ah, bh);
646
+ tgen_setcond(s, TCG_TYPE_I32, tcg_high_cond(cond), TCG_TMP0, ah, bh);
647
tcg_out_opc_reg(s, OPC_OR, ret, TCG_TMP0, TCG_TMP1);
648
break;
649
}
650
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
651
tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
652
break;
653
654
- case INDEX_op_setcond_i32:
655
- case INDEX_op_setcond_i64:
656
- tcg_out_setcond(s, args[3], a0, a1, a2);
657
- break;
658
- case INDEX_op_negsetcond_i32:
659
- case INDEX_op_negsetcond_i64:
660
- tcg_out_negsetcond(s, args[3], a0, a1, a2);
661
- break;
662
case INDEX_op_setcond2_i32:
663
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
664
break;
665
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
666
case INDEX_op_st_i64:
667
return C_O0_I2(rz, r);
668
669
- case INDEX_op_setcond_i32:
670
- case INDEX_op_setcond_i64:
671
- case INDEX_op_negsetcond_i32:
672
- case INDEX_op_negsetcond_i64:
673
- return C_O1_I2(r, rz, rz);
674
-
675
case INDEX_op_deposit_i32:
676
case INDEX_op_deposit_i64:
677
return C_O1_I2(r, 0, rz);
678
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
679
index XXXXXXX..XXXXXXX 100644
680
--- a/tcg/ppc/tcg-target.c.inc
681
+++ b/tcg/ppc/tcg-target.c.inc
682
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
683
}
684
685
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
686
- TCGArg arg0, TCGArg arg1, TCGArg arg2,
687
- int const_arg2, bool neg)
688
+ TCGReg arg0, TCGReg arg1, TCGArg arg2,
689
+ bool const_arg2, bool neg)
690
{
691
int sh;
692
bool inv;
693
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
694
}
695
}
696
697
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
698
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
699
+{
700
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
701
+}
702
+
703
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
704
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
705
+{
706
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
707
+}
708
+
709
+static const TCGOutOpSetcond outop_setcond = {
710
+ .base.static_constraint = C_O1_I2(r, r, rC),
711
+ .out_rrr = tgen_setcond,
712
+ .out_rri = tgen_setcondi,
713
+};
714
+
715
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
716
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
717
+{
718
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
719
+}
720
+
721
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
722
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
723
+{
724
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
725
+}
726
+
727
+static const TCGOutOpSetcond outop_negsetcond = {
728
+ .base.static_constraint = C_O1_I2(r, r, rC),
729
+ .out_rrr = tgen_negsetcond,
730
+ .out_rri = tgen_negsetcondi,
731
+};
732
+
733
static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd)
734
{
735
tcg_out32(s, tcg_to_bc[cond] | bd);
736
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
737
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
738
break;
739
740
- case INDEX_op_setcond_i32:
741
- tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
742
- const_args[2], false);
743
- break;
744
- case INDEX_op_setcond_i64:
745
- tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
746
- const_args[2], false);
747
- break;
748
- case INDEX_op_negsetcond_i32:
749
- tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
750
- const_args[2], true);
751
- break;
752
- case INDEX_op_negsetcond_i64:
753
- tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
754
- const_args[2], true);
755
- break;
756
case INDEX_op_setcond2_i32:
757
tcg_out_setcond2(s, args, const_args);
758
break;
759
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
760
case INDEX_op_brcond_i32:
761
case INDEX_op_brcond_i64:
762
return C_O0_I2(r, rC);
763
- case INDEX_op_setcond_i32:
764
- case INDEX_op_setcond_i64:
765
- case INDEX_op_negsetcond_i32:
766
- case INDEX_op_negsetcond_i64:
767
- return C_O1_I2(r, r, rC);
768
case INDEX_op_movcond_i32:
769
case INDEX_op_movcond_i64:
770
return C_O1_I4(r, r, rC, rZ, rZ);
771
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
772
index XXXXXXX..XXXXXXX 100644
773
--- a/tcg/riscv/tcg-target.c.inc
774
+++ b/tcg/riscv/tcg-target.c.inc
775
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
776
}
777
}
778
779
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
780
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
781
+{
782
+ tcg_out_setcond(s, cond, dest, arg1, arg2, false);
783
+}
784
+
785
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
786
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
787
+{
788
+ tcg_out_setcond(s, cond, dest, arg1, arg2, true);
789
+}
790
+
791
+static const TCGOutOpSetcond outop_setcond = {
792
+ .base.static_constraint = C_O1_I2(r, r, rI),
793
+ .out_rrr = tgen_setcond,
794
+ .out_rri = tgen_setcondi,
795
+};
796
+
797
static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
798
TCGReg arg1, tcg_target_long arg2, bool c2)
799
{
800
@@ -XXX,XX +XXX,XX @@ static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
801
}
802
}
803
804
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
805
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
806
+{
807
+ tcg_out_negsetcond(s, cond, dest, arg1, arg2, false);
808
+}
809
+
810
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
811
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
812
+{
813
+ tcg_out_negsetcond(s, cond, dest, arg1, arg2, true);
814
+}
815
+
816
+static const TCGOutOpSetcond outop_negsetcond = {
817
+ .base.static_constraint = C_O1_I2(r, r, rI),
818
+ .out_rrr = tgen_negsetcond,
819
+ .out_rri = tgen_negsetcondi,
820
+};
821
+
822
static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
823
int val1, bool c_val1,
824
int val2, bool c_val2)
825
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
826
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
827
break;
828
829
- case INDEX_op_setcond_i32:
830
- case INDEX_op_setcond_i64:
831
- tcg_out_setcond(s, args[3], a0, a1, a2, c2);
832
- break;
833
-
834
- case INDEX_op_negsetcond_i32:
835
- case INDEX_op_negsetcond_i64:
836
- tcg_out_negsetcond(s, args[3], a0, a1, a2, c2);
837
- break;
838
-
839
case INDEX_op_movcond_i32:
840
case INDEX_op_movcond_i64:
841
tcg_out_movcond(s, args[5], a0, a1, a2, c2,
842
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
843
case INDEX_op_st_i64:
844
return C_O0_I2(rz, r);
845
846
- case INDEX_op_setcond_i32:
847
- case INDEX_op_setcond_i64:
848
- case INDEX_op_negsetcond_i32:
849
- case INDEX_op_negsetcond_i64:
850
- return C_O1_I2(r, r, rI);
851
-
852
case INDEX_op_brcond_i32:
853
case INDEX_op_brcond_i64:
854
return C_O0_I2(rz, rz);
855
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
856
index XXXXXXX..XXXXXXX 100644
857
--- a/tcg/s390x/tcg-target.c.inc
858
+++ b/tcg/s390x/tcg-target.c.inc
859
@@ -XXX,XX +XXX,XX @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
860
return tgen_cmp2(s, type, c, r1, c2, c2const, need_carry, &inv_cc);
861
}
862
863
-static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
864
- TCGReg dest, TCGReg c1, TCGArg c2,
865
- bool c2const, bool neg)
866
+static void tgen_setcond_int(TCGContext *s, TCGType type, TCGCond cond,
867
+ TCGReg dest, TCGReg c1, TCGArg c2,
868
+ bool c2const, bool neg)
869
{
870
int cc;
871
872
@@ -XXX,XX +XXX,XX @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
873
tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
874
}
875
876
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
877
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
878
+{
879
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, false, false);
880
+}
881
+
882
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
883
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
884
+{
885
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, true, false);
886
+}
887
+
888
+static const TCGOutOpSetcond outop_setcond = {
889
+ .base.static_constraint = C_O1_I2(r, r, rC),
890
+ .out_rrr = tgen_setcond,
891
+ .out_rri = tgen_setcondi,
892
+};
893
+
894
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
895
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
896
+{
897
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, false, true);
898
+}
899
+
900
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
901
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
902
+{
903
+ tgen_setcond_int(s, type, cond, dest, arg1, arg2, true, true);
904
+}
905
+
906
+static const TCGOutOpSetcond outop_negsetcond = {
907
+ .base.static_constraint = C_O1_I2(r, r, rC),
908
+ .out_rrr = tgen_negsetcond,
909
+ .out_rri = tgen_negsetcondi,
910
+};
911
+
912
static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
913
TCGArg v3, int v3const, TCGReg v4,
914
int cc, int inv_cc)
915
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
916
tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
917
args[1], const_args[1], arg_label(args[3]));
918
break;
919
- case INDEX_op_setcond_i32:
920
- tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
921
- args[2], const_args[2], false);
922
- break;
923
- case INDEX_op_negsetcond_i32:
924
- tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
925
- args[2], const_args[2], true);
926
- break;
927
case INDEX_op_movcond_i32:
928
tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
929
args[2], const_args[2], args[3], const_args[3], args[4]);
930
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
931
tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
932
args[1], const_args[1], arg_label(args[3]));
933
break;
934
- case INDEX_op_setcond_i64:
935
- tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
936
- args[2], const_args[2], false);
937
- break;
938
- case INDEX_op_negsetcond_i64:
939
- tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
940
- args[2], const_args[2], true);
941
- break;
942
case INDEX_op_movcond_i64:
943
tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
944
args[2], const_args[2], args[3], const_args[3], args[4]);
945
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
946
case INDEX_op_st_i64:
947
return C_O0_I2(r, r);
948
949
- case INDEX_op_setcond_i32:
950
- case INDEX_op_negsetcond_i32:
951
- case INDEX_op_setcond_i64:
952
- case INDEX_op_negsetcond_i64:
953
- return C_O1_I2(r, r, rC);
954
-
955
case INDEX_op_brcond_i32:
956
return C_O0_I2(r, ri);
957
case INDEX_op_brcond_i64:
958
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const, bool neg)
+ TCGReg c1, int32_t c2, bool c2const, bool neg)
{
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
switch (cond) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const, bool neg)
+ TCGReg c1, int32_t c2, bool c2const, bool neg)
{
int rcond;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
}
}

+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1,
+ TCGArg c2, bool c2const, bool neg)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_setcond_i32(s, cond, ret, c1, c2, c2const, neg);
+ } else {
+ tcg_out_setcond_i64(s, cond, ret, c1, c2, c2const, neg);
+ }
+}
+
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
+}
+
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
TCGReg al, TCGReg ah, int32_t bl, int blconst,
int32_t bh, int bhconst, int opl, int oph)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_brcond_i32:
tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
break;
- case INDEX_op_setcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
- break;
- case INDEX_op_negsetcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
- break;
case INDEX_op_movcond_i32:
tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_brcond_i64:
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
break;
- case INDEX_op_setcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
- break;
- case INDEX_op_negsetcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
- break;
case INDEX_op_movcond_i64:
tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, rz, rJ);
-
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return C_O0_I2(rz, rJ);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return C_O1_I2(r, r, r);
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ TCGOpcode opc = (type == TCG_TYPE_I32
+ ? INDEX_op_setcond_i32
+ : INDEX_op_setcond_i64);
+ tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_setcond,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tgen_setcond(s, type, cond, dest, arg1, arg2);
+ tgen_neg(s, type, dest, dest);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_negsetcond,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_l(s, opc, arg_label(args[0]));
break;

- CASE_32_64(setcond)
- tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
- break;
-
CASE_32_64(movcond)
case INDEX_op_setcond2_i32:
tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
args[3], args[4], args[5]);
break;

- case INDEX_op_negsetcond_i32:
- tcg_out_op_rrrc(s, INDEX_op_setcond_i32,
- args[0], args[1], args[2], args[3]);
- tcg_out_op_rr(s, INDEX_op_neg, args[0], args[0]);
- break;
- case INDEX_op_negsetcond_i64:
- tcg_out_op_rrrc(s, INDEX_op_setcond_i64,
- args[0], args[1], args[2], args[3]);
- tcg_out_op_rr(s, INDEX_op_neg, args[0], args[0]);
- break;
-
CASE_32_64(ld8u)
CASE_32_64(ld8s)
CASE_32_64(ld16u)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;

CASE_32_64(brcond)
- tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
- TCG_REG_TMP, args[0], args[1], args[2]);
+ tgen_setcond(s, type, args[2], TCG_REG_TMP, args[0], args[1]);
tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
break;

--
2.43.0
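A note on how the TCGOutOpSetcond tables above are reached: they are invoked from the generic register allocator in tcg/tcg.c, not from each backend's tcg_out_op(). The following is a minimal sketch of that dispatch, reconstructed from the container_of() lookup visible in the next patch; the out_rri/out_rrr split on const_args[2] is inferred from the identical pattern this series uses for brcond, so treat the details as illustrative rather than authoritative.

/* Sketch: generic dispatch of a setcond/negsetcond op to the backend hooks.
 * new_args[]/const_args[] are assumed to have been filled in by the
 * register allocator; cond travels as op->args[3] for these opcodes. */
{
    const TCGOutOpSetcond *out =
        container_of(all_outop[op->opc], TCGOutOpSetcond, base);
    TCGCond cond = new_args[3];

    if (const_args[2]) {
        /* arg2 matched an immediate constraint such as rJ */
        out->out_rri(s, type, cond, new_args[0], new_args[1], new_args[2]);
    } else {
        out->out_rrr(s, type, cond, new_args[0], new_args[1], new_args[2]);
    }
}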
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 6 ++----
target/sh4/translate.c | 6 +++---
tcg/optimize.c | 32 ++++++++------------------------
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 30 ++++++++++--------------------
tcg/tci.c | 14 +++++++-------
docs/devel/tcg-ops.rst | 4 ++--
tcg/tci/tcg-target-opc.h.inc | 1 +
tcg/tci/tcg-target.c.inc | 4 ++--
9 files changed, 39 insertions(+), 66 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mulu2, 2, 2, 0, TCG_OPF_INT)
DEF(muluh, 1, 2, 0, TCG_OPF_INT)
DEF(nand, 1, 2, 0, TCG_OPF_INT)
DEF(neg, 1, 1, 0, TCG_OPF_INT)
+DEF(negsetcond, 1, 2, 1, TCG_OPF_INT)
DEF(nor, 1, 2, 0, TCG_OPF_INT)
DEF(not, 1, 1, 0, TCG_OPF_INT)
DEF(or, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(remu, 1, 2, 0, TCG_OPF_INT)
DEF(rotl, 1, 2, 0, TCG_OPF_INT)
DEF(rotr, 1, 2, 0, TCG_OPF_INT)
DEF(sar, 1, 2, 0, TCG_OPF_INT)
+DEF(setcond, 1, 2, 1, TCG_OPF_INT)
DEF(shl, 1, 2, 0, TCG_OPF_INT)
DEF(shr, 1, 2, 0, TCG_OPF_INT)
DEF(sub, 1, 2, 0, TCG_OPF_INT)
DEF(xor, 1, 2, 0, TCG_OPF_INT)

-DEF(setcond_i32, 1, 2, 1, 0)
-DEF(negsetcond_i32, 1, 2, 1, 0)
DEF(movcond_i32, 1, 4, 1, 0)
/* load/store */
DEF(ld8u_i32, 1, 1, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)
DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)

-DEF(setcond_i64, 1, 2, 1, 0)
-DEF(negsetcond_i64, 1, 2, 1, 0)
DEF(movcond_i64, 1, 4, 1, 0)
/* load/store */
DEF(ld8u_i64, 1, 1, 1, 0)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_setcond_i32; /* placeholder */
+ op_opc = INDEX_op_setcond; /* placeholder */
op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
op_arg = REG(op_src);

@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_setcond_i32;
+ op_opc = INDEX_op_setcond;
op_arg = tcg_constant_i32(0);

NEXT_INSN;
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
}
break;

- case INDEX_op_setcond_i32:
+ case INDEX_op_setcond:
if (st_src == ld_dst) {
goto fail;
}
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
if (ti_is_const(tt) && ti_is_const(ft)) {
uint64_t tv = ti_const_val(tt);
uint64_t fv = ti_const_val(ft);
- TCGOpcode opc, negopc;
TCGCond cond = op->args[5];

- switch (ctx->type) {
- case TCG_TYPE_I32:
- opc = INDEX_op_setcond_i32;
- negopc = INDEX_op_negsetcond_i32;
- tv = (int32_t)tv;
- fv = (int32_t)fv;
- break;
- case TCG_TYPE_I64:
- opc = INDEX_op_setcond_i64;
- negopc = INDEX_op_negsetcond_i64;
- break;
- default:
- g_assert_not_reached();
- }
-
if (tv == 1 && fv == 0) {
- op->opc = opc;
+ op->opc = INDEX_op_setcond;
op->args[3] = cond;
} else if (fv == 1 && tv == 0) {
- op->opc = opc;
+ op->opc = INDEX_op_setcond;
op->args[3] = tcg_invert_cond(cond);
} else if (tv == -1 && fv == 0) {
- op->opc = negopc;
+ op->opc = INDEX_op_negsetcond;
op->args[3] = cond;
} else if (fv == -1 && tv == 0) {
- op->opc = negopc;
+ op->opc = INDEX_op_negsetcond;
op->args[3] = tcg_invert_cond(cond);
}
}
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
do_setcond_low:
op->args[2] = op->args[3];
op->args[3] = cond;
- op->opc = INDEX_op_setcond_i32;
+ op->opc = INDEX_op_setcond;
return fold_setcond(ctx, op);

do_setcond_high:
op->args[1] = op->args[2];
op->args[2] = op->args[4];
op->args[3] = cond;
- op->opc = INDEX_op_setcond_i32;
+ op->opc = INDEX_op_setcond;
return fold_setcond(ctx, op);
}

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_shr:
done = fold_shift(&ctx, op);
break;
- CASE_OP_32_64(setcond):
+ case INDEX_op_setcond:
done = fold_setcond(&ctx, op);
break;
- CASE_OP_32_64(negsetcond):
+ case INDEX_op_negsetcond:
done = fold_negsetcond(&ctx, op);
break;
case INDEX_op_setcond2_i32:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_movi_i32(ret, 0);
} else {
- tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond);
+ tcg_gen_op4i_i32(INDEX_op_setcond, ret, arg1, arg2, cond);
}
}

@@ -XXX,XX +XXX,XX @@ void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_movi_i32(ret, 0);
} else {
- tcg_gen_op4i_i32(INDEX_op_negsetcond_i32, ret, arg1, arg2, cond);
+ tcg_gen_op4i_i32(INDEX_op_negsetcond, ret, arg1, arg2, cond);
}
}

@@ -XXX,XX +XXX,XX @@ void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
} else {
- tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond);
+ tcg_gen_op4i_i64(INDEX_op_setcond, ret, arg1, arg2, cond);
}
}
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
} else if (cond == TCG_COND_NEVER) {
tcg_gen_movi_i64(ret, 0);
} else if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op4i_i64(INDEX_op_negsetcond_i64, ret, arg1, arg2, cond);
+ tcg_gen_op4i_i64(INDEX_op_negsetcond, ret, arg1, arg2, cond);
} else {
tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
TCGV_LOW(arg1), TCGV_HIGH(arg1),
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
- OUTOP(INDEX_op_negsetcond_i32, TCGOutOpSetcond, outop_negsetcond),
- OUTOP(INDEX_op_negsetcond_i64, TCGOutOpSetcond, outop_negsetcond),
+ OUTOP(INDEX_op_negsetcond, TCGOutOpSetcond, outop_negsetcond),
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr),
OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
- OUTOP(INDEX_op_setcond_i32, TCGOutOpSetcond, outop_setcond),
- OUTOP(INDEX_op_setcond_i64, TCGOutOpSetcond, outop_setcond),
+ OUTOP(INDEX_op_setcond, TCGOutOpSetcond, outop_setcond),
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_mov:
+ case INDEX_op_negsetcond:
case INDEX_op_or:
+ case INDEX_op_setcond:
case INDEX_op_xor:
return has_type;

- case INDEX_op_setcond_i32:
- case INDEX_op_negsetcond_i32:
case INDEX_op_brcond_i32:
case INDEX_op_movcond_i32:
case INDEX_op_ld8u_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond2_i32:
return TCG_TARGET_REG_BITS == 32;

- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i64:
case INDEX_op_brcond_i64:
case INDEX_op_movcond_i64:
case INDEX_op_ld8u_i64:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
}
switch (c) {
case INDEX_op_brcond_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_negsetcond_i32:
+ case INDEX_op_setcond:
+ case INDEX_op_negsetcond:
case INDEX_op_movcond_i32:
case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
case INDEX_op_brcond_i64:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i64:
case INDEX_op_movcond_i64:
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
case INDEX_op_brcond_i64:
op_cond = op->args[2];
break;
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
+ case INDEX_op_setcond:
+ case INDEX_op_negsetcond:
case INDEX_op_cmp_vec:
op_cond = op->args[3];
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
+ case INDEX_op_setcond:
+ case INDEX_op_negsetcond:
{
const TCGOutOpSetcond *out =
container_of(all_outop[op->opc], TCGOutOpSetcond, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_l(insn, tb_ptr, &ptr);
tb_ptr = ptr;
continue;
- case INDEX_op_setcond_i32:
- tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
- regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
- break;
case INDEX_op_movcond_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
tmp32 = tci_compare32(regs[r1], regs[r2], condition);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
regs[r0] = tci_compare64(T1, T2, condition);
break;
#elif TCG_TARGET_REG_BITS == 64
- case INDEX_op_setcond_i64:
+ case INDEX_op_setcond:
tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tmp32 = regs[r1];
regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
break;
+ case INDEX_op_tci_setcond32:
+ tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
+ regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
+ break;

/* Shift/rotate operations. */

@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), ptr);
break;

- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
+ case INDEX_op_setcond:
+ case INDEX_op_tci_setcond32:
tci_args_rrrc(insn, &r0, &r1, &r2, &c);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Conditional moves

.. list-table::

- * - setcond_i32/i64 *dest*, *t1*, *t2*, *cond*
+ * - setcond *dest*, *t1*, *t2*, *cond*

- | *dest* = (*t1* *cond* *t2*)
|
| Set *dest* to 1 if (*t1* *cond* *t2*) is true, otherwise set to 0.

- * - negsetcond_i32/i64 *dest*, *t1*, *t2*, *cond*
+ * - negsetcond *dest*, *t1*, *t2*, *cond*

- | *dest* = -(*t1* *cond* *t2*)
|
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-opc.h.inc
+++ b/tcg/tci/tcg-target-opc.h.inc
@@ -XXX,XX +XXX,XX @@ DEF(tci_rems32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_rotl32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
DEF(tci_rotr32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
+DEF(tci_setcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
TCGReg dest, TCGReg arg1, TCGReg arg2)
{
TCGOpcode opc = (type == TCG_TYPE_I32
- ? INDEX_op_setcond_i32
- : INDEX_op_setcond_i64);
+ ? INDEX_op_tci_setcond32
+ : INDEX_op_setcond);
tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

--
2.43.0
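The docs/devel/tcg-ops.rst hunk above states the semantics that distinguish the two opcodes: setcond produces 0 or 1, negsetcond produces 0 or an all-ones mask. A standalone C illustration of the same contract, using an unsigned-less-than condition (function names are ours, not QEMU's):

#include <stdint.h>

/* setcond dest, t1, t2, ltu  ->  dest = (t1 < t2) ? 1 : 0 */
static uint64_t setcond_ltu(uint64_t t1, uint64_t t2)
{
    return t1 < t2 ? 1 : 0;
}

/* negsetcond dest, t1, t2, ltu  ->  dest = -(t1 < t2), i.e. 0 or ~0 */
static uint64_t negsetcond_ltu(uint64_t t1, uint64_t t2)
{
    return -(uint64_t)(t1 < t2);
}

The mask form is what lets fold_movcond above turn a movcond whose arms are -1 and 0 into a single negsetcond.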
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target-con-set.h | 2 +-
tcg/mips/tcg-target-con-set.h | 4 +--
tcg/riscv/tcg-target-con-set.h | 2 +-
tcg/sparc64/tcg-target-con-set.h | 2 +-
tcg/tcg.c | 26 +++++++++++++++++++
tcg/tci.c | 9 ++-----
tcg/aarch64/tcg-target.c.inc | 39 +++++++++++++++-------------
tcg/arm/tcg-target.c.inc | 27 ++++++++++++++-----
tcg/i386/tcg-target.c.inc | 28 ++++++++++++++------
tcg/loongarch64/tcg-target.c.inc | 18 +++++--------
tcg/mips/tcg-target.c.inc | 20 +++++++-------
tcg/ppc/tcg-target.c.inc | 31 +++++++++++-----------
tcg/riscv/tcg-target.c.inc | 18 +++++--------
tcg/s390x/tcg-target.c.inc | 31 ++++++++++----------
tcg/sparc64/tcg-target.c.inc | 38 ++++++++++++++++++++-------
tcg/tci/tcg-target.c.inc | 20 +++++++-------
16 files changed, 190 insertions(+), 125 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
*/
C_O0_I1(r)
C_O0_I2(rz, r)
-C_O0_I2(rz, rz)
+C_O0_I2(r, rz)
C_O0_I2(w, r)
C_O0_I3(r, r, r)
C_O1_I1(r, r)
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
+C_O0_I2(r, rz)
C_O0_I2(rz, r)
-C_O0_I2(rz, rz)
-C_O0_I3(rz, r, r)
C_O0_I3(rz, rz, r)
C_O0_I4(rz, rz, rz, rz)
-C_O0_I4(rz, rz, r, r)
C_O1_I1(r, r)
C_O1_I2(r, 0, rz)
C_O1_I2(r, r, r)
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
*/
C_O0_I1(r)
C_O0_I2(rz, r)
-C_O0_I2(rz, rz)
+C_O0_I2(r, rz)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
*/
C_O0_I1(r)
C_O0_I2(rz, r)
-C_O0_I2(rz, rJ)
+C_O0_I2(r, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rJ)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBinary {
TCGReg a0, TCGReg a1, tcg_target_long a2);
} TCGOutOpBinary;

+typedef struct TCGOutOpBrcond {
+ TCGOutOp base;
+ void (*out_rr)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a1, TCGReg a2, TCGLabel *label);
+ void (*out_ri)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a1, tcg_target_long a2, TCGLabel *label);
+} TCGOutOpBrcond;
+
typedef struct TCGOutOpDivRem {
TCGOutOp base;
void (*out_rr01r)(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
+ OUTOP(INDEX_op_brcond_i32, TCGOutOpBrcond, outop_brcond),
+ OUTOP(INDEX_op_brcond_i64, TCGOutOpBrcond, outop_brcond),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ {
+ const TCGOutOpBrcond *out = &outop_brcond;
+ TCGCond cond = new_args[2];
+ TCGLabel *label = arg_label(new_args[3]);
+
+ tcg_debug_assert(!const_args[0]);
+ if (const_args[1]) {
+ out->out_ri(s, type, cond, new_args[0], new_args[1], label);
+ } else {
+ out->out_rr(s, type, cond, new_args[0], new_args[1], label);
+ }
+ }
+ break;
+
case INDEX_op_setcond:
case INDEX_op_negsetcond:
{
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
regs[r0] = sextract32(regs[r1], pos, len);
break;
case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
tci_args_rl(insn, tb_ptr, &r0, &ptr);
- if ((uint32_t)regs[r0]) {
+ if (regs[r0]) {
tb_ptr = ptr;
}
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
regs[r0] = sextract64(regs[r1], pos, len);
break;
- case INDEX_op_brcond_i64:
- tci_args_rl(insn, tb_ptr, &r0, &ptr);
- if (regs[r0]) {
- tb_ptr = ptr;
- }
- break;
case INDEX_op_ext_i32_i64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = (int32_t)regs[r1];
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
}
}

-static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
- TCGArg b, bool b_const, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg a, TCGReg b, TCGLabel *l)
+{
+ tgen_cmp(s, type, c, a, b);
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
+ tcg_out_insn(s, 3202, B_C, c, 0);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType ext, TCGCond c,
+ TCGReg a, tcg_target_long b, TCGLabel *l)
{
int tbit = -1;
bool need_cmp = true;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
case TCG_COND_EQ:
case TCG_COND_NE:
/* cmp xN,0; b.ne L -> cbnz xN,L */
- if (b_const && b == 0) {
+ if (b == 0) {
need_cmp = false;
}
break;
case TCG_COND_LT:
case TCG_COND_GE:
/* cmp xN,0; b.mi L -> tbnz xN,63,L */
- if (b_const && b == 0) {
+ if (b == 0) {
c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ);
tbit = ext ? 63 : 31;
need_cmp = false;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
case TCG_COND_TSTEQ:
case TCG_COND_TSTNE:
/* tst xN,0xffffffff; b.ne L -> cbnz wN,L */
- if (b_const && b == UINT32_MAX) {
+ if (b == UINT32_MAX) {
c = tcg_tst_eqne_cond(c);
ext = TCG_TYPE_I32;
need_cmp = false;
break;
}
/* tst xN,1<<B; b.ne L -> tbnz xN,B,L */
- if (b_const && is_power_of_2(b)) {
+ if (is_power_of_2(b)) {
tbit = ctz64(b);
need_cmp = false;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
}

if (need_cmp) {
- tcg_out_cmp(s, ext, c, a, b, b_const);
+ tgen_cmpi(s, ext, c, a, b);
tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
tcg_out_insn(s, 3202, B_C, c, 0);
return;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
}
}

+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rC),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits,
TCGReg rd, TCGReg rn)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
break;

- case INDEX_op_brcond_i32:
- a1 = (int32_t)a1;
- /* FALLTHRU */
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
-
case INDEX_op_movcond_i32:
a2 = (int32_t)a2;
/* FALLTHRU */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, rC);
-
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return C_O1_I4(r, r, rC, rz, rz);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, TCGReg a1, TCGLabel *l)
+{
+ cond = tgen_cmp(s, cond, a0, a1);
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg a0, tcg_target_long a1, TCGLabel *l)
+{
+ cond = tgen_cmpi(s, cond, a0, a1);
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rIN),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
static void finish_setcond(TCGContext *s, TCGCond cond, TCGReg ret, bool neg)
{
tcg_out_movi32(s, tcg_cond_to_arm_cond[tcg_invert_cond(cond)], ret, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mov_reg(s, COND_AL, args[0], a0);
break;

- case INDEX_op_brcond_i32:
- c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
- tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
- break;
-
case INDEX_op_brcond2_i32:
c = tcg_out_cmp2(s, args, const_args);
tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i32:
return C_O0_I2(r, r);

- case INDEX_op_brcond_i32:
- return C_O0_I2(r, rIN);
case INDEX_op_deposit_i32:
return C_O1_I2(r, 0, rZ);
case INDEX_op_extract2_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond,
tcg_out_jxx(s, jcc, label, small);
}

+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *label)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_brcond(s, rexw, cond, arg1, arg2, false, label, false);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *label)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+ tcg_out_brcond(s, rexw, cond, arg1, arg2, true, label, false);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, reT),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
#if TCG_TARGET_REG_BITS == 32
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
const int *const_args, bool small)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(brcond):
- tcg_out_brcond(s, rexw, a2, a0, a1, const_args[1],
- arg_label(args[3]), 0);
- break;
OP_32_64(movcond):
tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(re, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, reT);
-
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
case INDEX_op_bswap32_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const struct {
[TCG_COND_GTU] = { OPC_BGTU, false }
};

-static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rz),
+ .out_rr = tgen_brcond,
+};
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_b(s, 0);
break;

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
- break;
-
case INDEX_op_extrh_i64_i32:
tcg_out_opc_srai_d(s, a0, a1, 32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(r, r, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rz, rz);
-
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rrr = tgen_negsetcond,
};

-static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
static const MIPSInsn b_zero[16] = {
[TCG_COND_LT] = OPC_BLTZ,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_nop(s);
}

+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rz),
+ .out_rr = tgen_brcond,
+};
+
static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;
case INDEX_op_br:
- tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
- arg_label(a0));
+ tgen_brcond(s, TCG_TYPE_I32, TCG_COND_EQ,
+ TCG_REG_ZERO, TCG_REG_ZERO, arg_label(a0));
break;

case INDEX_op_ld8u_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
- break;
case INDEX_op_brcond2_i32:
tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return C_O1_I2(r, 0, rz);
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rz, rz);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return (use_mips32r6_instructions
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l)
tcg_out_bc(s, cond, bd);
}

-static void tcg_out_brcond(TCGContext *s, TCGCond cond,
- TCGArg arg1, TCGArg arg2, int const_arg2,
- TCGLabel *l, TCGType type)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
+ tcg_out_cmp(s, cond, arg1, arg2, false, 0, type);
tcg_out_bc_lab(s, cond, l);
}

+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *l)
+{
+ tcg_out_cmp(s, cond, arg1, arg2, true, 0, type);
+ tcg_out_bc_lab(s, cond, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rC),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
TCGArg v2, bool const_c2)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
break;

- case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), TCG_TYPE_I32);
- break;
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), TCG_TYPE_I64);
- break;
case INDEX_op_brcond2_i32:
tcg_out_brcond2(s, args, const_args);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, rC);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return C_O1_I4(r, r, rC, rZ, rZ);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const struct {
[TCG_COND_GTU] = { OPC_BLTU, true }
};

-static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
- TCGReg arg2, TCGLabel *l)
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
RISCVInsn op = tcg_brcond_to_riscv[cond].op;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rz),
+ .out_rr = tgen_brcond,
+};
+
#define SETCOND_INV TCG_TARGET_NB_REGS
#define SETCOND_NEZ (SETCOND_INV << 1)
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const_args[4], const_args[5], true, false);
break;

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
- break;
-
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
tcg_out_movcond(s, args[5], a0, a1, a2, c2,
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rz, rz);
-
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return C_O1_I4(r, r, rI, rM, rM);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
tgen_branch(s, cc, l);
}

+static void tgen_brcondr(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg a0, TCGReg a1, TCGLabel *l)
+{
+ tgen_brcond(s, type, c, a0, a1, false, l);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond c,
+ TCGReg a0, tcg_target_long a1, TCGLabel *l)
+{
+ tgen_brcond(s, type, c, a0, a1, true, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rC),
+ .out_rr = tgen_brcondr,
+ .out_ri = tgen_brcondi,
+};
+
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *dest)
{
ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
break;

- case INDEX_op_brcond_i32:
- tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
- args[1], const_args[1], arg_label(args[3]));
- break;
case INDEX_op_movcond_i32:
tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
args[2], const_args[2], args[3], const_args[3], args[4]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
break;

- case INDEX_op_brcond_i64:
- tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
- args[1], const_args[1], arg_label(args[3]));
- break;
case INDEX_op_movcond_i64:
tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
args[2], const_args[2], args[3], const_args[3], args[4]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_brcond_i32:
- return C_O0_I2(r, ri);
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, rC);
-
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
case INDEX_op_bswap32_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
}
}

+static void tcg_out_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGArg arg2, bool const_arg2,
+ TCGLabel *l)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_brcond_i32(s, cond, arg1, arg2, const_arg2, l);
+ } else {
+ tcg_out_brcond_i64(s, cond, arg1, arg2, const_arg2, l);
+ }
+}
+
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
+{
+ tcg_out_brcond(s, type, cond, arg1, arg2, false, l);
+}
+
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *l)
+{
+ tcg_out_brcond(s, type, cond, arg1, arg2, true, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rJ),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
+
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
TCGReg ret, TCGReg c1,
TCGArg c2, bool c2const, bool neg)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, a0, a1, a2, STW);
break;

- case INDEX_op_brcond_i32:
- tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
case INDEX_op_movcond_i32:
tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, a0, a1, a2, STX);
break;

- case INDEX_op_brcond_i64:
- tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
case INDEX_op_movcond_i64:
tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rz, rJ);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return C_O1_I4(r, rz, rJ, rI, 0);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_deposit_i64:
return C_O1_I2(r, r, r);

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(r, r);
-
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rrr = tgen_negsetcond,
};

+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg0, TCGReg arg1, TCGLabel *l)
+{
+ tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
+ tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_rr = tgen_brcond,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
break;

- CASE_32_64(brcond)
- tgen_setcond(s, type, args[2], TCG_REG_TMP, args[0], args[1]);
- tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
- break;
-
case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
tcg_out_op_rr(s, opc, args[0], args[1]);
--
2.43.0
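One detail worth noticing in the conversions above: aarch64, arm, i386, ppc, s390x and sparc64 supply both out_rr and out_ri, while loongarch64, mips, riscv and tci supply only out_rr, with constraints (rz or plain r) that never hand the hook a constant. Below is a hedged sketch of that minimal shape for a hypothetical target whose compare-and-branch exists only in register/register form; emit_cmp_branch() stands in for the real instruction encoder and is not a QEMU function.

/* Hypothetical backend: no immediate form, so no .out_ri is provided. */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg a1, TCGReg a2, TCGLabel *l)
{
    emit_cmp_branch(s, cond, a1, a2, l);    /* hypothetical encoder */
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_rr = tgen_brcond,
    /* constants are loaded into registers by the allocator first */
};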
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 4 +---
tcg/optimize.c | 6 +++---
tcg/tcg-op.c | 4 ++--
tcg/tcg.c | 24 ++++++++----------------
tcg/tci.c | 6 ++----
docs/devel/tcg-ops.rst | 2 +-
tcg/tci/tcg-target.c.inc | 4 ++--
7 files changed, 19 insertions(+), 31 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)

DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
+DEF(brcond, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | TCG_OPF_INT)

DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)

@@ -XXX,XX +XXX,XX @@ DEF(extract_i32, 1, 1, 2, 0)
DEF(sextract_i32, 1, 1, 2, 0)
DEF(extract2_i32, 1, 2, 1, 0)

-DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
-
DEF(add2_i32, 2, 4, 0, 0)
DEF(sub2_i32, 2, 4, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
@@ -XXX,XX +XXX,XX @@ DEF(extu_i32_i64, 1, 1, 0, 0)
DEF(extrl_i64_i32, 1, 1, 0, 0)
DEF(extrh_i64_i32, 1, 1, 0, 0)

-DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(bswap16_i64, 1, 1, 1, 0)
DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
break;

do_brcond_low:
- op->opc = INDEX_op_brcond_i32;
+ op->opc = INDEX_op_brcond;
op->args[1] = op->args[2];
op->args[2] = cond;
op->args[3] = label;
return fold_brcond(ctx, op);

do_brcond_high:
- op->opc = INDEX_op_brcond_i32;
+ op->opc = INDEX_op_brcond;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
op->args[2] = cond;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_andc_vec:
done = fold_andc(&ctx, op);
break;
- CASE_OP_32_64(brcond):
+ case INDEX_op_brcond:
done = fold_brcond(&ctx, op);
break;
case INDEX_op_brcond2_i32:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l)
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(l);
} else if (cond != TCG_COND_NEVER) {
- TCGOp *op = tcg_gen_op4ii_i32(INDEX_op_brcond_i32,
+ TCGOp *op = tcg_gen_op4ii_i32(INDEX_op_brcond,
arg1, arg2, cond, label_arg(l));
add_as_label_use(l, op);
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l)
TCGV_HIGH(arg1), TCGV_LOW(arg2),
TCGV_HIGH(arg2), cond, label_arg(l));
} else {
- op = tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond,
+ op = tcg_gen_op4ii_i64(INDEX_op_brcond, arg1, arg2, cond,
label_arg(l));
}
add_as_label_use(l, op);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
- OUTOP(INDEX_op_brcond_i32, TCGOutOpBrcond, outop_brcond),
- OUTOP(INDEX_op_brcond_i64, TCGOutOpBrcond, outop_brcond),
+ OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_add:
case INDEX_op_and:
+ case INDEX_op_brcond:
case INDEX_op_mov:
case INDEX_op_negsetcond:
case INDEX_op_or:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_xor:
return has_type;

- case INDEX_op_brcond_i32:
case INDEX_op_movcond_i32:
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_setcond2_i32:
return TCG_TARGET_REG_BITS == 32;

- case INDEX_op_brcond_i64:
case INDEX_op_movcond_i64:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
op->args[k++]));
}
switch (c) {
- case INDEX_op_brcond_i32:
+ case INDEX_op_brcond:
case INDEX_op_setcond:
case INDEX_op_negsetcond:
case INDEX_op_movcond_i32:
case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
- case INDEX_op_brcond_i64:
case INDEX_op_movcond_i64:
case INDEX_op_cmp_vec:
case INDEX_op_cmpsel_vec:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
switch (c) {
case INDEX_op_set_label:
case INDEX_op_br:
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
case INDEX_op_brcond2_i32:
col += ne_fprintf(f, "%s$L%d", k ? "," : "",
arg_label(op->args[k])->id);
@@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
case INDEX_op_br:
remove_label_use(op, 0);
break;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
remove_label_use(op, 3);
break;
case INDEX_op_brcond2_i32:
@@ -XXX,XX +XXX,XX @@ static void move_label_uses(TCGLabel *to, TCGLabel *from)
case INDEX_op_br:
op->args[0] = label_arg(to);
break;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
op->args[3] = label_arg(to);
break;
case INDEX_op_brcond2_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
o_allocated_regs = s->reserved_regs;

switch (op->opc) {
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
op_cond = op->args[2];
break;
case INDEX_op_setcond:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
{
const TCGOutOpBrcond *out = &outop_brcond;
TCGCond cond = new_args[2];
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
regs[r0] = sextract32(regs[r1], pos, len);
break;
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
tci_args_rl(insn, tb_ptr, &r0, &ptr);
if (regs[r0]) {
tb_ptr = ptr;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
break;

- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
+ case INDEX_op_brcond:
tci_args_rl(insn, tb_ptr, &r0, &ptr);
info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
op_name, str_r(r0), ptr);
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Jumps/Labels

- | Jump to label.

- * - brcond_i32/i64 *t0*, *t1*, *cond*, *label*
+ * - brcond *t0*, *t1*, *cond*, *label*

- | Conditional jump if *t0* *cond* *t1* is true. *cond* can be:
|
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
TCGReg arg0, TCGReg arg1, TCGLabel *l)
{
tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
- tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, l);
+ tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

static const TCGOutOpBrcond outop_brcond = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_brcond2_i32:
tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
args[0], args[1], args[2], args[3], args[4]);
- tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
+ tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, arg_label(args[5]));
break;
#endif

--
2.43.0
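The net effect of the patch above on the opcode table is easiest to see side by side; the following is condensed from the tcg-opc.h hunk:

/* before: two width-specific opcodes */
DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)

/* after: one typeless opcode; TCG_OPF_INT indicates that the operand
 * width comes from the op's TCGType instead of an _i32/_i64 suffix */
DEF(brcond, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | TCG_OPF_INT)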
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target-con-set.h | 2 +-
tcg/mips/tcg-target-con-set.h | 3 ++-
tcg/s390x/tcg-target-con-set.h | 1 -
tcg/sparc64/tcg-target-con-set.h | 2 +-
tcg/tcg.c | 23 +++++++++++++++++++++++
tcg/tci.c | 12 ++++++------
tcg/aarch64/tcg-target.c.inc | 26 +++++++++++++-------------
tcg/arm/tcg-target.c.inc | 24 ++++++++++++++----------
tcg/i386/tcg-target.c.inc | 23 +++++++++++------------
tcg/loongarch64/tcg-target.c.inc | 23 +++++++++--------------
tcg/mips/tcg-target.c.inc | 25 ++++++++++++-------------
tcg/ppc/tcg-target.c.inc | 24 ++++++++----------------
tcg/riscv/tcg-target.c.inc | 26 ++++++++++----------------
tcg/s390x/tcg-target.c.inc | 26 ++++++++------------------
tcg/sparc64/tcg-target.c.inc | 28 ++++++++++++++++------------
tcg/tci/tcg-target-opc.h.inc | 1 +
tcg/tci/tcg-target.c.inc | 18 +++++++++++++++---
17 files changed, 150 insertions(+), 137 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, w)
C_O1_I2(w, w, wM)
C_O1_I2(w, w, wA)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, rz, rJ, rz, rz)
+C_O1_I4(r, r, rJ, rz, rz)
C_N2_I1(r, r, r)
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-con-set.h
+++ b/tcg/mips/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rz)
C_O1_I2(r, r, rzW)
-C_O1_I4(r, rz, rz, rz, 0)
+C_O1_I4(r, r, rz, rz, 0)
+C_O1_I4(r, r, rz, rz, rz)
C_O1_I4(r, rz, rz, rz, rz)
C_O2_I1(r, r, r)
C_O2_I2(r, r, r, r)
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
C_O1_I4(v, v, v, vZ, v)
C_O1_I4(v, v, v, vZM, v)
-C_O1_I4(r, r, ri, rI, r)
C_O1_I4(r, r, rC, rI, r)
C_O2_I1(o, m, r)
C_O2_I2(o, m, 0, r)
diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I2(r, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rJ)
-C_O1_I4(r, rz, rJ, rI, 0)
+C_O1_I4(r, r, rJ, rI, 0)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, rz, rz, rJ, rJ)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpDivRem {
TCGReg a0, TCGReg a1, TCGReg a4);
} TCGOutOpDivRem;

+typedef struct TCGOutOpMovcond {
+ TCGOutOp base;
+ void (*out)(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf);
+} TCGOutOpMovcond;
+
typedef struct TCGOutOpMul2 {
TCGOutOp base;
void (*out_rrrr)(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_movcond_i32, TCGOutOpMovcond, outop_movcond),
+ OUTOP(INDEX_op_movcond_i64, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ {
+ const TCGOutOpMovcond *out = &outop_movcond;
+ TCGCond cond = new_args[5];
+
+ tcg_debug_assert(!const_args[1]);
+ out->out(s, type, cond, new_args[0],
+ new_args[1], new_args[2], const_args[2],
+ new_args[3], const_args[3],
+ new_args[4], const_args[4]);
+ }
+ break;
+
case INDEX_op_setcond:
case INDEX_op_negsetcond:
{
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_l(insn, tb_ptr, &ptr);
tb_ptr = ptr;
continue;
- case INDEX_op_movcond_i32:
- tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
- tmp32 = tci_compare32(regs[r1], regs[r2], condition);
- regs[r0] = regs[tmp32 ? r3 : r4];
- break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
break;
+ case INDEX_op_tci_movcond32:
+ tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
+ tmp32 = tci_compare32(regs[r1], regs[r2], condition);
+ regs[r0] = regs[tmp32 ? r3 : r4];
+ break;

/* Shift/rotate operations. */

@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1), pos, len);
break;

- case INDEX_op_movcond_i32:
+ case INDEX_op_tci_movcond32:
case INDEX_op_movcond_i64:
case INDEX_op_setcond2_i32:
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
.out_rri = tgen_negsetcondi,
};

+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
+{
+ tcg_out_cmp(s, type, cond, c1, c2, const_c2);
+ tcg_out_insn(s, 3506, CSEL, type, ret, vt, vf, cond);
+}
+
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rC, rz, rz),
+ .out = tgen_movcond,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
TCGArg a0 = args[0];
TCGArg a1 = args[1];
TCGArg a2 = args[2];
- int c2 = const_args[2];

switch (opc) {
case INDEX_op_goto_ptr:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
break;

- case INDEX_op_movcond_i32:
- a2 = (int32_t)a2;
- /* FALLTHRU */
- case INDEX_op_movcond_i64:
- tcg_out_cmp(s, ext, args[5], a1, a2, c2);
- tcg_out_insn(s, 3506, CSEL, ext, a0, args[3], args[4], args[5]);
- break;
-
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, ext);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
205
case INDEX_op_st_i64:
206
return C_O0_I2(rz, r);
207
208
- case INDEX_op_movcond_i32:
209
- case INDEX_op_movcond_i64:
210
- return C_O1_I4(r, r, rC, rz, rz);
211
-
212
case INDEX_op_qemu_ld_i32:
213
case INDEX_op_qemu_ld_i64:
214
return C_O1_I1(r, r);
215
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
216
index XXXXXXX..XXXXXXX 100644
217
--- a/tcg/arm/tcg-target.c.inc
218
+++ b/tcg/arm/tcg-target.c.inc
219
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
220
.out_rri = tgen_negsetcondi,
221
};
222
223
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
224
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
225
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
226
+{
227
+ cond = tcg_out_cmp(s, cond, c1, c2, const_c2);
228
+ tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[cond], ARITH_MOV, ARITH_MVN,
229
+ ret, 0, vt, const_vt);
230
+}
231
+
232
+static const TCGOutOpMovcond outop_movcond = {
233
+ .base.static_constraint = C_O1_I4(r, r, rIN, rIK, 0),
234
+ .out = tgen_movcond,
235
+};
236
+
237
238
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
239
const TCGArg args[TCG_MAX_OP_ARGS],
240
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
241
tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
242
break;
243
244
- case INDEX_op_movcond_i32:
245
- /* Constraints mean that v2 is always in the same register as dest,
246
- * so we only need to do "if condition passed, move v1 to dest".
247
- */
248
- c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
249
- tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
250
- ARITH_MVN, args[0], 0, args[3], const_args[3]);
251
- break;
252
case INDEX_op_add2_i32:
253
a0 = args[0], a1 = args[1], a2 = args[2];
254
a3 = args[3], a4 = args[4], a5 = args[5];
255
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
256
return C_O1_I2(r, 0, rZ);
257
case INDEX_op_extract2_i32:
258
return C_O1_I2(r, rZ, rZ);
259
- case INDEX_op_movcond_i32:
260
- return C_O1_I4(r, r, rIN, rIK, 0);
261
case INDEX_op_add2_i32:
262
return C_O2_I4(r, r, r, r, rIN, rIK);
263
case INDEX_op_sub2_i32:
264
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
265
index XXXXXXX..XXXXXXX 100644
266
--- a/tcg/i386/tcg-target.c.inc
267
+++ b/tcg/i386/tcg-target.c.inc
268
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmov(TCGContext *s, int jcc, int rexw,
269
tcg_out_modrm(s, OPC_CMOVCC | jcc | rexw, dest, v1);
270
}
271
272
-static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
273
- TCGReg dest, TCGReg c1, TCGArg c2, int const_c2,
274
- TCGReg v1)
275
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
276
+ TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2,
277
+ TCGArg vt, bool const_vt,
278
+ TCGArg vf, bool const_vf)
279
{
280
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
281
int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw);
282
- tcg_out_cmov(s, jcc, rexw, dest, v1);
283
+ tcg_out_cmov(s, jcc, rexw, dest, vt);
284
}
285
286
+static const TCGOutOpMovcond outop_movcond = {
287
+ .base.static_constraint = C_O1_I4(r, r, reT, r, 0),
288
+ .out = tgen_movcond,
289
+};
290
+
291
static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
292
{
293
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
294
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
295
}
296
break;
297
298
- OP_32_64(movcond):
299
- tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
300
- break;
301
-
302
OP_32_64(bswap16):
303
if (a2 & TCG_BSWAP_OS) {
304
/* Output must be sign-extended. */
305
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
306
case INDEX_op_deposit_i64:
307
return C_O1_I2(q, 0, qi);
308
309
- case INDEX_op_movcond_i32:
310
- case INDEX_op_movcond_i64:
311
- return C_O1_I4(r, r, reT, r, 0);
312
-
313
case INDEX_op_add2_i32:
314
case INDEX_op_add2_i64:
315
case INDEX_op_sub2_i32:
316
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
317
index XXXXXXX..XXXXXXX 100644
318
--- a/tcg/loongarch64/tcg-target.c.inc
319
+++ b/tcg/loongarch64/tcg-target.c.inc
320
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
321
.out_rri = tgen_negsetcondi,
322
};
323
324
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
325
- TCGReg c1, tcg_target_long c2, bool const2,
326
- TCGReg v1, TCGReg v2)
327
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
328
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
329
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
330
{
331
- int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
332
+ int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const_c2);
333
TCGReg t;
334
335
/* Standardize the test below to t != 0. */
336
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
337
}
338
}
339
340
+static const TCGOutOpMovcond outop_movcond = {
341
+ .base.static_constraint = C_O1_I4(r, r, rJ, rz, rz),
342
+ .out = tgen_movcond,
343
+};
344
+
345
/*
346
* Branch helpers
347
*/
348
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
349
TCGArg a1 = args[1];
350
TCGArg a2 = args[2];
351
TCGArg a3 = args[3];
352
- int c2 = const_args[2];
353
354
switch (opc) {
355
case INDEX_op_mb:
356
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
357
tcg_out_opc_revb_d(s, a0, a1);
358
break;
359
360
- case INDEX_op_movcond_i32:
361
- case INDEX_op_movcond_i64:
362
- tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
363
- break;
364
-
365
case INDEX_op_ld8s_i32:
366
case INDEX_op_ld8s_i64:
367
tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
368
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
369
/* Must deposit into the same register as input */
370
return C_O1_I2(r, 0, rz);
371
372
- case INDEX_op_movcond_i32:
373
- case INDEX_op_movcond_i64:
374
- return C_O1_I4(r, rz, rJ, rz, rz);
375
-
376
case INDEX_op_ld_vec:
377
case INDEX_op_dupm_vec:
378
case INDEX_op_dup_vec:
379
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
380
index XXXXXXX..XXXXXXX 100644
381
--- a/tcg/mips/tcg-target.c.inc
382
+++ b/tcg/mips/tcg-target.c.inc
383
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
384
tcg_out_nop(s);
385
}
386
387
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
388
- TCGReg c1, TCGReg c2, TCGReg v1, TCGReg v2)
389
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
390
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
391
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
392
{
393
int tmpflags;
394
bool eqz;
395
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
396
}
397
}
398
399
+static const TCGOutOpMovcond outop_movcond = {
400
+ .base.static_constraint = (use_mips32r6_instructions
401
+ ? C_O1_I4(r, r, rz, rz, rz)
402
+ : C_O1_I4(r, r, rz, rz, 0)),
403
+ .out = tgen_movcond,
404
+};
405
+
406
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
407
{
408
/*
409
@@ -XXX,XX +XXX,XX @@ static void tgen_clz(TCGContext *s, TCGType type,
410
if (use_mips32r6_instructions) {
411
MIPSInsn opcv6 = type == TCG_TYPE_I32 ? OPC_CLZ_R6 : OPC_DCLZ_R6;
412
tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
413
- tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
414
+ tgen_movcond(s, TCG_TYPE_REG, TCG_COND_EQ, a0, a1, a2, false,
415
+ TCG_TMP0, false, TCG_REG_ZERO, false);
416
} else {
417
MIPSInsn opcv2 = type == TCG_TYPE_I32 ? OPC_CLZ : OPC_DCLZ;
418
if (a0 == a2) {
419
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
420
tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
421
break;
422
423
- case INDEX_op_movcond_i32:
424
- case INDEX_op_movcond_i64:
425
- tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
426
- break;
427
-
428
case INDEX_op_setcond2_i32:
429
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
430
break;
431
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
432
case INDEX_op_deposit_i32:
433
case INDEX_op_deposit_i64:
434
return C_O1_I2(r, 0, rz);
435
- case INDEX_op_movcond_i32:
436
- case INDEX_op_movcond_i64:
437
- return (use_mips32r6_instructions
438
- ? C_O1_I4(r, rz, rz, rz, rz)
439
- : C_O1_I4(r, rz, rz, rz, 0));
440
case INDEX_op_add2_i32:
441
case INDEX_op_sub2_i32:
442
return C_O2_I4(r, r, rz, rz, rN, rN);
443
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
444
index XXXXXXX..XXXXXXX 100644
445
--- a/tcg/ppc/tcg-target.c.inc
446
+++ b/tcg/ppc/tcg-target.c.inc
447
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond outop_brcond = {
448
.out_ri = tgen_brcondi,
449
};
450
451
-static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
452
- TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
453
- TCGArg v2, bool const_c2)
454
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
455
+ TCGReg dest, TCGReg c1, TCGArg c2, bool const_c2,
456
+ TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
457
{
458
/* If for some reason both inputs are zero, don't produce bad code. */
459
if (v1 == 0 && v2 == 0) {
460
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
461
}
462
}
463
464
+static const TCGOutOpMovcond outop_movcond = {
465
+ .base.static_constraint = C_O1_I4(r, r, rC, rZ, rZ),
466
+ .out = tgen_movcond,
467
+};
468
+
469
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
470
TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
471
{
472
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
473
}
474
break;
475
476
- case INDEX_op_movcond_i32:
477
- tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
478
- args[3], args[4], const_args[2]);
479
- break;
480
- case INDEX_op_movcond_i64:
481
- tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
482
- args[3], args[4], const_args[2]);
483
- break;
484
-
485
#if TCG_TARGET_REG_BITS == 64
486
case INDEX_op_add2_i64:
487
#else
488
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
489
case INDEX_op_st_i64:
490
return C_O0_I2(r, r);
491
492
- case INDEX_op_movcond_i32:
493
- case INDEX_op_movcond_i64:
494
- return C_O1_I4(r, r, rC, rZ, rZ);
495
-
496
case INDEX_op_deposit_i32:
497
case INDEX_op_deposit_i64:
498
return C_O1_I2(r, 0, rZ);
499
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
500
index XXXXXXX..XXXXXXX 100644
501
--- a/tcg/riscv/tcg-target.c.inc
502
+++ b/tcg/riscv/tcg-target.c.inc
503
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
504
tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
505
}
506
507
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
508
- TCGReg cmp1, int cmp2, bool c_cmp2,
509
- TCGReg val1, bool c_val1,
510
- TCGReg val2, bool c_val2)
511
+static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
512
+ TCGReg ret, TCGReg cmp1, TCGArg cmp2, bool c_cmp2,
513
+ TCGArg val1, bool c_val1,
514
+ TCGArg val2, bool c_val2)
515
{
516
int tmpflags;
517
TCGReg t;
518
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
519
}
520
}
521
522
+static const TCGOutOpMovcond outop_movcond = {
523
+ .base.static_constraint = C_O1_I4(r, r, rI, rM, rM),
524
+ .out = tcg_out_movcond,
525
+};
526
+
527
static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
528
TCGReg ret, TCGReg src1, int src2, bool c_src2)
529
{
530
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
531
* Note that constraints put 'ret' in a new register, so the
532
* computation above did not clobber either 'src1' or 'src2'.
533
*/
534
- tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
535
+ tcg_out_movcond(s, type, TCG_COND_EQ, ret, src1, 0, true,
536
src2, c_src2, ret, false);
537
}
538
}
539
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
540
TCGArg a0 = args[0];
541
TCGArg a1 = args[1];
542
TCGArg a2 = args[2];
543
- int c2 = const_args[2];
544
545
switch (opc) {
546
case INDEX_op_goto_ptr:
547
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
548
const_args[4], const_args[5], true, false);
549
break;
550
551
- case INDEX_op_movcond_i32:
552
- case INDEX_op_movcond_i64:
553
- tcg_out_movcond(s, args[5], a0, a1, a2, c2,
554
- args[3], const_args[3], args[4], const_args[4]);
555
- break;
556
-
557
case INDEX_op_qemu_ld_i32:
558
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
559
break;
560
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
561
case INDEX_op_st_i64:
562
return C_O0_I2(rz, r);
563
564
- case INDEX_op_movcond_i32:
565
- case INDEX_op_movcond_i64:
566
- return C_O1_I4(r, r, rI, rM, rM);
567
-
568
case INDEX_op_add2_i32:
569
case INDEX_op_add2_i64:
570
case INDEX_op_sub2_i32:
571
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
572
index XXXXXXX..XXXXXXX 100644
573
--- a/tcg/s390x/tcg-target.c.inc
574
+++ b/tcg/s390x/tcg-target.c.inc
575
@@ -XXX,XX +XXX,XX @@ static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
576
tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
577
}
578
579
-static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
580
- TCGReg c1, TCGArg c2, int c2const,
581
- TCGArg v3, int v3const, TCGReg v4)
582
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c,
583
+ TCGReg dest, TCGReg c1, TCGArg c2, bool c2const,
584
+ TCGArg v3, bool v3const, TCGArg v4, bool v4const)
585
{
586
int cc, inv_cc;
587
588
@@ -XXX,XX +XXX,XX @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
589
tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
590
}
591
592
+static const TCGOutOpMovcond outop_movcond = {
593
+ .base.static_constraint = C_O1_I4(r, r, rC, rI, r),
594
+ .out = tgen_movcond,
595
+};
596
+
597
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
598
int ofs, int len, int z)
599
{
600
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
601
tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
602
break;
603
604
- case INDEX_op_movcond_i32:
605
- tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
606
- args[2], const_args[2], args[3], const_args[3], args[4]);
607
- break;
608
-
609
case INDEX_op_qemu_ld_i32:
610
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
611
break;
612
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
613
tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
614
break;
615
616
- case INDEX_op_movcond_i64:
617
- tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
618
- args[2], const_args[2], args[3], const_args[3], args[4]);
619
- break;
620
-
621
OP_32_64(deposit):
622
a0 = args[0], a1 = args[1], a2 = args[2];
623
if (const_args[1]) {
624
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
625
case INDEX_op_deposit_i64:
626
return C_O1_I2(r, rZ, r);
627
628
- case INDEX_op_movcond_i32:
629
- return C_O1_I4(r, r, ri, rI, r);
630
- case INDEX_op_movcond_i64:
631
- return C_O1_I4(r, r, rC, rI, r);
632
-
633
case INDEX_op_add2_i32:
634
case INDEX_op_sub2_i32:
635
return C_N1_O1_I4(r, r, 0, 1, ri, r);
636
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
637
index XXXXXXX..XXXXXXX 100644
638
--- a/tcg/sparc64/tcg-target.c.inc
639
+++ b/tcg/sparc64/tcg-target.c.inc
640
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
641
.out_rri = tgen_negsetcondi,
642
};
643
644
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
645
+ TCGReg ret, TCGReg c1, TCGArg c2, bool c2const,
646
+ TCGArg v1, bool v1const, TCGArg v2, bool v2const)
647
+{
648
+ if (type == TCG_TYPE_I32) {
649
+ tcg_out_movcond_i32(s, cond, ret, c1, c2, c2const, v1, v1const);
650
+ } else {
651
+ tcg_out_movcond_i64(s, cond, ret, c1, c2, c2const, v1, v1const);
652
+ }
653
+}
654
+
655
+static const TCGOutOpMovcond outop_movcond = {
656
+ .base.static_constraint = C_O1_I4(r, r, rJ, rI, 0),
657
+ .out = tgen_movcond,
658
+};
659
+
660
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
661
TCGReg al, TCGReg ah, int32_t bl, int blconst,
662
int32_t bh, int bhconst, int opl, int oph)
663
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
664
const int const_args[TCG_MAX_OP_ARGS])
665
{
666
TCGArg a0, a1, a2;
667
- int c2;
668
669
/* Hoist the loads of the most common arguments. */
670
a0 = args[0];
671
a1 = args[1];
672
a2 = args[2];
673
- c2 = const_args[2];
674
675
switch (opc) {
676
case INDEX_op_goto_ptr:
677
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
678
tcg_out_ldst(s, a0, a1, a2, STW);
679
break;
680
681
- case INDEX_op_movcond_i32:
682
- tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
683
- break;
684
-
685
case INDEX_op_add2_i32:
686
tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
687
args[4], const_args[4], args[5], const_args[5],
688
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
689
tcg_out_ldst(s, a0, a1, a2, STX);
690
break;
691
692
- case INDEX_op_movcond_i64:
693
- tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
694
- break;
695
case INDEX_op_add2_i64:
696
tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
697
const_args[4], args[5], const_args[5], false);
698
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
699
case INDEX_op_qemu_st_i64:
700
return C_O0_I2(rz, r);
701
702
- case INDEX_op_movcond_i32:
703
- case INDEX_op_movcond_i64:
704
- return C_O1_I4(r, rz, rJ, rI, 0);
705
case INDEX_op_add2_i32:
706
case INDEX_op_add2_i64:
707
case INDEX_op_sub2_i32:
708
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
709
index XXXXXXX..XXXXXXX 100644
710
--- a/tcg/tci/tcg-target-opc.h.inc
711
+++ b/tcg/tci/tcg-target-opc.h.inc
712
@@ -XXX,XX +XXX,XX @@ DEF(tci_remu32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
713
DEF(tci_rotl32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
714
DEF(tci_rotr32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
715
DEF(tci_setcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT)
716
+DEF(tci_movcond32, 1, 2, 1, TCG_OPF_NOT_PRESENT)
717
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
718
index XXXXXXX..XXXXXXX 100644
719
--- a/tcg/tci/tcg-target.c.inc
720
+++ b/tcg/tci/tcg-target.c.inc
721
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
722
return C_O0_I4(r, r, r, r);
723
#endif
724
725
- case INDEX_op_movcond_i32:
726
- case INDEX_op_movcond_i64:
727
case INDEX_op_setcond2_i32:
728
return C_O1_I4(r, r, r, r, r);
729
730
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond outop_brcond = {
731
.out_rr = tgen_brcond,
732
};
733
734
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
735
+ TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
736
+ TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
737
+{
738
+ TCGOpcode opc = (type == TCG_TYPE_I32
739
+ ? INDEX_op_tci_movcond32
740
+ : INDEX_op_movcond_i64);
741
+ tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
742
+}
743
+
744
+static const TCGOutOpMovcond outop_movcond = {
745
+ .base.static_constraint = C_O1_I4(r, r, r, r, r),
746
+ .out = tgen_movcond,
747
+};
748
+
749
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
750
const TCGArg args[TCG_MAX_OP_ARGS],
751
const int const_args[TCG_MAX_OP_ARGS])
752
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
753
tcg_out_op_l(s, opc, arg_label(args[0]));
754
break;
755
756
- CASE_32_64(movcond)
757
case INDEX_op_setcond2_i32:
758
tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
759
args[3], args[4], args[5]);
760
--
761
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg-opc.h | 3 +--
5
tcg/optimize.c | 2 +-
6
tcg/tcg-op.c | 4 ++--
7
tcg/tcg.c | 15 +++++----------
8
tcg/tci.c | 4 ++--
9
docs/devel/tcg-ops.rst | 2 +-
10
tcg/tci/tcg-target.c.inc | 2 +-
11
7 files changed, 13 insertions(+), 19 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(divs2, 2, 3, 0, TCG_OPF_INT)
18
DEF(divu, 1, 2, 0, TCG_OPF_INT)
19
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
20
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
21
+DEF(movcond, 1, 4, 1, TCG_OPF_INT)
22
DEF(mul, 1, 2, 0, TCG_OPF_INT)
23
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
24
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(shr, 1, 2, 0, TCG_OPF_INT)
26
DEF(sub, 1, 2, 0, TCG_OPF_INT)
27
DEF(xor, 1, 2, 0, TCG_OPF_INT)
28
29
-DEF(movcond_i32, 1, 4, 1, 0)
30
/* load/store */
31
DEF(ld8u_i32, 1, 1, 1, 0)
32
DEF(ld8s_i32, 1, 1, 1, 0)
33
@@ -XXX,XX +XXX,XX @@ DEF(setcond2_i32, 1, 4, 1, 0)
34
DEF(bswap16_i32, 1, 1, 1, 0)
35
DEF(bswap32_i32, 1, 1, 1, 0)
36
37
-DEF(movcond_i64, 1, 4, 1, 0)
38
/* load/store */
39
DEF(ld8u_i64, 1, 1, 1, 0)
40
DEF(ld8s_i64, 1, 1, 1, 0)
41
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/optimize.c
44
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
46
case INDEX_op_mov_vec:
47
done = fold_mov(&ctx, op);
48
break;
49
- CASE_OP_32_64(movcond):
50
+ case INDEX_op_movcond:
51
done = fold_movcond(&ctx, op);
52
break;
53
case INDEX_op_mul:
54
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/tcg/tcg-op.c
57
+++ b/tcg/tcg-op.c
58
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
59
} else if (cond == TCG_COND_NEVER) {
60
tcg_gen_mov_i32(ret, v2);
61
} else {
62
- tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond);
63
+ tcg_gen_op6i_i32(INDEX_op_movcond, ret, c1, c2, v1, v2, cond);
64
}
65
}
66
67
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
68
} else if (cond == TCG_COND_NEVER) {
69
tcg_gen_mov_i64(ret, v2);
70
} else if (TCG_TARGET_REG_BITS == 64) {
71
- tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond);
72
+ tcg_gen_op6i_i64(INDEX_op_movcond, ret, c1, c2, v1, v2, cond);
73
} else {
74
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
75
TCGv_i32 zero = tcg_constant_i32(0);
76
diff --git a/tcg/tcg.c b/tcg/tcg.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tcg/tcg.c
79
+++ b/tcg/tcg.c
80
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
81
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
82
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
83
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
84
- OUTOP(INDEX_op_movcond_i32, TCGOutOpMovcond, outop_movcond),
85
- OUTOP(INDEX_op_movcond_i64, TCGOutOpMovcond, outop_movcond),
86
+ OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
87
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
88
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
89
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
90
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
91
case INDEX_op_and:
92
case INDEX_op_brcond:
93
case INDEX_op_mov:
94
+ case INDEX_op_movcond:
95
case INDEX_op_negsetcond:
96
case INDEX_op_or:
97
case INDEX_op_setcond:
98
case INDEX_op_xor:
99
return has_type;
100
101
- case INDEX_op_movcond_i32:
102
case INDEX_op_ld8u_i32:
103
case INDEX_op_ld8s_i32:
104
case INDEX_op_ld16u_i32:
105
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
106
case INDEX_op_setcond2_i32:
107
return TCG_TARGET_REG_BITS == 32;
108
109
- case INDEX_op_movcond_i64:
110
case INDEX_op_ld8u_i64:
111
case INDEX_op_ld8s_i64:
112
case INDEX_op_ld16u_i64:
113
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
114
case INDEX_op_brcond:
115
case INDEX_op_setcond:
116
case INDEX_op_negsetcond:
117
- case INDEX_op_movcond_i32:
118
+ case INDEX_op_movcond:
119
case INDEX_op_brcond2_i32:
120
case INDEX_op_setcond2_i32:
121
- case INDEX_op_movcond_i64:
122
case INDEX_op_cmp_vec:
123
case INDEX_op_cmpsel_vec:
124
if (op->args[k] < ARRAY_SIZE(cond_name)
125
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
126
case INDEX_op_brcond2_i32:
127
op_cond = op->args[4];
128
break;
129
- case INDEX_op_movcond_i32:
130
- case INDEX_op_movcond_i64:
131
+ case INDEX_op_movcond:
132
case INDEX_op_setcond2_i32:
133
case INDEX_op_cmpsel_vec:
134
op_cond = op->args[5];
135
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
136
}
137
break;
138
139
- case INDEX_op_movcond_i32:
140
- case INDEX_op_movcond_i64:
141
+ case INDEX_op_movcond:
142
{
143
const TCGOutOpMovcond *out = &outop_movcond;
144
TCGCond cond = new_args[5];
145
diff --git a/tcg/tci.c b/tcg/tci.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/tcg/tci.c
148
+++ b/tcg/tci.c
149
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
150
tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
151
regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
152
break;
153
- case INDEX_op_movcond_i64:
154
+ case INDEX_op_movcond:
155
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
156
tmp32 = tci_compare64(regs[r1], regs[r2], condition);
157
regs[r0] = regs[tmp32 ? r3 : r4];
158
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
159
break;
160
161
case INDEX_op_tci_movcond32:
162
- case INDEX_op_movcond_i64:
163
+ case INDEX_op_movcond:
164
case INDEX_op_setcond2_i32:
165
tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
166
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
167
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
168
index XXXXXXX..XXXXXXX 100644
169
--- a/docs/devel/tcg-ops.rst
170
+++ b/docs/devel/tcg-ops.rst
171
@@ -XXX,XX +XXX,XX @@ Conditional moves
172
|
173
| Set *dest* to -1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
174
175
- * - movcond_i32/i64 *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
176
+ * - movcond *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
177
178
- | *dest* = (*c1* *cond* *c2* ? *v1* : *v2*)
179
|
180
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
181
index XXXXXXX..XXXXXXX 100644
182
--- a/tcg/tci/tcg-target.c.inc
183
+++ b/tcg/tci/tcg-target.c.inc
184
@@ -XXX,XX +XXX,XX @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
185
{
186
TCGOpcode opc = (type == TCG_TYPE_I32
187
? INDEX_op_tci_movcond32
188
- : INDEX_op_movcond_i64);
189
+ : INDEX_op_movcond);
190
tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
191
}
192
193
--
194
2.43.0
195
196
1
Use U and C constraints for brcond2 and setcond2, so that tcg_out_cmp2
2
automatically passes in-range constants to tcg_out_cmp.
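As a rough illustration (a sketch only, not part of the patch: the
helper name and CR numbers are invented here, and 'U' and 'C' are
assumed to admit uint16 and condition-dependent constants): once the
constraints filter the constants, both halves of a double-word compare
can be emitted directly, with no reload through a scratch register.

static void sketch_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                        TCGArg bl, bool const_bl, TCGArg bh, bool const_bh)
{
    /* The low word of a double-word compare is always unsigned. */
    tcg_out_cmp(s, tcg_unsigned_cond(cond), al, bl, const_bl,
                6, TCG_TYPE_I32);
    /* The high word keeps the signedness of the original condition. */
    tcg_out_cmp(s, tcg_high_cond(cond), ah, bh, const_bh,
                7, TCG_TYPE_I32);
    /* The real tcg_out_cmp2 then combines the two CR results. */
}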
3
1
4
Tested-by: Nicholas Piggin <npiggin@gmail.com>
5
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/ppc/tcg-target-con-set.h | 4 +--
10
tcg/ppc/tcg-target.c.inc | 49 ++++++++++++------------------------
11
2 files changed, 18 insertions(+), 35 deletions(-)
12
13
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/ppc/tcg-target-con-set.h
16
+++ b/tcg/ppc/tcg-target-con-set.h
17
@@ -XXX,XX +XXX,XX @@ C_O0_I2(r, rC)
18
C_O0_I2(v, r)
19
C_O0_I3(r, r, r)
20
C_O0_I3(o, m, r)
21
-C_O0_I4(r, r, ri, ri)
22
+C_O0_I4(r, r, rU, rC)
23
C_O0_I4(r, r, r, r)
24
C_O1_I1(r, r)
25
C_O1_I1(v, r)
26
@@ -XXX,XX +XXX,XX @@ C_O1_I2(v, v, v)
27
C_O1_I3(v, v, v, v)
28
C_O1_I4(v, v, v, vZM, v)
29
C_O1_I4(r, r, rC, rZ, rZ)
30
-C_O1_I4(r, r, r, ri, ri)
31
+C_O1_I4(r, r, r, rU, rC)
32
C_O2_I1(r, r, r)
33
C_N1O1_I1(o, m, r)
34
C_O2_I2(r, r, r, r)
35
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
36
index XXXXXXX..XXXXXXX 100644
37
--- a/tcg/ppc/tcg-target.c.inc
38
+++ b/tcg/ppc/tcg-target.c.inc
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
40
}
41
42
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
43
- int const_arg2, int cr, TCGType type)
44
+ bool const_arg2, int cr, TCGType type)
45
{
46
- int imm;
47
uint32_t op;
48
49
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
51
case TCG_COND_EQ:
52
case TCG_COND_NE:
53
if (const_arg2) {
54
- if ((int16_t) arg2 == arg2) {
55
+ if ((int16_t)arg2 == arg2) {
56
op = CMPI;
57
- imm = 1;
58
- break;
59
- } else if ((uint16_t) arg2 == arg2) {
60
- op = CMPLI;
61
- imm = 1;
62
break;
63
}
64
+ tcg_debug_assert((uint16_t)arg2 == arg2);
65
+ op = CMPLI;
66
+ break;
67
}
68
op = CMPL;
69
- imm = 0;
70
break;
71
72
case TCG_COND_TSTEQ:
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
74
case TCG_COND_LE:
75
case TCG_COND_GT:
76
if (const_arg2) {
77
- if ((int16_t) arg2 == arg2) {
78
- op = CMPI;
79
- imm = 1;
80
- break;
81
- }
82
+ tcg_debug_assert((int16_t)arg2 == arg2);
83
+ op = CMPI;
84
+ break;
85
}
86
op = CMP;
87
- imm = 0;
88
break;
89
90
case TCG_COND_LTU:
91
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
92
case TCG_COND_LEU:
93
case TCG_COND_GTU:
94
if (const_arg2) {
95
- if ((uint16_t) arg2 == arg2) {
96
- op = CMPLI;
97
- imm = 1;
98
- break;
99
- }
100
+ tcg_debug_assert((uint16_t)arg2 == arg2);
101
+ op = CMPLI;
102
+ break;
103
}
104
op = CMPL;
105
- imm = 0;
106
break;
107
108
default:
109
g_assert_not_reached();
110
}
111
op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
112
-
113
- if (imm) {
114
- tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
115
- } else {
116
- if (const_arg2) {
117
- tcg_out_movi(s, type, TCG_REG_R0, arg2);
118
- arg2 = TCG_REG_R0;
119
- }
120
- tcg_out32(s, op | RA(arg1) | RB(arg2));
121
- }
122
+ op |= RA(arg1);
123
+ op |= const_arg2 ? arg2 & 0xffff : RB(arg2);
124
+ tcg_out32(s, op);
125
}
126
127
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
128
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
129
case INDEX_op_deposit_i64:
130
return C_O1_I2(r, 0, rZ);
131
case INDEX_op_brcond2_i32:
132
- return C_O0_I4(r, r, ri, ri);
133
+ return C_O0_I4(r, r, rU, rC);
134
case INDEX_op_setcond2_i32:
135
- return C_O1_I4(r, r, r, ri, ri);
136
+ return C_O1_I4(r, r, r, rU, rC);
137
case INDEX_op_add2_i64:
138
case INDEX_op_add2_i32:
139
return C_O2_I4(r, r, r, r, rI, rZM);
140
--
141
2.43.0
1
Pass explicit arguments instead of arrays.
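The shape of the change, as a sketch (hypothetical names, not the
actual arm declarations):

/* Before: the callee had to know each opcode's operand layout. */
static TCGCond cmp2_before(TCGContext *s, const TCGArg *args,
                           const int *const_args);

/* After: the caller decodes args[] once, and the prototype documents
 * which operands are consumed and which of them may be constants. */
static TCGCond cmp2_after(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                          TCGArg bl, bool const_bl, TCGArg bh, bool const_bh);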
2
1
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/arm/tcg-target.c.inc | 18 ++++++------------
8
1 file changed, 6 insertions(+), 12 deletions(-)
9
10
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.c.inc
13
+++ b/tcg/arm/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
15
}
16
}
17
18
-static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
19
- const int *const_args)
20
+static TCGCond tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
21
+ TCGArg bl, bool const_bl, TCGArg bh, bool const_bh)
22
{
23
- TCGReg al = args[0];
24
- TCGReg ah = args[1];
25
- TCGArg bl = args[2];
26
- TCGArg bh = args[3];
27
- TCGCond cond = args[4];
28
- int const_bl = const_args[2];
29
- int const_bh = const_args[3];
30
-
31
switch (cond) {
32
case TCG_COND_EQ:
33
case TCG_COND_NE:
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
35
break;
36
37
case INDEX_op_brcond2_i32:
38
- c = tcg_out_cmp2(s, args, const_args);
39
+ c = tcg_out_cmp2(s, args[4], args[0], args[1], args[2], const_args[2],
40
+ args[3], const_args[3]);
41
tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
42
break;
43
case INDEX_op_setcond2_i32:
44
- c = tcg_out_cmp2(s, args + 1, const_args + 1);
45
+ c = tcg_out_cmp2(s, args[5], args[1], args[2], args[3], const_args[3],
46
+ args[4], const_args[4]);
47
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
48
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
49
ARITH_MOV, args[0], 0, 0);
50
--
51
2.43.0
52
53
1
Tested-by: Nicholas Piggin <npiggin@gmail.com>
2
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/ppc/tcg-target.c.inc | 21 +++++++--------------
8
1 file changed, 7 insertions(+), 14 deletions(-)
9
1
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/ppc/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
15
}
16
}
17
18
-static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
19
- const int *const_args)
20
+static void tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
21
+ TCGArg bl, bool blconst, TCGArg bh, bool bhconst)
22
{
23
static const struct { uint8_t bit1, bit2; } bits[] = {
24
[TCG_COND_LT ] = { CR_LT, CR_LT },
25
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
26
[TCG_COND_GEU] = { CR_GT, CR_LT },
27
};
28
29
- TCGCond cond = args[4], cond2;
30
- TCGArg al, ah, bl, bh;
31
- int blconst, bhconst;
32
+ TCGCond cond2;
33
int op, bit1, bit2;
34
35
- al = args[0];
36
- ah = args[1];
37
- bl = args[2];
38
- bh = args[3];
39
- blconst = const_args[2];
40
- bhconst = const_args[3];
41
-
42
switch (cond) {
43
case TCG_COND_EQ:
44
op = CRAND;
45
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
46
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
47
const int *const_args)
48
{
49
- tcg_out_cmp2(s, args + 1, const_args + 1);
50
+ tcg_out_cmp2(s, args[5], args[1], args[2], args[3], const_args[3],
51
+ args[4], const_args[4]);
52
tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
53
tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
54
}
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
56
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
57
const int *const_args)
58
{
59
- tcg_out_cmp2(s, args, const_args);
60
+ tcg_out_cmp2(s, args[4], args[0], args[1], args[2], const_args[2],
61
+ args[3], const_args[3]);
62
tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5]));
63
}
64
65
--
66
2.43.0
67
68
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/mips/tcg-target-con-set.h | 2 +-
5
tcg/tcg.c | 30 +++++++++++++++++
6
tcg/arm/tcg-target.c.inc | 20 +++++++----
7
tcg/i386/tcg-target.c.inc | 62 ++++++++++++++++++-----------------
8
tcg/mips/tcg-target.c.inc | 19 ++++++-----
9
tcg/ppc/tcg-target.c.inc | 25 +++++++-------
10
tcg/tci/tcg-target.c.inc | 30 +++++++++--------
11
7 files changed, 118 insertions(+), 70 deletions(-)
12
1
13
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/mips/tcg-target-con-set.h
16
+++ b/tcg/mips/tcg-target-con-set.h
17
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
18
C_O0_I2(r, rz)
19
C_O0_I2(rz, r)
20
C_O0_I3(rz, rz, r)
21
-C_O0_I4(rz, rz, rz, rz)
22
+C_O0_I4(r, r, rz, rz)
23
C_O1_I1(r, r)
24
C_O1_I2(r, 0, rz)
25
C_O1_I2(r, r, r)
26
diff --git a/tcg/tcg.c b/tcg/tcg.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/tcg.c
29
+++ b/tcg/tcg.c
30
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBrcond {
31
TCGReg a1, tcg_target_long a2, TCGLabel *label);
32
} TCGOutOpBrcond;
33
34
+typedef struct TCGOutOpBrcond2 {
35
+ TCGOutOp base;
36
+ void (*out)(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
37
+ TCGArg bl, bool const_bl,
38
+ TCGArg bh, bool const_bh, TCGLabel *l);
39
+} TCGOutOpBrcond2;
40
+
41
typedef struct TCGOutOpDivRem {
42
TCGOutOp base;
43
void (*out_rr01r)(TCGContext *s, TCGType type,
44
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
45
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
46
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
47
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
48
+
49
+#if TCG_TARGET_REG_BITS == 32
50
+ OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
51
+#endif
52
};
53
54
#undef OUTOP
55
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
56
}
57
break;
58
59
+#if TCG_TARGET_REG_BITS == 32
60
+ case INDEX_op_brcond2_i32:
61
+ {
62
+ const TCGOutOpBrcond2 *out = &outop_brcond2;
63
+ TCGCond cond = new_args[4];
64
+ TCGLabel *label = arg_label(new_args[5]);
65
+
66
+ tcg_debug_assert(!const_args[0]);
67
+ tcg_debug_assert(!const_args[1]);
68
+ out->out(s, cond, new_args[0], new_args[1],
69
+ new_args[2], const_args[2],
70
+ new_args[3], const_args[3], label);
71
+ }
72
+ break;
73
+#else
74
+ case INDEX_op_brcond2_i32:
75
+ g_assert_not_reached();
76
+#endif
77
+
78
default:
79
if (def->flags & TCG_OPF_VECTOR) {
80
tcg_out_vec_op(s, op->opc, type - TCG_TYPE_V64,
81
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
82
index XXXXXXX..XXXXXXX 100644
83
--- a/tcg/arm/tcg-target.c.inc
84
+++ b/tcg/arm/tcg-target.c.inc
85
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
86
.out = tgen_movcond,
87
};
88
89
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
90
+ TCGArg bl, bool const_bl, TCGArg bh, bool const_bh,
91
+ TCGLabel *l)
92
+{
93
+ cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
94
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[cond], l);
95
+}
96
+
97
+static const TCGOutOpBrcond2 outop_brcond2 = {
98
+ .base.static_constraint = C_O0_I4(r, r, rI, rI),
99
+ .out = tgen_brcond2,
100
+};
101
+
102
103
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
104
const TCGArg args[TCG_MAX_OP_ARGS],
105
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
106
tcg_out_mov_reg(s, COND_AL, args[0], a0);
107
break;
108
109
- case INDEX_op_brcond2_i32:
110
- c = tcg_out_cmp2(s, args[4], args[0], args[1], args[2], const_args[2],
111
- args[3], const_args[3]);
112
- tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
113
- break;
114
case INDEX_op_setcond2_i32:
115
c = tcg_out_cmp2(s, args[5], args[1], args[2], args[3], const_args[3],
116
args[4], const_args[4]);
117
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
118
return C_O2_I4(r, r, r, r, rIN, rIK);
119
case INDEX_op_sub2_i32:
120
return C_O2_I4(r, r, rI, rI, rIN, rIK);
121
- case INDEX_op_brcond2_i32:
122
- return C_O0_I4(r, r, rI, rI);
123
case INDEX_op_setcond2_i32:
124
return C_O1_I4(r, r, r, rI, rI);
125
126
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
127
index XXXXXXX..XXXXXXX 100644
128
--- a/tcg/i386/tcg-target.c.inc
129
+++ b/tcg/i386/tcg-target.c.inc
130
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond outop_brcond = {
131
.out_ri = tgen_brcondi,
132
};
133
134
-#if TCG_TARGET_REG_BITS == 32
135
-static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
136
- const int *const_args, bool small)
137
+static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al,
138
+ TCGReg ah, TCGArg bl, bool blconst,
139
+ TCGArg bh, bool bhconst,
140
+ TCGLabel *label_this, bool small)
141
{
142
TCGLabel *label_next = gen_new_label();
143
- TCGLabel *label_this = arg_label(args[5]);
144
- TCGCond cond = args[4];
145
146
switch (cond) {
147
case TCG_COND_EQ:
148
case TCG_COND_TSTEQ:
149
tcg_out_brcond(s, 0, tcg_invert_cond(cond),
150
- args[0], args[2], const_args[2], label_next, 1);
151
- tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
152
- label_this, small);
153
+ al, bl, blconst, label_next, true);
154
+ tcg_out_brcond(s, 0, cond, ah, bh, bhconst, label_this, small);
155
break;
156
157
case TCG_COND_NE:
158
case TCG_COND_TSTNE:
159
- tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2],
160
- label_this, small);
161
- tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
162
- label_this, small);
163
+ tcg_out_brcond(s, 0, cond, al, bl, blconst, label_this, small);
164
+ tcg_out_brcond(s, 0, cond, ah, bh, bhconst, label_this, small);
165
break;
166
167
default:
168
- tcg_out_brcond(s, 0, tcg_high_cond(cond), args[1],
169
- args[3], const_args[3], label_this, small);
170
+ tcg_out_brcond(s, 0, tcg_high_cond(cond),
171
+ ah, bh, bhconst, label_this, small);
172
tcg_out_jxx(s, JCC_JNE, label_next, 1);
173
- tcg_out_brcond(s, 0, tcg_unsigned_cond(cond), args[0],
174
- args[2], const_args[2], label_this, small);
175
+ tcg_out_brcond(s, 0, tcg_unsigned_cond(cond),
176
+ al, bl, blconst, label_this, small);
177
break;
178
}
179
tcg_out_label(s, label_next);
180
}
181
+
182
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al,
183
+ TCGReg ah, TCGArg bl, bool blconst,
184
+ TCGArg bh, bool bhconst, TCGLabel *l)
185
+{
186
+ tcg_out_brcond2(s, cond, al, ah, bl, blconst, bh, bhconst, l, false);
187
+}
188
+
189
+#if TCG_TARGET_REG_BITS != 32
190
+__attribute__((unused))
191
#endif
192
+static const TCGOutOpBrcond2 outop_brcond2 = {
193
+ .base.static_constraint = C_O0_I4(r, r, ri, ri),
194
+ .out = tgen_brcond2,
195
+};
196
197
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
198
TCGReg dest, TCGReg arg1, TCGArg arg2,
199
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
200
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
201
const int *const_args)
202
{
203
- TCGArg new_args[6];
204
TCGLabel *label_true, *label_over;
205
206
- memcpy(new_args, args+1, 5*sizeof(TCGArg));
207
-
208
if (args[0] == args[1] || args[0] == args[2]
209
|| (!const_args[3] && args[0] == args[3])
210
|| (!const_args[4] && args[0] == args[4])) {
211
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
212
label_true = gen_new_label();
213
label_over = gen_new_label();
214
215
- new_args[5] = label_arg(label_true);
216
- tcg_out_brcond2(s, new_args, const_args+1, 1);
217
+ tcg_out_brcond2(s, args[5], args[1], args[2], args[3], const_args[3],
218
+ args[4], const_args[4], label_true, true);
219
220
tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
221
tcg_out_jxx(s, JCC_JMP, label_over, 1);
222
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
223
tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
224
225
label_over = gen_new_label();
226
- new_args[4] = tcg_invert_cond(new_args[4]);
227
- new_args[5] = label_arg(label_over);
228
- tcg_out_brcond2(s, new_args, const_args+1, 1);
229
+ tcg_out_brcond2(s, tcg_invert_cond(args[5]), args[1], args[2],
230
+ args[3], const_args[3],
231
+ args[4], const_args[4], label_over, true);
232
234
tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
235
tcg_out_label(s, label_over);
236
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
237
break;
238
239
#if TCG_TARGET_REG_BITS == 32
240
- case INDEX_op_brcond2_i32:
241
- tcg_out_brcond2(s, args, const_args, 0);
242
- break;
243
case INDEX_op_setcond2_i32:
244
tcg_out_setcond2(s, args, const_args);
245
break;
246
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
247
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
248
return C_O0_I3(L, L, L);
249
250
- case INDEX_op_brcond2_i32:
251
- return C_O0_I4(r, r, ri, ri);
252
-
253
case INDEX_op_setcond2_i32:
254
return C_O1_I4(r, r, r, ri, ri);
255
256
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
257
index XXXXXXX..XXXXXXX 100644
258
--- a/tcg/mips/tcg-target.c.inc
259
+++ b/tcg/mips/tcg-target.c.inc
260
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
261
tcg_out_setcond_end(s, ret, tmpflags);
262
}
263
264
-static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
265
- TCGReg bl, TCGReg bh, TCGLabel *l)
266
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
267
+ TCGArg bl, bool const_bl,
268
+ TCGArg bh, bool const_bh, TCGLabel *l)
269
{
270
int tmpflags = tcg_out_setcond2_int(s, cond, TCG_TMP0, al, ah, bl, bh);
271
TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
272
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
273
tcg_out_nop(s);
274
}
275
276
+#if TCG_TARGET_REG_BITS != 32
277
+__attribute__((unused))
278
+#endif
279
+static const TCGOutOpBrcond2 outop_brcond2 = {
280
+ .base.static_constraint = C_O0_I4(r, r, rz, rz),
281
+ .out = tgen_brcond2,
282
+};
283
+
284
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
285
TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
286
TCGArg v1, bool const_v1, TCGArg v2, bool const_v2)
287
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
288
}
289
break;
290
291
- case INDEX_op_brcond2_i32:
292
- tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
293
- break;
294
-
295
case INDEX_op_setcond2_i32:
296
tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
297
break;
298
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
299
return C_O2_I4(r, r, rz, rz, rN, rN);
300
case INDEX_op_setcond2_i32:
301
return C_O1_I4(r, rz, rz, rz, rz);
302
- case INDEX_op_brcond2_i32:
303
- return C_O0_I4(rz, rz, rz, rz);
304
305
case INDEX_op_qemu_ld_i32:
306
return C_O1_I1(r, r);
307
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
308
index XXXXXXX..XXXXXXX 100644
309
--- a/tcg/ppc/tcg-target.c.inc
310
+++ b/tcg/ppc/tcg-target.c.inc
311
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
312
tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
313
}
314
315
-static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
316
- const int *const_args)
317
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
318
+ TCGArg bl, bool const_bl,
319
+ TCGArg bh, bool const_bh, TCGLabel *l)
320
{
321
- tcg_out_cmp2(s, args[4], args[0], args[1], args[2], const_args[2],
322
- args[3], const_args[3]);
323
- tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5]));
324
+ assert(TCG_TARGET_REG_BITS == 32);
325
+ tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
326
+ tcg_out_bc_lab(s, TCG_COND_EQ, l);
327
}
328
329
+#if TCG_TARGET_REG_BITS != 32
330
+__attribute__((unused))
331
+#endif
332
+static const TCGOutOpBrcond2 outop_brcond2 = {
333
+ .base.static_constraint = C_O0_I4(r, r, rU, rC),
334
+ .out = tgen_brcond2,
335
+};
336
+
337
static void tcg_out_mb(TCGContext *s, TCGArg a0)
338
{
339
uint32_t insn;
340
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
341
tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
342
break;
343
344
- case INDEX_op_brcond2_i32:
345
- tcg_out_brcond2(s, args, const_args);
346
- break;
347
-
348
case INDEX_op_qemu_ld_i32:
349
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
350
break;
351
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
352
case INDEX_op_deposit_i32:
353
case INDEX_op_deposit_i64:
354
return C_O1_I2(r, 0, rZ);
355
- case INDEX_op_brcond2_i32:
356
- return C_O0_I4(r, r, rU, rC);
357
case INDEX_op_setcond2_i32:
358
return C_O1_I4(r, r, r, rU, rC);
359
case INDEX_op_add2_i64:
360
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
361
index XXXXXXX..XXXXXXX 100644
362
--- a/tcg/tci/tcg-target.c.inc
363
+++ b/tcg/tci/tcg-target.c.inc
364
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
365
case INDEX_op_sub2_i64:
366
return C_O2_I4(r, r, r, r, r, r);
367
368
-#if TCG_TARGET_REG_BITS == 32
369
- case INDEX_op_brcond2_i32:
370
- return C_O0_I4(r, r, r, r);
371
-#endif
372
-
373
case INDEX_op_setcond2_i32:
374
return C_O1_I4(r, r, r, r, r);
375
376
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
377
.out = tgen_movcond,
378
};
379
380
+static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
381
+ TCGArg bl, bool const_bl,
382
+ TCGArg bh, bool const_bh, TCGLabel *l)
383
+{
384
+ tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
385
+ al, ah, bl, bh, cond);
386
+ tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
387
+}
388
+
389
+#if TCG_TARGET_REG_BITS != 32
390
+__attribute__((unused))
391
+#endif
392
+static const TCGOutOpBrcond2 outop_brcond2 = {
393
+ .base.static_constraint = C_O0_I4(r, r, r, r),
394
+ .out = tgen_brcond2,
395
+};
396
+
397
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
398
const TCGArg args[TCG_MAX_OP_ARGS],
399
const int const_args[TCG_MAX_OP_ARGS])
400
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
401
args[3], args[4], args[5]);
402
break;
403
404
-#if TCG_TARGET_REG_BITS == 32
405
- case INDEX_op_brcond2_i32:
406
- tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
407
- args[0], args[1], args[2], args[3], args[4]);
408
- tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, arg_label(args[5]));
409
- break;
410
-#endif
411
-
412
case INDEX_op_qemu_ld_i64:
413
case INDEX_op_qemu_st_i64:
414
if (TCG_TARGET_REG_BITS == 32) {
415
--
416
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/mips/tcg-target-con-set.h | 2 +-
5
tcg/tcg.c | 19 ++++++++++
6
tcg/arm/tcg-target.c.inc | 25 ++++++------
7
tcg/i386/tcg-target.c.inc | 71 +++++++++++++++++------------------
8
tcg/mips/tcg-target.c.inc | 20 ++++++----
9
tcg/ppc/tcg-target.c.inc | 25 ++++++------
10
tcg/tci/tcg-target.c.inc | 24 ++++++++----
11
7 files changed, 110 insertions(+), 76 deletions(-)
12
1
13
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/mips/tcg-target-con-set.h
16
+++ b/tcg/mips/tcg-target-con-set.h
17
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rz)
18
C_O1_I2(r, r, rzW)
19
C_O1_I4(r, r, rz, rz, 0)
20
C_O1_I4(r, r, rz, rz, rz)
21
-C_O1_I4(r, rz, rz, rz, rz)
22
+C_O1_I4(r, r, r, rz, rz)
23
C_O2_I1(r, r, r)
24
C_O2_I2(r, r, r, r)
25
C_O2_I4(r, r, rz, rz, rN, rN)
26
diff --git a/tcg/tcg.c b/tcg/tcg.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/tcg.c
29
+++ b/tcg/tcg.c
30
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpSetcond {
31
TCGReg ret, TCGReg a1, tcg_target_long a2);
32
} TCGOutOpSetcond;
33
34
+typedef struct TCGOutOpSetcond2 {
35
+ TCGOutOp base;
36
+ void (*out)(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah,
37
+ TCGArg bl, bool const_bl, TCGArg bh, bool const_bh);
38
+} TCGOutOpSetcond2;
39
+
40
typedef struct TCGOutOpSubtract {
41
TCGOutOp base;
42
void (*out_rrr)(TCGContext *s, TCGType type,
43
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
44
45
#if TCG_TARGET_REG_BITS == 32
46
OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
47
+ OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
48
#endif
49
};
50
51
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
52
new_args[3], const_args[3], label);
53
}
54
break;
55
+ case INDEX_op_setcond2_i32:
56
+ {
57
+ const TCGOutOpSetcond2 *out = &outop_setcond2;
58
+ TCGCond cond = new_args[5];
59
+
60
+ tcg_debug_assert(!const_args[1]);
61
+ tcg_debug_assert(!const_args[2]);
62
+ out->out(s, cond, new_args[0], new_args[1], new_args[2],
63
+ new_args[3], const_args[3], new_args[4], const_args[4]);
64
+ }
65
+ break;
66
#else
67
case INDEX_op_brcond2_i32:
68
+ case INDEX_op_setcond2_i32:
69
g_assert_not_reached();
70
#endif
71
72
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
73
index XXXXXXX..XXXXXXX 100644
74
--- a/tcg/arm/tcg-target.c.inc
75
+++ b/tcg/arm/tcg-target.c.inc
76
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond2 outop_brcond2 = {
77
.out = tgen_brcond2,
78
};
79
80
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
81
+ TCGReg al, TCGReg ah,
82
+ TCGArg bl, bool const_bl,
83
+ TCGArg bh, bool const_bh)
84
+{
85
+ cond = tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
86
+ finish_setcond(s, cond, ret, false);
87
+}
88
+
89
+static const TCGOutOpSetcond2 outop_setcond2 = {
90
+ .base.static_constraint = C_O1_I4(r, r, r, rI, rI),
91
+ .out = tgen_setcond2,
92
+};
93
94
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
95
const TCGArg args[TCG_MAX_OP_ARGS],
96
const int const_args[TCG_MAX_OP_ARGS])
97
{
98
TCGArg a0, a1, a2, a3, a4, a5;
99
- int c;
100
101
switch (opc) {
102
case INDEX_op_goto_ptr:
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
104
tcg_out_mov_reg(s, COND_AL, args[0], a0);
105
break;
106
107
- case INDEX_op_setcond2_i32:
108
- c = tcg_out_cmp2(s, args[5], args[1], args[2], args[3], const_args[3],
109
- args[4], const_args[4]);
110
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
111
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
112
- ARITH_MOV, args[0], 0, 0);
113
- break;
114
-
115
case INDEX_op_qemu_ld_i32:
116
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
117
break;
118
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
119
return C_O2_I4(r, r, r, r, rIN, rIK);
120
case INDEX_op_sub2_i32:
121
return C_O2_I4(r, r, rI, rI, rIN, rIK);
122
- case INDEX_op_setcond2_i32:
123
- return C_O1_I4(r, r, r, rI, rI);
124
-
125
case INDEX_op_qemu_ld_i32:
126
return C_O1_I1(r, q);
127
case INDEX_op_qemu_ld_i64:
128
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
129
index XXXXXXX..XXXXXXX 100644
130
--- a/tcg/i386/tcg-target.c.inc
131
+++ b/tcg/i386/tcg-target.c.inc
132
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
133
.out_rri = tgen_negsetcondi,
134
};
135
136
-#if TCG_TARGET_REG_BITS == 32
137
-static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
138
- const int *const_args)
139
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
140
+ TCGReg al, TCGReg ah,
141
+ TCGArg bl, bool const_bl,
142
+ TCGArg bh, bool const_bh)
143
{
144
- TCGLabel *label_true, *label_over;
145
+ TCGLabel *label_over = gen_new_label();
146
147
- if (args[0] == args[1] || args[0] == args[2]
148
- || (!const_args[3] && args[0] == args[3])
149
- || (!const_args[4] && args[0] == args[4])) {
150
- /* When the destination overlaps with one of the argument
151
- registers, don't do anything tricky. */
152
- label_true = gen_new_label();
153
- label_over = gen_new_label();
154
+ if (ret == al || ret == ah
155
+ || (!const_bl && ret == bl)
156
+ || (!const_bh && ret == bh)) {
157
+ /*
158
+ * When the destination overlaps with one of the argument
159
+ * registers, don't do anything tricky.
160
+ */
161
+ TCGLabel *label_true = gen_new_label();
162
163
- tcg_out_brcond2(s, args[5], args[1], args[2], args[3], const_args[3],
164
- args[4], const_args[4], label_true, true);
165
+ tcg_out_brcond2(s, cond, al, ah, bl, const_bl,
166
+ bh, const_bh, label_true, true);
167
168
- tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
169
+ tcg_out_movi(s, TCG_TYPE_I32, ret, 0);
170
tcg_out_jxx(s, JCC_JMP, label_over, 1);
171
tcg_out_label(s, label_true);
172
173
- tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
174
- tcg_out_label(s, label_over);
175
+ tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
176
} else {
177
- /* When the destination does not overlap one of the arguments,
178
- clear the destination first, jump if cond false, and emit an
179
- increment in the true case. This results in smaller code. */
180
+ /*
181
+ * When the destination does not overlap one of the arguments,
182
+ * clear the destination first, jump if cond false, and emit an
183
+ * increment in the true case. This results in smaller code.
184
+ */
185
+ tcg_out_movi(s, TCG_TYPE_I32, ret, 0);
186
187
- tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
188
+ tcg_out_brcond2(s, tcg_invert_cond(cond), al, ah, bl, const_bl,
189
+ bh, const_bh, label_over, true);
190
191
- label_over = gen_new_label();
192
- tcg_out_brcond2(s, tcg_invert_cond(args[5]), args[1], args[2],
193
- args[3], const_args[3],
194
- args[4], const_args[4], label_over, true);
195
-
196
-
197
- tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
198
- tcg_out_label(s, label_over);
199
+ tgen_arithi(s, ARITH_ADD, ret, 1, 0);
200
}
201
+ tcg_out_label(s, label_over);
202
}
203
+
204
+#if TCG_TARGET_REG_BITS != 32
205
+__attribute__((unused))
206
#endif
207
+static const TCGOutOpSetcond2 outop_setcond2 = {
208
+ .base.static_constraint = C_O1_I4(r, r, r, ri, ri),
209
+ .out = tgen_setcond2,
210
+};
211
212
static void tcg_out_cmov(TCGContext *s, int jcc, int rexw,
213
TCGReg dest, TCGReg v1)
214
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
215
}
216
break;
217
218
-#if TCG_TARGET_REG_BITS == 32
219
- case INDEX_op_setcond2_i32:
220
- tcg_out_setcond2(s, args, const_args);
221
- break;
222
-#else /* TCG_TARGET_REG_BITS == 64 */
223
+#if TCG_TARGET_REG_BITS == 64
224
case INDEX_op_ld32s_i64:
225
tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
226
break;
227
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
228
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
229
return C_O0_I3(L, L, L);
230
231
- case INDEX_op_setcond2_i32:
232
- return C_O1_I4(r, r, r, ri, ri);
233
-
234
case INDEX_op_ld_vec:
235
case INDEX_op_dupm_vec:
236
return C_O1_I1(x, r);
237
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
238
index XXXXXXX..XXXXXXX 100644
239
--- a/tcg/mips/tcg-target.c.inc
240
+++ b/tcg/mips/tcg-target.c.inc
241
@@ -XXX,XX +XXX,XX @@ static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret,
242
return ret | flags;
243
}
244
245
-static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
246
- TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
247
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
248
+ TCGReg al, TCGReg ah,
249
+ TCGArg bl, bool const_bl,
250
+ TCGArg bh, bool const_bh)
251
{
252
int tmpflags = tcg_out_setcond2_int(s, cond, ret, al, ah, bl, bh);
253
tcg_out_setcond_end(s, ret, tmpflags);
254
}
255
256
+#if TCG_TARGET_REG_BITS != 32
257
+__attribute__((unused))
258
+#endif
259
+static const TCGOutOpSetcond2 outop_setcond2 = {
260
+ .base.static_constraint = C_O1_I4(r, r, r, rz, rz),
261
+ .out = tgen_setcond2,
262
+};
263
+
264
static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
265
TCGArg bl, bool const_bl,
266
TCGArg bh, bool const_bh, TCGLabel *l)
267
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
268
}
269
break;
270
271
- case INDEX_op_setcond2_i32:
272
- tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
273
- break;
274
-
275
case INDEX_op_qemu_ld_i32:
276
tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
277
break;
278
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
279
case INDEX_op_add2_i32:
280
case INDEX_op_sub2_i32:
281
return C_O2_I4(r, r, rz, rz, rN, rN);
282
- case INDEX_op_setcond2_i32:
283
- return C_O1_I4(r, rz, rz, rz, rz);
284
285
case INDEX_op_qemu_ld_i32:
286
return C_O1_I1(r, r);
287
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
288
index XXXXXXX..XXXXXXX 100644
289
--- a/tcg/ppc/tcg-target.c.inc
290
+++ b/tcg/ppc/tcg-target.c.inc
291
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
292
}
293
}
294
295
-static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
296
- const int *const_args)
297
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
298
+ TCGReg al, TCGReg ah,
299
+ TCGArg bl, bool const_bl,
300
+ TCGArg bh, bool const_bh)
301
{
302
- tcg_out_cmp2(s, args[5], args[1], args[2], args[3], const_args[3],
303
- args[4], const_args[4]);
304
+ tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh);
305
tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
306
- tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
307
+ tcg_out_rlw(s, RLWINM, ret, TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
308
}
309
310
+#if TCG_TARGET_REG_BITS != 32
311
+__attribute__((unused))
312
+#endif
313
+static const TCGOutOpSetcond2 outop_setcond2 = {
314
+ .base.static_constraint = C_O1_I4(r, r, r, rU, rC),
315
+ .out = tgen_setcond2,
316
+};
317
+
318
static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
319
TCGArg bl, bool const_bl,
320
TCGArg bh, bool const_bh, TCGLabel *l)
321
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
322
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
323
break;
324
325
- case INDEX_op_setcond2_i32:
326
- tcg_out_setcond2(s, args, const_args);
327
- break;
328
-
329
case INDEX_op_bswap16_i32:
330
case INDEX_op_bswap16_i64:
331
tcg_out_bswap16(s, args[0], args[1], args[2]);
332
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
333
case INDEX_op_deposit_i32:
334
case INDEX_op_deposit_i64:
335
return C_O1_I2(r, 0, rZ);
336
- case INDEX_op_setcond2_i32:
337
- return C_O1_I4(r, r, r, rU, rC);
338
case INDEX_op_add2_i64:
339
case INDEX_op_add2_i32:
340
return C_O2_I4(r, r, r, r, rI, rZM);
341
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
342
index XXXXXXX..XXXXXXX 100644
343
--- a/tcg/tci/tcg-target.c.inc
344
+++ b/tcg/tci/tcg-target.c.inc
345
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
346
case INDEX_op_sub2_i64:
347
return C_O2_I4(r, r, r, r, r, r);
348
349
- case INDEX_op_setcond2_i32:
350
- return C_O1_I4(r, r, r, r, r);
351
-
352
case INDEX_op_qemu_ld_i32:
353
return C_O1_I1(r, r);
354
case INDEX_op_qemu_ld_i64:
355
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond2 outop_brcond2 = {
356
.out = tgen_brcond2,
357
};
358
359
+static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
360
+ TCGReg al, TCGReg ah,
361
+ TCGArg bl, bool const_bl,
362
+ TCGArg bh, bool const_bh)
363
+{
364
+ tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
365
+}
366
+
367
+#if TCG_TARGET_REG_BITS != 32
368
+__attribute__((unused))
369
+#endif
370
+static const TCGOutOpSetcond2 outop_setcond2 = {
371
+ .base.static_constraint = C_O1_I4(r, r, r, r, r),
372
+ .out = tgen_setcond2,
373
+};
374
+
375
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
376
const TCGArg args[TCG_MAX_OP_ARGS],
377
const int const_args[TCG_MAX_OP_ARGS])
378
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
379
tcg_out_op_l(s, opc, arg_label(args[0]));
380
break;
381
382
- case INDEX_op_setcond2_i32:
383
- tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
384
- args[3], args[4], args[5]);
385
- break;
386
-
387
CASE_32_64(ld8u)
388
CASE_32_64(ld8s)
389
CASE_32_64(ld16u)
390
--
391
2.43.0
diff view generated by jsdifflib
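A side note on the i386 conversion above: the two paths in tgen_setcond2 correspond to two generic lowering shapes. A rough C rendering of the idea, illustrative only, where 'cond_true' stands for the outcome of the double-word comparison:

    #include <stdbool.h>

    /* 'ret' may alias an input: the inputs must be consumed by the
     * comparison before anything is stored, so materialize 0/1 with
     * branches afterward. */
    static int lower_with_overlap(bool cond_true)
    {
        int ret;
        if (cond_true) {
            ret = 1;
        } else {
            ret = 0;
        }
        return ret;
    }

    /* 'ret' aliases no input: clear it before the comparison and
     * increment on the true path, which yields smaller code. */
    static int lower_no_overlap(bool cond_true)
    {
        int ret = 0;
        if (cond_true) {
            ret += 1;
        }
        return ret;
    }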
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target-has.h | 2 -
 tcg/arm/tcg-target-has.h | 1 -
 tcg/i386/tcg-target-has.h | 2 -
 tcg/loongarch64/tcg-target-has.h | 2 -
 tcg/mips/tcg-target-has.h | 2 -
 tcg/ppc/tcg-target-has.h | 2 -
 tcg/riscv/tcg-target-has.h | 2 -
 tcg/s390x/tcg-target-has.h | 2 -
 tcg/sparc64/tcg-target-has.h | 2 -
 tcg/tcg-has.h | 1 -
 tcg/tci/tcg-target-has.h | 2 -
 tcg/tcg-op.c | 4 +-
 tcg/tcg.c | 23 ++++++++--
 tcg/tci.c | 2 -
 tcg/aarch64/tcg-target.c.inc | 30 +++++++------
 tcg/arm/tcg-target.c.inc | 42 +++++++++---------
 tcg/i386/tcg-target.c.inc | 48 +++++++++++---------
 tcg/loongarch64/tcg-target.c.inc | 28 +++++++-----
 tcg/mips/tcg-target.c.inc | 74 +++++++++++++++----------------
 tcg/ppc/tcg-target.c.inc | 76 ++++++++++++++++----------------
 tcg/riscv/tcg-target.c.inc | 33 +++++++++-----
 tcg/s390x/tcg-target.c.inc | 40 ++++++++---------
 tcg/sparc64/tcg-target.c.inc | 4 ++
 tcg/tci/tcg-target.c.inc | 21 ++++++---
 24 files changed, 235 insertions(+), 210 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_lse2 (cpuinfo & CPUINFO_LSE2)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@
#if TCG_TARGET_REG_BITS == 64
/* Keep 32-bit values zero-extended in a register. */
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

/* 64-bit operations */
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_add2_i64 0
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1

#if TCG_TARGET_REG_BITS == 64
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_vsx (cpuinfo & CPUINFO_VSX)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
    ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_bswap16_i32 0
#define TCG_TARGET_HAS_bswap32_i32 0
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 1
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#ifndef TCG_TARGET_HAS_H
#define TCG_TARGET_HAS_H

-#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
    /* Only one extension flag may be present. */
    tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));

-    if (TCG_TARGET_HAS_bswap16_i32) {
+    if (tcg_op_supported(INDEX_op_bswap16_i32, TCG_TYPE_I32, 0)) {
        tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
    } else {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        }
-    } else if (TCG_TARGET_HAS_bswap16_i64) {
+    } else if (tcg_op_supported(INDEX_op_bswap16_i64, TCG_TYPE_I64, 0)) {
        tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
    } else {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBrcond2 {
                TCGArg bh, bool const_bh, TCGLabel *l);
} TCGOutOpBrcond2;

+typedef struct TCGOutOpBswap {
+    TCGOutOp base;
+    void (*out_rr)(TCGContext *s, TCGType type,
+                   TCGReg a0, TCGReg a1, unsigned flags);
+} TCGOutOpBswap;
+
typedef struct TCGOutOpDivRem {
    TCGOutOp base;
    void (*out_rr01r)(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
+    OUTOP(INDEX_op_bswap16_i32, TCGOutOpBswap, outop_bswap16),
+    OUTOP(INDEX_op_bswap16_i64, TCGOutOpBswap, outop_bswap16),
    OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
    OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
    OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
-    case INDEX_op_bswap16_i32:
-        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extr_i64_i32;
-    case INDEX_op_bswap16_i64:
-        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
        }
        break;

+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap16_i64:
+        {
+            const TCGOutOpBswap *out =
+                container_of(all_outop[op->opc], TCGOutOpBswap, base);
+
+            tcg_debug_assert(!const_args[1]);
+            out->out_rr(s, type, new_args[0], new_args[1], new_args[2]);
+        }
+        break;
+
    case INDEX_op_divs2:
    case INDEX_op_divu2:
        {
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
-#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
-#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, unsigned flags)
+{
+    tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
+    if (flags & TCG_BSWAP_OS) {
+        /* Output must be sign-extended. */
+        tcg_out_ext16s(s, type, a0, a0);
+    } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+        /* Output must be zero-extended, but input isn't. */
+        tcg_out_ext16u(s, a0, a0);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
    case INDEX_op_bswap32_i32:
        tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
        break;
-    case INDEX_op_bswap16_i64:
-    case INDEX_op_bswap16_i32:
-        tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
-        if (a2 & TCG_BSWAP_OS) {
-            /* Output must be sign-extended. */
-            tcg_out_ext16s(s, ext, a0, a0);
-        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            /* Output must be zero-extended, but input isn't. */
-            tcg_out_ext16u(s, a0, a0);
-        }
-        break;

    case INDEX_op_deposit_i64:
    case INDEX_op_deposit_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
-    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ext_i32_i64:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
    g_assert_not_reached();
}

-static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
-                            TCGReg rd, TCGReg rn, int flags)
-{
-    if (flags & TCG_BSWAP_OS) {
-        /* revsh */
-        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
-        return;
-    }
-
-    /* rev16 */
-    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
-    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-        /* uxth */
-        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
-    }
-}
-
static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg rd, TCGReg rn, unsigned flags)
+{
+    if (flags & TCG_BSWAP_OS) {
+        /* revsh */
+        tcg_out32(s, 0x06ff0fb0 | (COND_AL << 28) | (rd << 12) | rn);
+        return;
+    }
+
+    /* rev16 */
+    tcg_out32(s, 0x06bf0fb0 | (COND_AL << 28) | (rd << 12) | rn);
+    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+        tcg_out_ext16u(s, rd, rd);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_subfi(s, type, a0, 0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
        break;

-    case INDEX_op_bswap16_i32:
-        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
-        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
-    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, unsigned flags)
+{
+    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+    if (flags & TCG_BSWAP_OS) {
+        /* Output must be sign-extended. */
+        if (rexw) {
+            tcg_out_bswap64(s, a0);
+            tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
+        } else {
+            tcg_out_bswap32(s, a0);
+            tcg_out_shifti(s, SHIFT_SAR, a0, 16);
+        }
+    } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+        /* Output must be zero-extended, but input isn't. */
+        tcg_out_bswap32(s, a0);
+        tcg_out_shifti(s, SHIFT_SHR, a0, 16);
+    } else {
+        tcg_out_rolw_8(s, a0);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, 0),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        }
        break;

-    OP_32_64(bswap16):
-        if (a2 & TCG_BSWAP_OS) {
-            /* Output must be sign-extended. */
-            if (rexw) {
-                tcg_out_bswap64(s, a0);
-                tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
-            } else {
-                tcg_out_bswap32(s, a0);
-                tcg_out_shifti(s, SHIFT_SAR, a0, 16);
-            }
-        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            /* Output must be zero-extended, but input isn't. */
-            tcg_out_bswap32(s, a0);
-            tcg_out_shifti(s, SHIFT_SHR, a0, 16);
-        } else {
-            tcg_out_rolw_8(s, a0);
-        }
-        break;
    OP_32_64(bswap32):
        tcg_out_bswap32(s, a0);
        if (rexw && (a2 & TCG_BSWAP_OS)) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_st_i64:
        return C_O0_I2(re, r);

-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, unsigned flags)
+{
+    tcg_out_opc_revb_2h(s, a0, a1);
+    if (flags & TCG_BSWAP_OS) {
+        tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
+    } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+        tcg_out_ext16u(s, a0, a0);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
-        tcg_out_opc_revb_2h(s, a0, a1);
-        if (a2 & TCG_BSWAP_OS) {
-            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
-        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            tcg_out_ext16u(s, a0, a0);
-        }
-        break;
-
    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
    g_assert_not_reached();
}

-static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
-{
-    /* ret and arg can't be register tmp0 */
-    tcg_debug_assert(ret != TCG_TMP0);
-    tcg_debug_assert(arg != TCG_TMP0);
-
-    /* With arg = abcd: */
-    if (use_mips32r2_instructions) {
-        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);                 /* badc */
-        if (flags & TCG_BSWAP_OS) {
-            tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);              /* ssdc */
-        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff);        /* 00dc */
-        }
-        return;
-    }
-
-    tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);                  /* 0abc */
-    if (!(flags & TCG_BSWAP_IZ)) {
-        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff);  /* 000c */
-    }
-    if (flags & TCG_BSWAP_OS) {
-        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);                  /* d000 */
-        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);                  /* ssd0 */
-    } else {
-        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);                   /* bcd0 */
-        if (flags & TCG_BSWAP_OZ) {
-            tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);        /* 00d0 */
-        }
-    }
-    tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);                /* ssdc */
-}
-
static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
{
    if (!tcg_out_opc_jmp(s, OPC_JAL, sub)) {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg ret, TCGReg arg, unsigned flags)
+{
+    /* With arg = abcd: */
+    if (use_mips32r2_instructions) {
+        tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);                 /* badc */
+        if (flags & TCG_BSWAP_OS) {
+            tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret);              /* ssdc */
+        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+            tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xffff);        /* 00dc */
+        }
+        return;
+    }
+
+    tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);                  /* 0abc */
+    if (!(flags & TCG_BSWAP_IZ)) {
+        tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0x00ff);  /* 000c */
+    }
+    if (flags & TCG_BSWAP_OS) {
+        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);                  /* d000 */
+        tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16);                  /* ssd0 */
+    } else {
+        tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8);                   /* bcd0 */
+        if (flags & TCG_BSWAP_OZ) {
+            tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00);        /* 00d0 */
+        }
+    }
+    tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);                /* ssdc */
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_ldst(s, i1, a0, a1, a2);
        break;

-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
-        tcg_out_bswap16(s, a0, a1, a2);
-        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, a0, a1, 0);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
-    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ext_i32_i64:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
    tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
}

-static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
-{
-    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
-
-    if (have_isa_3_10) {
-        tcg_out32(s, BRH | RA(dst) | RS(src));
-        if (flags & TCG_BSWAP_OS) {
-            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
-        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
-            tcg_out_ext16u(s, dst, dst);
-        }
-        return;
-    }
-
-    /*
-     * In the following,
-     *   dep(a, b, m) -> (a & ~m) | (b & m)
-     *
-     * Begin with:                              src = xxxxabcd
-     */
-    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
-    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
-    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
-    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
-
-    if (flags & TCG_BSWAP_OS) {
-        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
-    } else {
-        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
-    }
-}
-
static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg dst, TCGReg src, unsigned flags)
+{
+    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
+
+    if (have_isa_3_10) {
+        tcg_out32(s, BRH | RA(dst) | RS(src));
+        if (flags & TCG_BSWAP_OS) {
+            tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
+        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+            tcg_out_ext16u(s, dst, dst);
+        }
+        return;
+    }
+
+    /*
+     * In the following,
+     *   dep(a, b, m) -> (a & ~m) | (b & m)
+     *
+     * Begin with:                              src = xxxxabcd
+     */
+    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
+    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
+    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
+    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
+
+    if (flags & TCG_BSWAP_OS) {
+        tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out32(s, NEG | RT(a0) | RA(a1));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
        break;

-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
-        tcg_out_bswap16(s, args[0], args[1], args[2]);
-        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], 0);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
-    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static TCGConstraintSetIndex cset_bswap(TCGType type, unsigned flags)
+{
+    return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
+}
+
+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, unsigned flags)
+{
+    tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+    if (flags & TCG_BSWAP_OZ) {
+        tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
+    } else {
+        tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_bswap,
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
        }
        break;
-    case INDEX_op_bswap16_i64:
-    case INDEX_op_bswap16_i32:
-        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
-        if (a2 & TCG_BSWAP_OZ) {
-            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
-        } else {
-            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
-        }
-        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
-    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
        return C_O1_I1(r, r);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori_3,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, unsigned flags)
+{
+    if (type == TCG_TYPE_I32) {
+        tcg_out_insn(s, RRE, LRVR, a0, a1);
+        tcg_out_sh32(s, (flags & TCG_BSWAP_OS ? RS_SRA : RS_SRL),
+                     a0, TCG_REG_NONE, 16);
+    } else {
+        tcg_out_insn(s, RRE, LRVGR, a0, a1);
+        tcg_out_sh64(s, (flags & TCG_BSWAP_OS ? RSY_SRAG : RSY_SRLG),
+                     a0, a0, TCG_REG_NONE, 48);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    if (type == TCG_TYPE_I32) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

-    case INDEX_op_bswap16_i32:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        tcg_out_insn(s, RRE, LRVR, a0, a1);
-        if (a2 & TCG_BSWAP_OS) {
-            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
-        } else {
-            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
-        }
-        break;
-    case INDEX_op_bswap16_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
-        tcg_out_insn(s, RRE, LRVGR, a0, a1);
-        if (a2 & TCG_BSWAP_OS) {
-            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
-        } else {
-            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
-        }
-        break;
-
    case INDEX_op_bswap32_i32:
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_xor = {
    .out_rri = tgen_xori,
};

+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_G0, a1);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_ctpop = {
    .out_rr = tgen_ctpop,
};

+static void tgen_bswap16(TCGContext *s, TCGType type,
+                         TCGReg a0, TCGReg a1, unsigned flags)
+{
+    tcg_out_op_rr(s, INDEX_op_bswap16_i32, a0, a1);
+    if (flags & TCG_BSWAP_OS) {
+        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
+    }
+}
+
+static const TCGOutOpBswap outop_bswap16 = {
+    .base.static_constraint = C_O1_I1(r, r),
+    .out_rr = tgen_bswap16,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

-    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
-    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
-        width = 16;
-        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
-    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
--
2.43.0
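The flags threaded through every tgen_bswap16 above follow a single contract. A self-contained C reference model of that contract (the enum values are stand-ins; they mirror TCG_BSWAP_IZ/OZ/OS but are not the real constants):

    #include <stdint.h>

    enum {
        BSWAP_IZ = 1,   /* input is known zero-extended (cf. TCG_BSWAP_IZ) */
        BSWAP_OZ = 2,   /* output must be zero-extended (cf. TCG_BSWAP_OZ) */
        BSWAP_OS = 4,   /* output must be sign-extended (cf. TCG_BSWAP_OS) */
    };

    static uint64_t ref_bswap16(uint64_t in, unsigned flags)
    {
        /* Swap the two low bytes; bits above 16 are ignored. */
        uint16_t swapped = (uint16_t)(((in & 0xff) << 8) | ((in >> 8) & 0xff));

        if (flags & BSWAP_OS) {
            /* Result is the swapped halfword, sign-extended. */
            return (uint64_t)(int64_t)(int16_t)swapped;
        }
        /* Otherwise the result is zero-extended; when the input was
         * already zero-extended (BSWAP_IZ), backends may skip the mask. */
        return swapped;
    }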
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 3 +--
 tcg/optimize.c | 7 +++----
 tcg/tcg-op.c | 8 ++++----
 tcg/tcg.c | 9 +++------
 tcg/tci.c | 5 ++---
 docs/devel/tcg-ops.rst | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 7 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(bswap16, 1, 1, 1, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
DEF(ctz, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(sub2_i32, 2, 4, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)

-DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)

/* load/store */
@@ -XXX,XX +XXX,XX @@ DEF(extu_i32_i64, 1, 1, 0, 0)
DEF(extrl_i64_i32, 1, 1, 0, 0)
DEF(extrh_i64_i32, 1, 1, 0, 0)

-DEF(bswap16_i64, 1, 1, 1, 0)
DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
    case INDEX_op_ctpop:
        return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);

-    CASE_OP_32_64(bswap16):
+    case INDEX_op_bswap16:
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)

    z_mask = t1->z_mask;
    switch (op->opc) {
-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap16:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
-        CASE_OP_32_64(bswap16):
+        case INDEX_op_bswap16:
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
    /* Only one extension flag may be present. */
    tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ));

-    if (tcg_op_supported(INDEX_op_bswap16_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags);
+    if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3i_i32(INDEX_op_bswap16, ret, arg, flags);
    } else {
        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
        }
-    } else if (tcg_op_supported(INDEX_op_bswap16_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags);
+    } else if (tcg_op_supported(INDEX_op_bswap16, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3i_i64(INDEX_op_bswap16, ret, arg, flags);
    } else {
        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
        TCGv_i64 t1 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
    OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
    OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
    OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
-    OUTOP(INDEX_op_bswap16_i32, TCGOutOpBswap, outop_bswap16),
-    OUTOP(INDEX_op_bswap16_i64, TCGOutOpBswap, outop_bswap16),
+    OUTOP(INDEX_op_bswap16, TCGOutOpBswap, outop_bswap16),
    OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
    OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
    OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
                i = 1;
            }
            break;
-        case INDEX_op_bswap16_i32:
-        case INDEX_op_bswap16_i64:
+        case INDEX_op_bswap16:
        case INDEX_op_bswap32_i32:
        case INDEX_op_bswap32_i64:
        case INDEX_op_bswap64_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
        }
        break;

-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
+    case INDEX_op_bswap16:
        {
            const TCGOutOpBswap *out =
                container_of(all_outop[op->opc], TCGOutOpBswap, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
-        CASE_32_64(bswap16)
+        case INDEX_op_bswap16:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                   op_name, str_r(r0), str_r(r1), s2);
        break;

+    case INDEX_op_bswap16:
    case INDEX_op_ctpop:
    case INDEX_op_mov:
    case INDEX_op_neg:
    case INDEX_op_not:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
-    case INDEX_op_bswap16_i32:
-    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc
     - | *t0* = *t1*
       | Move *t1* to *t0*.

-   * - bswap16_i32/i64 *t0*, *t1*, *flags*
+   * - bswap16 *t0*, *t1*, *flags*

     - | 16 bit byte swap on the low bits of a 32/64 bit input.
       |
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_ctpop = {
static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
-    tcg_out_op_rr(s, INDEX_op_bswap16_i32, a0, a1);
+    tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
    }
--
2.43.0
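With the opcodes merged, frontend code is unchanged: the operand width is carried by the operation's TCGType rather than by the opcode name. An illustrative call at the tcg-op level (a sketch, assuming TCGv_i32 temporaries dst and src already exist):

    /* Byte-swap the low 16 bits of 'src'; the high bits of the input are
     * unknown here, so request a zero-extended result. */
    tcg_gen_bswap16_i32(dst, src, TCG_BSWAP_OZ);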
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 2 -
tcg/arm/tcg-target-has.h | 1 -
tcg/i386/tcg-target-has.h | 2 -
tcg/loongarch64/tcg-target-has.h | 2 -
tcg/mips/tcg-target-has.h | 2 -
tcg/ppc/tcg-target-has.h | 2 -
tcg/riscv/tcg-target-has.h | 2 -
tcg/s390x/tcg-target-has.h | 2 -
tcg/sparc64/tcg-target-has.h | 2 -
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 2 -
tcg/tcg-op.c | 4 +-
tcg/tcg.c | 8 +--
tcg/tci.c | 2 -
tcg/aarch64/tcg-target.c.inc | 25 +++++-----
tcg/arm/tcg-target.c.inc | 23 ++++-----
tcg/i386/tcg-target.c.inc | 23 +++++----
tcg/loongarch64/tcg-target.c.inc | 33 +++++++------
tcg/mips/tcg-target.c.inc | 54 ++++++++++----------
tcg/ppc/tcg-target.c.inc | 84 ++++++++++++++++----------------
tcg/riscv/tcg-target.c.inc | 30 +++++-----
tcg/s390x/tcg-target.c.inc | 31 ++++++------
tcg/sparc64/tcg-target.c.inc | 4 ++
tcg/tci/tcg-target.c.inc | 28 +++++------
24 files changed, 182 insertions(+), 187 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_lse2 (cpuinfo & CPUINFO_LSE2)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
@@ -XXX,XX +XXX,XX @@
#if TCG_TARGET_REG_BITS == 64
/* Keep 32-bit values zero-extended in a register. */
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 1
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

/* 64-bit operations */
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 1

#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#endif
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_vsx (cpuinfo & CPUINFO_VSX)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_bswap32_i32 0
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#ifndef TCG_TARGET_HAS_H
#define TCG_TARGET_HAS_H

-#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_extract2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i32 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
*/
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (TCG_TARGET_HAS_bswap32_i32) {
+ if (tcg_op_supported(INDEX_op_bswap32_i32, TCG_TYPE_I32, 0)) {
tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
} else {
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}
- } else if (TCG_TARGET_HAS_bswap32_i64) {
+ } else if (tcg_op_supported(INDEX_op_bswap32_i64, TCG_TYPE_I64, 0)) {
tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
OUTOP(INDEX_op_bswap16, TCGOutOpBswap, outop_bswap16),
+ OUTOP(INDEX_op_bswap32_i32, TCGOutOpBswap, outop_bswap32),
+ OUTOP(INDEX_op_bswap32_i64, TCGOutOpBswap, outop_bswap32),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
return TCG_TARGET_HAS_add2_i32;
case INDEX_op_sub2_i32:
return TCG_TARGET_HAS_sub2_i32;
- case INDEX_op_bswap32_i32:
- return TCG_TARGET_HAS_bswap32_i32;

case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
return TCG_TARGET_HAS_extr_i64_i32;
- case INDEX_op_bswap32_i64:
- return TCG_TARGET_HAS_bswap32_i64;
case INDEX_op_bswap64_i64:
return TCG_TARGET_HAS_bswap64_i64;
case INDEX_op_add2_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;

case INDEX_op_bswap16:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
{
const TCGOutOpBswap *out =
container_of(all_outop[op->opc], TCGOutOpBswap, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap16(regs[r1]);
break;
-#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
CASE_32_64(bswap32)
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap32(regs[r1]);
break;
-#endif
#if TCG_TARGET_REG_BITS == 64
/* Load/store operations (64 bit). */

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_XZR, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
case INDEX_op_bswap64_i64:
tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
break;
- case INDEX_op_bswap32_i64:
- tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, a0, a0);
- }
- break;
- case INDEX_op_bswap32_i32:
- tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
- break;

case INDEX_op_deposit_i64:
case INDEX_op_deposit_i32:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
g_assert_not_reached();
}

-static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
-{
- /* rev */
- tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
-}
-
static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
TCGArg a1, int ofs, int len, bool const_a1)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg rd, TCGReg rn, unsigned flags)
+{
+ /* rev */
+ tcg_out32(s, 0x06bf0f30 | (COND_AL << 28) | (rd << 12) | rn);
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_subfi(s, type, a0, 0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;

- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, COND_AL, args[0], args[1]);
- break;
-
case INDEX_op_deposit_i32:
tcg_out_deposit(s, COND_AL, args[0], args[2],
args[3], args[4], const_args[2]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_bswap32_i32:
case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
return C_O1_I1(r, r);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_bswap32(s, a0);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(bswap32):
- tcg_out_bswap32(s, a0);
- if (rexw && (a2 & TCG_BSWAP_OS)) {
- tcg_out_ext32s(s, a0, a0);
- }
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(re, r);

- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_extrh_i64_i32:
return C_O1_I1(r, 0);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_opc_revb_2w(s, a0, a1);
+
+ /* All 32-bit values are computed sign-extended in the register. */
+ if (type == TCG_TYPE_I32 || (flags & TCG_BSWAP_OS)) {
+ tcg_out_ext32s(s, a0, a0);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
break;

- case INDEX_op_bswap32_i32:
- /* All 32-bit values are computed sign-extended in the register. */
- a2 = TCG_BSWAP_OS;
- /* fallthrough */
- case INDEX_op_bswap32_i64:
- tcg_out_opc_revb_2w(s, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, a0, a0);
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext32u(s, a0, a0);
- }
- break;
-
case INDEX_op_bswap64_i64:
tcg_out_opc_revb_d(s, a0, a1);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
}
}

-static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg, int flags)
-{
- if (use_mips32r2_instructions) {
- tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
- tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
- if (flags & TCG_BSWAP_OZ) {
- tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0);
- }
- } else {
- if (flags & TCG_BSWAP_OZ) {
- tcg_out_bswap_subr(s, bswap32u_addr);
- } else {
- tcg_out_bswap_subr(s, bswap32_addr);
- }
- /* delay slot -- never omit the insn, like tcg_out_mov might. */
- tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
- tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
- }
-}
-
static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
{
if (use_mips32r2_instructions) {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg ret, TCGReg arg, unsigned flags)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
+ tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_opc_bf(s, OPC_DEXT, ret, ret, 31, 0);
+ }
+ } else {
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_bswap_subr(s, bswap32u_addr);
+ } else {
+ tcg_out_bswap_subr(s, bswap32_addr);
+ }
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, a0, a1, 0);
- break;
- case INDEX_op_bswap32_i64:
- tcg_out_bswap32(s, a0, a1, a2);
- break;
case INDEX_op_bswap64_i64:
tcg_out_bswap64(s, a0, a1);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_bswap32_i32:
case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_ld8u_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32s_i64:
case INDEX_op_ld32u_i64:
case INDEX_op_ld_i64:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
}

-static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
-{
- TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
-
- if (have_isa_3_10) {
- tcg_out32(s, BRW | RA(dst) | RS(src));
- if (flags & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, dst, dst);
- } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext32u(s, dst, dst);
- }
- return;
- }
-
- /*
- * Stolen from gcc's builtin_bswap32.
- * In the following,
- * dep(a, b, m) -> (a & ~m) | (b & m)
- *
- * Begin with: src = xxxxabcd
- */
- /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
- tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
- /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
- tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
- /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
- tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
-
- if (flags & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, dst, tmp);
- } else {
- tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
- }
-}
-
static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
{
TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg dst, TCGReg src, unsigned flags)
+{
+ TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
+
+ if (have_isa_3_10) {
+ tcg_out32(s, BRW | RA(dst) | RS(src));
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, dst, dst);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, dst, dst);
+ }
+ return;
+ }
+
+ /*
+ * Stolen from gcc's builtin_bswap32.
+ * In the following,
+ * dep(a, b, m) -> (a & ~m) | (b & m)
+ *
+ * Begin with: src = xxxxabcd
+ */
+ /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
+ tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
+ /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
+ tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
+ /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
+ tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
+
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, dst, tmp);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tcg_out32(s, NEG | RT(a0) | RA(a1));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

- case INDEX_op_bswap32_i32:
- tcg_out_bswap32(s, args[0], args[1], 0);
- break;
- case INDEX_op_bswap32_i64:
- tcg_out_bswap32(s, args[0], args[1], args[2]);
- break;
case INDEX_op_bswap64_i64:
tcg_out_bswap64(s, args[0], args[1]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_bswap32_i32:
case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_ld8u_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+ if (flags & TCG_BSWAP_OZ) {
+ tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
+ } else {
+ tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_bswap,
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
case INDEX_op_bswap64_i64:
tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
break;
- case INDEX_op_bswap32_i32:
- a2 = 0;
- /* fall through */
- case INDEX_op_bswap32_i64:
- tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
- if (a2 & TCG_BSWAP_OZ) {
- tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
- } else {
- tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
- }
- break;

case INDEX_op_add2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
return C_O1_I1(r, r);

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_insn(s, RRE, LRVR, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, a0, a0);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
if (type == TCG_TYPE_I32) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;

- case INDEX_op_bswap32_i32:
- tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
- break;
- case INDEX_op_bswap32_i64:
- a0 = args[0], a1 = args[1], a2 = args[2];
- tcg_out_insn(s, RRE, LRVR, a0, a1);
- if (a2 & TCG_BSWAP_OS) {
- tcg_out_ext32s(s, a0, a0);
- } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
- tcg_out_ext32u(s, a0, a0);
- }
- break;
-
case INDEX_op_add2_i32:
if (const_args[4]) {
tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_G0, a1);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_extract_i32:
case INDEX_op_extract_i64:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
.out_rr = tgen_bswap16,
};

+static void tgen_bswap32(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, unsigned flags)
+{
+ tcg_out_op_rr(s, INDEX_op_bswap32_i32, a0, a1);
+ if (flags & TCG_BSWAP_OS) {
+ tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
+ }
+}
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap32,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- int width;
-
switch (opc) {
case INDEX_op_goto_ptr:
tcg_out_op_r(s, opc, args[0]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
break;

- case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
tcg_out_op_rr(s, opc, args[0], args[1]);
break;

- case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
- width = 32;
- /* The base tci bswaps zero-extend, and ignore high bits. */
- tcg_out_op_rr(s, opc, args[0], args[1]);
- if (args[2] & TCG_BSWAP_OS) {
- tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
- }
- break;
-
CASE_32_64(add2)
CASE_32_64(sub2)
tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
--
2.43.0
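
For readers tracking the per-backend conversions above, the flag handling that
every tgen_bswap32 emits instructions for can be modeled in a few lines of
plain C. This is an illustrative sketch, not QEMU code; the TCG_BSWAP_* values
are assumed to match QEMU's definitions (IZ: input known zero-extended,
OZ: output zero-extended, OS: output sign-extended):

    #include <stdint.h>

    enum { TCG_BSWAP_IZ = 1, TCG_BSWAP_OZ = 2, TCG_BSWAP_OS = 4 };

    /* Value-level model: only the low 32 bits of the input matter. */
    static uint64_t model_bswap32(uint64_t reg, unsigned flags)
    {
        uint32_t swapped = __builtin_bswap32((uint32_t)reg);

        if (flags & TCG_BSWAP_OS) {
            /* Sign-extend the swapped value from bit 31. */
            return (uint64_t)(int64_t)(int32_t)swapped;
        }
        /* Otherwise the high bits are zero, satisfying TCG_BSWAP_OZ.
         * Real backends emit an explicit extension only when their swap
         * instruction leaves the high bits in the wrong state. */
        return swapped;
    }

The backends differ only in how cheaply they reach this result: aarch64 and
s390x swap in one instruction and extend as needed, while pre-R2 MIPS falls
back to a subroutine call.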
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 4 +---
tcg/optimize.c | 7 +++----
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 9 +++------
tcg/tci.c | 5 ++---
docs/devel/tcg-ops.rst | 13 ++++++-------
tcg/tci/tcg-target.c.inc | 2 +-
7 files changed, 20 insertions(+), 28 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(add, 1, 2, 0, TCG_OPF_INT)
DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(bswap16, 1, 1, 1, TCG_OPF_INT)
+DEF(bswap32, 1, 1, 1, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
DEF(ctz, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(sub2_i32, 2, 4, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)

-DEF(bswap32_i32, 1, 1, 1, 0)
-
/* load/store */
DEF(ld8u_i64, 1, 1, 1, 0)
DEF(ld8s_i64, 1, 1, 1, 0)
@@ -XXX,XX +XXX,XX @@ DEF(extu_i32_i64, 1, 1, 0, 0)
DEF(extrl_i64_i32, 1, 1, 0, 0)
DEF(extrh_i64_i32, 1, 1, 0, 0)

-DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)

DEF(add2_i64, 2, 4, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
x = bswap16(x);
return y & TCG_BSWAP_OS ? (int16_t)x : x;

- CASE_OP_32_64(bswap32):
+ case INDEX_op_bswap32:
x = bswap32(x);
return y & TCG_BSWAP_OS ? (int32_t)x : x;

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
z_mask = bswap16(z_mask);
sign = INT16_MIN;
break;
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap32:
z_mask = bswap32(z_mask);
sign = INT32_MIN;
break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
done = fold_brcond2(&ctx, op);
break;
case INDEX_op_bswap16:
- CASE_OP_32_64(bswap32):
+ case INDEX_op_bswap32:
case INDEX_op_bswap64_i64:
done = fold_bswap(&ctx, op);
break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
*/
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (tcg_op_supported(INDEX_op_bswap32_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0);
+ if (tcg_op_supported(INDEX_op_bswap32, TCG_TYPE_I32, 0)) {
+ tcg_gen_op3i_i32(INDEX_op_bswap32, ret, arg, 0);
} else {
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
} else {
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
}
- } else if (tcg_op_supported(INDEX_op_bswap32_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags);
+ } else if (tcg_op_supported(INDEX_op_bswap32, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3i_i64(INDEX_op_bswap32, ret, arg, flags);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
OUTOP(INDEX_op_bswap16, TCGOutOpBswap, outop_bswap16),
- OUTOP(INDEX_op_bswap32_i32, TCGOutOpBswap, outop_bswap32),
- OUTOP(INDEX_op_bswap32_i64, TCGOutOpBswap, outop_bswap32),
+ OUTOP(INDEX_op_bswap32, TCGOutOpBswap, outop_bswap32),
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
}
break;
case INDEX_op_bswap16:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap32:
case INDEX_op_bswap64_i64:
{
TCGArg flags = op->args[k];
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;

case INDEX_op_bswap16:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap32:
{
const TCGOutOpBswap *out =
container_of(all_outop[op->opc], TCGOutOpBswap, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap16(regs[r1]);
break;
- CASE_32_64(bswap32)
+ case INDEX_op_bswap32:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap32(regs[r1]);
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
break;

case INDEX_op_bswap16:
+ case INDEX_op_bswap32:
case INDEX_op_ctpop:
case INDEX_op_mov:
case INDEX_op_neg:
case INDEX_op_not:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap32_i32:
- case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
tci_args_rr(insn, &r0, &r1);
info->fprintf_func(info->stream, "%-12s %s, %s",
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc
|
| If neither ``TCG_BSWAP_OZ`` nor ``TCG_BSWAP_OS`` are set, then the bits of *t0* above bit 15 may contain any value.

- * - bswap32_i64 *t0*, *t1*, *flags*
+ * - bswap32 *t0*, *t1*, *flags*

- - | 32 bit byte swap on a 64-bit value. The flags are the same as for bswap16,
- except they apply from bit 31 instead of bit 15.
+ - | 32 bit byte swap. The flags are the same as for bswap16, except
+ they apply from bit 31 instead of bit 15. On TCG_TYPE_I32, the
+ flags should be zero.

- * - bswap32_i32 *t0*, *t1*, *flags*
+ * - bswap64_i64 *t0*, *t1*, *flags*

- bswap64_i64 *t0*, *t1*, *flags*
-
- - | 32/64 bit byte swap. The flags are ignored, but still present
+ - | 64 bit byte swap. The flags are ignored, but still present
for consistency with the other bswap opcodes.

* - discard_i32/i64 *t0*
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap16 = {
static void tgen_bswap32(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, unsigned flags)
{
- tcg_out_op_rr(s, INDEX_op_bswap32_i32, a0, a1);
+ tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
if (flags & TCG_BSWAP_OS) {
tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
}
--
2.43.0
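
To make the reworked documentation concrete: with the merged opcode, one
bswap32 serves both types, and on TCG_TYPE_I64 the flags choose how bit 31
propagates into the high half. A hedged example in plain C (not QEMU code):

    /* bswap32 of 0x000000ff is 0xff000000; bit 31 is now set. */
    uint64_t oz = (uint32_t)__builtin_bswap32(0x000000ffu);
    /* oz == 0x00000000ff000000: TCG_BSWAP_OZ keeps the high bits zero. */
    uint64_t os = (uint64_t)(int64_t)(int32_t)__builtin_bswap32(0x000000ffu);
    /* os == 0xffffffffff000000: TCG_BSWAP_OS sign-extends from bit 31. */

On TCG_TYPE_I32 there are no high bits to fill, which is why the flags
should be zero there.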
Deleted patch
Use TCGOutOpUnary instead of TCGOutOpBswap because the
flags are not used with this opcode; they are merely
present for uniformity with the smaller bswaps.
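
As implied by the diffs that follow, the two emitter shapes differ only in the
flags operand; a sketch of the structures involved (the exact QEMU definitions
may differ in detail):

    typedef struct TCGOutOpBswap {
        TCGOutOp base;
        void (*out_rr)(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, unsigned flags);
    } TCGOutOpBswap;

    typedef struct TCGOutOpUnary {
        TCGOutOp base;
        void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
    } TCGOutOpUnary;

Since bswap64 rearranges all eight bytes, there are no unused high bits for
the flags to describe, so the unary form fits.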

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-has.h | 1 -
tcg/i386/tcg-target-has.h | 1 -
tcg/loongarch64/tcg-target-has.h | 1 -
tcg/mips/tcg-target-has.h | 1 -
tcg/ppc/tcg-target-has.h | 1 -
tcg/riscv/tcg-target-has.h | 1 -
tcg/s390x/tcg-target-has.h | 1 -
tcg/sparc64/tcg-target-has.h | 1 -
tcg/tcg-has.h | 1 -
tcg/tci/tcg-target-has.h | 1 -
tcg/tcg-op.c | 2 +-
tcg/tcg.c | 7 ++-
tcg/tci.c | 2 -
tcg/aarch64/tcg-target.c.inc | 15 ++++--
tcg/arm/tcg-target.c.inc | 4 ++
tcg/i386/tcg-target.c.inc | 16 ++++--
tcg/loongarch64/tcg-target.c.inc | 15 ++++--
tcg/mips/tcg-target.c.inc | 37 ++++++++------
tcg/ppc/tcg-target.c.inc | 88 ++++++++++++++++----------------
tcg/riscv/tcg-target.c.inc | 16 ++++--
tcg/s390x/tcg-target.c.inc | 15 ++++--
tcg/sparc64/tcg-target.c.inc | 4 ++
tcg/tci/tcg-target.c.inc | 17 ++++--
23 files changed, 144 insertions(+), 104 deletions(-)

diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target-has.h
+++ b/tcg/i386/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#if TCG_TARGET_REG_BITS == 64
/* Keep 32-bit values zero-extended in a register. */
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-has.h
+++ b/tcg/loongarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
/* 64-bit operations */
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0

diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target-has.h
+++ b/tcg/mips/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_qemu_st8_i32 0

#if TCG_TARGET_REG_BITS == 64
-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#endif

diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target-has.h
+++ b/tcg/riscv/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_extr_i64_i32 1
-#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1

diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-has.h
+++ b/tcg/tcg-has.h
@@ -XXX,XX +XXX,XX @@
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- } else if (TCG_TARGET_HAS_bswap64_i64) {
+ } else if (tcg_op_supported(INDEX_op_bswap64_i64, TCG_TYPE_I64, 0)) {
tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
#if TCG_TARGET_REG_BITS == 32
OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
+#else
+ OUTOP(INDEX_op_bswap64_i64, TCGOutOpUnary, outop_bswap64),
#endif
};

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
return TCG_TARGET_HAS_extr_i64_i32;
- case INDEX_op_bswap64_i64:
- return TCG_TARGET_HAS_bswap64_i64;
case INDEX_op_add2_i64:
return TCG_TARGET_HAS_add2_i64;
case INDEX_op_sub2_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_bswap64_i64:
+ assert(TCG_TARGET_REG_BITS == 64);
+ /* fall through */
case INDEX_op_ctpop:
case INDEX_op_neg:
case INDEX_op_not:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = (uint32_t)regs[r1];
break;
-#if TCG_TARGET_HAS_bswap64_i64
case INDEX_op_bswap64_i64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap64(regs[r1]);
break;
-#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

/* QEMU specific operations. */
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_XZR, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
break;

- case INDEX_op_bswap64_i64:
- tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
- break;
-
case INDEX_op_deposit_i64:
case INDEX_op_deposit_i32:
tcg_out_dep(s, ext, a0, a2, args[3], args[4]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
- case INDEX_op_bswap64_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extract_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_subfi(s, type, a0, 0, a1);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_bswap64(s, a0);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, 0),
+ .out_rr = tgen_bswap64,
+};
+#endif
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, a0);
- break;
case INDEX_op_extrh_i64_i32:
tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(re, r);

- case INDEX_op_bswap64_i64:
case INDEX_op_extrh_i64_i32:
return C_O1_I1(r, 0);

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_revb_d(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
break;

- case INDEX_op_bswap64_i64:
- tcg_out_opc_revb_d(s, a0, a1);
- break;
-
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_bswap64_i64:
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
case INDEX_op_ld8u_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
}
}

-static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
-{
- if (use_mips32r2_instructions) {
- tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
- tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
- } else {
- tcg_out_bswap_subr(s, bswap64_addr);
- /* delay slot -- never omit the insn, like tcg_out_mov might. */
- tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
- tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
- }
-}
-
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
+ tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
+ } else {
+ tcg_out_bswap_subr(s, bswap64_addr);
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
+ }
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+#endif /* TCG_TARGET_REG_BITS == 64 */
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, a0, a1);
- break;
case INDEX_op_extrh_i64_i32:
tcg_out_dsra(s, a0, a1, 32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32s_i64:
case INDEX_op_ld32u_i64:
case INDEX_op_ld_i64:
- case INDEX_op_bswap64_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
}

-static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
-{
- TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
- TCGReg t1 = dst == src ? dst : TCG_REG_R0;
-
- if (have_isa_3_10) {
- tcg_out32(s, BRD | RA(dst) | RS(src));
- return;
- }
-
- /*
- * In the following,
- * dep(a, b, m) -> (a & ~m) | (b & m)
- *
- * Begin with: src = abcdefgh
- */
- /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
- tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
- /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
- tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
- /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
- tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
-
- /* t0 = rol64(t0, 32) = hgfe0000 */
- tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
- /* t1 = rol64(src, 32) = efghabcd */
- tcg_out_rld(s, RLDICL, t1, src, 32, 0);
-
- /* t0 = dep(t0, rol32(t1, 24), 0xffffffff) = hgfebcda */
- tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
- /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
- tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
- /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
- tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
-
- tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
-}
-
/* Emit a move into ret of arg, if it can be done in one insn. */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
+{
+ TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
+ TCGReg t1 = dst == src ? dst : TCG_REG_R0;
+
+ if (have_isa_3_10) {
+ tcg_out32(s, BRD | RA(dst) | RS(src));
+ return;
+ }
+
+ /*
+ * In the following,
+ * dep(a, b, m) -> (a & ~m) | (b & m)
+ *
+ * Begin with: src = abcdefgh
+ */
+ /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
+ tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
+ /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
+ tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
+ /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
+ tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
+
+ /* t0 = rol64(t0, 32) = hgfe0000 */
+ tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
+ /* t1 = rol64(src, 32) = efghabcd */
+ tcg_out_rld(s, RLDICL, t1, src, 32, 0);
+
+ /* t0 = dep(t0, rol32(t1, 24), 0xffffffff) = hgfebcda */
+ tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
+ /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
+ tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
+ /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
+ tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
+
+ tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+#endif /* TCG_TARGET_REG_BITS == 64 */
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tcg_out32(s, NEG | RT(a0) | RA(a1));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

- case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, args[0], args[1]);
- break;
-
case INDEX_op_deposit_i32:
if (const_args[2]) {
uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap64_i64:
case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_bswap,
+ .out_rr = tgen_bswap64,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
break;

- case INDEX_op_bswap64_i64:
- tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
- break;
-
case INDEX_op_add2_i32:
tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
const_args[4], const_args[5], false, true);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
- case INDEX_op_bswap64_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_insn(s, RRE, LRVGR, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
if (type == TCG_TYPE_I32) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
break;

- case INDEX_op_bswap64_i64:
- tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
- break;
-
case INDEX_op_add2_i64:
if (const_args[4]) {
if ((int64_t)args[4] >= 0) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_bswap64_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extract_i32:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.base.static_constraint = C_NotImplemented,
};

+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tgen_sub(s, type, a0, TCG_REG_G0, a1);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap64_i64:
case INDEX_op_extract_i32:
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
.out_rr = tgen_bswap32,
};

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_op_rr(s, INDEX_op_bswap64_i64, a0, a1);
+}
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_bswap64,
+};
+#endif
+
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
break;

- case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
- tcg_out_op_rr(s, opc, args[0], args[1]);
- break;
-
CASE_32_64(add2)
CASE_32_64(sub2)
tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
--
2.43.0
diff view generated by jsdifflib
Even though bswap64 can only be used with TCG_TYPE_I64,
rename the opcode to maintain uniformity.
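
For illustration (an aside, not from the original commit message): the
DEF() arguments in include/tcg/tcg-opc.h are name, output count, input
count, constant-argument count, and flags, so the renamed opcode now
lines up with the other type-generic byte-swap entries:

    /* DEF(name, oargs, iargs, cargs, flags) */
    DEF(bswap32, 1, 1, 1, TCG_OPF_INT)
    DEF(bswap64, 1, 1, 1, TCG_OPF_INT)  /* was bswap64_i64 */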

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 6 +++---
tcg/tcg-op.c | 4 ++--
tcg/tcg.c | 6 +++---
tcg/tci.c | 4 ++--
docs/devel/tcg-ops.rst | 5 +++--
tcg/tci/tcg-target.c.inc | 2 +-
7 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(and, 1, 2, 0, TCG_OPF_INT)
DEF(andc, 1, 2, 0, TCG_OPF_INT)
DEF(bswap16, 1, 1, 1, TCG_OPF_INT)
DEF(bswap32, 1, 1, 1, TCG_OPF_INT)
+DEF(bswap64, 1, 1, 1, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
DEF(ctz, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(extu_i32_i64, 1, 1, 0, 0)
DEF(extrl_i64_i32, 1, 1, 0, 0)
DEF(extrh_i64_i32, 1, 1, 0, 0)

-DEF(bswap64_i64, 1, 1, 1, 0)
-
DEF(add2_i64, 2, 4, 0, 0)
DEF(sub2_i64, 2, 4, 0, 0)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
x = bswap32(x);
return y & TCG_BSWAP_OS ? (int32_t)x : x;

- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
return bswap64(x);

case INDEX_op_ext_i32_i64:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
z_mask = bswap32(z_mask);
sign = INT32_MIN;
break;
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
z_mask = bswap64(z_mask);
sign = INT64_MIN;
break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
break;
case INDEX_op_bswap16:
case INDEX_op_bswap32:
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
done = fold_bswap(&ctx, op);
break;
case INDEX_op_clz:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
tcg_gen_mov_i32(TCGV_HIGH(ret), t0);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- } else if (tcg_op_supported(INDEX_op_bswap64_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0);
+ } else if (tcg_op_supported(INDEX_op_bswap64, TCG_TYPE_I64, 0)) {
+ tcg_gen_op3i_i64(INDEX_op_bswap64, ret, arg, 0);
} else {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_brcond2_i32, TCGOutOpBrcond2, outop_brcond2),
OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
#else
- OUTOP(INDEX_op_bswap64_i64, TCGOutOpUnary, outop_bswap64),
+ OUTOP(INDEX_op_bswap64, TCGOutOpUnary, outop_bswap64),
#endif
};

@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
break;
case INDEX_op_bswap16:
case INDEX_op_bswap32:
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
{
TCGArg flags = op->args[k];
const char *name = NULL;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
assert(TCG_TARGET_REG_BITS == 64);
/* fall through */
case INDEX_op_ctpop:
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = (uint32_t)regs[r1];
break;
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = bswap64(regs[r1]);
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_not:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_bswap64_i64:
+ case INDEX_op_bswap64:
tci_args_rr(insn, &r0, &r1);
info->fprintf_func(info->stream, "%-12s %s, %s",
op_name, str_r(r0), str_r(r1));
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc
they apply from bit 31 instead of bit 15. On TCG_TYPE_I32, the
flags should be zero.

- * - bswap64_i64 *t0*, *t1*, *flags*
+ * - bswap64 *t0*, *t1*, *flags*

- | 64 bit byte swap. The flags are ignored, but still present
- for consistency with the other bswap opcodes.
+ for consistency with the other bswap opcodes. For future
+ compatibility, the flags should be zero.

* - discard_i32/i64 *t0*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBswap outop_bswap32 = {
#if TCG_TARGET_REG_BITS == 64
static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
- tcg_out_op_rr(s, INDEX_op_bswap64_i64, a0, a1);
+ tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
}

static const TCGOutOpUnary outop_bswap64 = {
--
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 20 +++++++++
tcg/aarch64/tcg-target.c.inc | 28 +++++++-----
tcg/arm/tcg-target.c.inc | 23 +++++-----
tcg/i386/tcg-target.c.inc | 77 +++++++++++++++++---------------
tcg/loongarch64/tcg-target.c.inc | 33 +++++++-------
tcg/mips/tcg-target.c.inc | 35 +++++++--------
tcg/ppc/tcg-target.c.inc | 35 +++++++--------
tcg/riscv/tcg-target.c.inc | 54 +++++++++++-----------
tcg/s390x/tcg-target.c.inc | 14 +++---
tcg/sparc64/tcg-target.c.inc | 16 ++++---
tcg/tci/tcg-target.c.inc | 8 ++--
11 files changed, 191 insertions(+), 152 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpDivRem {
TCGReg a0, TCGReg a1, TCGReg a4);
} TCGOutOpDivRem;

+typedef struct TCGOutOpExtract {
+ TCGOutOp base;
+ void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len);
+} TCGOutOpExtract;
+
typedef struct TCGOutOpMovcond {
TCGOutOp base;
void (*out)(TCGContext *s, TCGType type, TCGCond cond,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
+ OUTOP(INDEX_op_extract_i32, TCGOutOpExtract, outop_extract),
+ OUTOP(INDEX_op_extract_i64, TCGOutOpExtract, outop_extract),
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ {
+ const TCGOutOpExtract *out =
+ container_of(all_outop[op->opc], TCGOutOpExtract, base);
+
+ tcg_debug_assert(!const_args[1]);
+ out->out_rr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3]);
+ }
+ break;
+
case INDEX_op_muls2:
case INDEX_op_mulu2:
{
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
.out = tgen_movcond,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
+ tcg_out_logicali(s, I3404_ANDI, type, a0, a1, mask);
+ } else {
+ tcg_out_ubfm(s, type, a0, a1, ofs, ofs + len - 1);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_dep(s, ext, a0, a2, args[3], args[4]);
break;

- case INDEX_op_extract_i64:
- case INDEX_op_extract_i32:
- if (a2 == 0) {
- uint64_t mask = MAKE_64BIT_MASK(0, args[3]);
- tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, mask);
- } else {
- tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
- }
- break;
-
case INDEX_op_sextract_i64:
case INDEX_op_sextract_i32:
tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
| (ofs << 7) | ((ofs + len - 1) << 16));
}

-static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGReg rn, int ofs, int len)
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
+ unsigned ofs, unsigned len)
{
/* According to gcc, AND can be faster. */
if (ofs == 0 && len <= 8) {
- tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn,
+ tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn,
encode_imm_nofail((1 << len) - 1));
return;
}

if (use_armv7_instructions) {
/* ubfx */
- tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
+ tcg_out32(s, 0x07e00050 | (COND_AL << 28) | (rd << 12) | rn
| (ofs << 7) | ((len - 1) << 16));
return;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
switch (len) {
case 8:
/* uxtb */
- tcg_out32(s, 0x06ef0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
+ tcg_out32(s, 0x06ef0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
break;
case 16:
/* uxth */
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
+ tcg_out32(s, 0x06ff0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
break;
default:
g_assert_not_reached();
}
}

+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
TCGReg rn, int ofs, int len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_deposit(s, COND_AL, args[0], args[2],
args[3], args[4], const_args[2]);
break;
- case INDEX_op_extract_i32:
- tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;
case INDEX_op_sextract_i32:
tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
return C_O1_I1(r, r);

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8u(s, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16u(s, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32u(s, a0, a1);
+ return;
+ }
+ } else if (TCG_TARGET_REG_BITS == 64 && ofs + len == 32) {
+ /* This is a 32-bit zero-extending right shift. */
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHR, a0, ofs);
+ return;
+ } else if (ofs == 8 && len == 8) {
+ /*
+ * On the off-chance that we can use the high-byte registers.
+ * Otherwise we emit the same ext16 + shift pattern that we
+ * would have gotten from the normal tcg-op.c expansion.
+ */
+ if (a1 < 4 && (TCG_TARGET_REG_BITS == 32 || a0 < 8)) {
+ tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
+ } else {
+ tcg_out_ext16u(s, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHR, a0, 8);
+ }
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_extract_i64:
- if (a2 + args[3] == 32) {
- if (a2 == 0) {
- tcg_out_ext32u(s, a0, a1);
- break;
- }
- /* This is a 32-bit zero-extending right shift. */
- tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
- tcg_out_shifti(s, SHIFT_SHR, a0, a2);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_extract_i32:
- if (a2 == 0 && args[3] == 8) {
- tcg_out_ext8u(s, a0, a1);
- } else if (a2 == 0 && args[3] == 16) {
- tcg_out_ext16u(s, a0, a1);
- } else if (a2 == 8 && args[3] == 8) {
- /*
- * On the off-chance that we can use the high-byte registers.
- * Otherwise we emit the same ext16 + shift pattern that we
- * would have gotten from the normal tcg-op.c expansion.
- */
- if (a1 < 4 && a0 < 8) {
- tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
- } else {
- tcg_out_ext16u(s, a0, a1);
- tcg_out_shifti(s, SHIFT_SHR, a0, 8);
- }
- } else {
- g_assert_not_reached();
- }
- break;
-
case INDEX_op_sextract_i64:
if (a2 == 0 && args[3] == 8) {
tcg_out_ext8s(s, TCG_TYPE_I64, a0, a1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len <= 12) {
+ tcg_out_opc_andi(s, a0, a1, (1 << len) - 1);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bstrpick_w(s, a0, a1, ofs, ofs + len - 1);
+ } else {
+ tcg_out_opc_bstrpick_d(s, a0, a1, ofs, ofs + len - 1);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_srai_d(s, a0, a1, 32);
break;

- case INDEX_op_extract_i32:
- if (a2 == 0 && args[3] <= 12) {
- tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
- } else {
- tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
- }
- break;
- case INDEX_op_extract_i64:
- if (a2 == 0 && args[3] <= 12) {
- tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
- } else {
- tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
- }
- break;
-
case INDEX_op_sextract_i64:
if (a2 + args[3] == 32) {
if (a2 == 0) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
case INDEX_op_ext_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
case INDEX_op_ld8s_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len <= 16) {
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << len) - 1);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bf(s, OPC_EXT, a0, a1, len - 1, ofs);
+ } else {
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU,
+ a0, a1, len - 1, ofs);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[3] + args[4] - 1, args[3]);
break;

- case INDEX_op_extract_i32:
- if (a2 == 0 && args[3] <= 16) {
- tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << args[3]) - 1);
- } else {
- tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
- }
- break;
- case INDEX_op_extract_i64:
- if (a2 == 0 && args[3] <= 16) {
- tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << args[3]) - 1);
- } else {
- tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU,
- a0, a1, args[3] - 1, a2);
- }
- break;
-
case INDEX_op_sextract_i64:
if (a2 == 0 && args[3] == 32) {
tcg_out_ext32s(s, a0, a1);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len <= 16) {
+ tgen_andi(s, TCG_TYPE_I32, a0, a1, (1 << len) - 1);
+ } else if (type == TCG_TYPE_I32) {
+ tcg_out_rlw(s, RLWINM, a0, a1, 32 - ofs, 32 - len, 31);
+ } else {
+ tcg_out_rld(s, RLDICL, a0, a1, 64 - ofs, 64 - len);
+ }
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_extract_i32:
- if (args[2] == 0 && args[3] <= 16) {
- tcg_out32(s, ANDI | SAI(args[1], args[0], (1 << args[3]) - 1));
- break;
- }
- tcg_out_rlw(s, RLWINM, args[0], args[1],
- 32 - args[2], 32 - args[3], 31);
- break;
- case INDEX_op_extract_i64:
- if (args[2] == 0 && args[3] <= 16) {
- tcg_out32(s, ANDI | SAI(args[1], args[0], (1 << args[3]) - 1));
- break;
- }
- tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
- break;
-
case INDEX_op_sextract_i64:
if (args[2] + args[3] == 32) {
if (args[2] == 0) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 16:
+ tcg_out_ext16u(s, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32u(s, a0, a1);
+ return;
+ }
+ }
+ if (ofs + len == 32) {
+ tgen_shri(s, TCG_TYPE_I32, a0, a1, ofs);
+ return;
+ }
+ if (len == 1) {
+ tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mb(s, a0);
break;

- case INDEX_op_extract_i64:
- if (a2 + args[3] == 32) {
- if (a2 == 0) {
- tcg_out_ext32u(s, a0, a1);
- } else {
- tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
- }
- break;
- }
- /* FALLTHRU */
- case INDEX_op_extract_i32:
- switch (args[3]) {
- case 1:
- tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, a2);
- break;
- case 16:
- tcg_debug_assert(a2 == 0);
- tcg_out_ext16u(s, a0, a1);
- break;
- default:
- g_assert_not_reached();
- }
- break;
-
case INDEX_op_sextract_i64:
if (a2 + args[3] == 32) {
if (a2 == 0) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
case INDEX_op_ext_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}

-static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
- int ofs, int len)
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg src, unsigned ofs, unsigned len)
{
if (ofs == 0) {
switch (len) {
@@ -XXX,XX +XXX,XX @@ static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}

+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
static void tgen_sextract(TCGContext *s, TCGReg dest, TCGReg src,
int ofs, int len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- OP_32_64(extract):
- tgen_extract(s, args[0], args[1], args[2], args[3]);
- break;
OP_32_64(sextract):
tgen_sextract(s, args[0], args[1], args[2], args[3]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_debug_assert(ofs + len == 32);
+ tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRL);
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mb(s, a0);
break;

- case INDEX_op_extract_i64:
- tcg_debug_assert(a2 + args[3] == 32);
- tcg_out_arithi(s, a0, a1, a2, SHIFT_SRL);
- break;
case INDEX_op_sextract_i64:
tcg_debug_assert(a2 + args[3] == 32);
tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
return C_O1_I1(r, r);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tcg_out_extract,
+};
+
static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
TCGReg rs, unsigned pos, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
break;

- CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */
CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
break;
--
2.43.0

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 14 ++++----------
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 9 +++------
tcg/tci.c | 12 ++++--------
docs/devel/tcg-ops.rst | 6 +++---
tcg/tci/tcg-target.c.inc | 5 +----
7 files changed, 20 insertions(+), 37 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(divs2, 2, 3, 0, TCG_OPF_INT)
DEF(divu, 1, 2, 0, TCG_OPF_INT)
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
+DEF(extract, 1, 1, 2, TCG_OPF_INT)
DEF(movcond, 1, 4, 1, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* shifts/rotates */
DEF(deposit_i32, 1, 2, 2, 0)
-DEF(extract_i32, 1, 1, 2, 0)
DEF(sextract_i32, 1, 1, 2, 0)
DEF(extract2_i32, 1, 2, 1, 0)

@@ -XXX,XX +XXX,XX @@ DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* shifts/rotates */
DEF(deposit_i64, 1, 2, 2, 0)
-DEF(extract_i64, 1, 1, 2, 0)
DEF(sextract_i64, 1, 1, 2, 0)
DEF(extract2_i64, 1, 2, 1, 0)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
- TCGOpcode uext_opc = 0, sext_opc = 0;
+ TCGOpcode sext_opc = 0;
TCGCond cond = op->args[3];
TCGArg ret, src1, src2;
TCGOp *op2;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)

switch (ctx->type) {
case TCG_TYPE_I32:
- if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
- uext_opc = INDEX_op_extract_i32;
- }
if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
sext_opc = INDEX_op_sextract_i32;
}
break;
case TCG_TYPE_I64:
- if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
- uext_opc = INDEX_op_extract_i64;
- }
if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
sext_opc = INDEX_op_sextract_i64;
}
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
op->args[2] = sh;
op->args[3] = 1;
return;
- } else if (sh && uext_opc) {
- op->opc = uext_opc;
+ } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) {
+ op->opc = INDEX_op_extract;
op->args[1] = src1;
op->args[2] = sh;
op->args[3] = 1;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_eqv_vec:
done = fold_eqv(&ctx, op);
break;
- CASE_OP_32_64(extract):
+ case INDEX_op_extract:
done = fold_extract(&ctx, op);
break;
CASE_OP_32_64(extract2):
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
}

if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, ofs, len);
return;
}
if (ofs == 0) {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,

/* Assume that zero-extension, if available, is cheaper than a shift. */
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, 0, ofs + len)) {
- tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i32(INDEX_op_extract, ret, arg, 0, ofs + len);
tcg_gen_shri_i32(ret, ret, ofs);
return;
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
}

if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, ofs, len);
return;
}
if (ofs == 0) {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,

/* Assume that zero-extension, if available, is cheaper than a shift. */
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, 0, ofs + len)) {
- tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i64(INDEX_op_extract, ret, arg, 0, ofs + len);
tcg_gen_shri_i64(ret, ret, ofs);
return;
}
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
- OUTOP(INDEX_op_extract_i32, TCGOutOpExtract, outop_extract),
- OUTOP(INDEX_op_extract_i64, TCGOutOpExtract, outop_extract),
+ OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_brcond:
+ case INDEX_op_extract:
case INDEX_op_mov:
case INDEX_op_movcond:
case INDEX_op_negsetcond:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
case INDEX_op_deposit_i32:
return true;
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i64:
case INDEX_op_sextract_i64:
case INDEX_op_deposit_i64:
return TCG_TARGET_REG_BITS == 64;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
+ case INDEX_op_extract:
{
const TCGOutOpExtract *out =
container_of(all_outop[op->opc], TCGOutOpExtract, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@


#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
+#define extract_tr glue(extract, TCG_TARGET_REG_BITS)

/*
* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
break;
- case INDEX_op_extract_i32:
+ case INDEX_op_extract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract32(regs[r1], pos, len);
+ regs[r0] = extract_tr(regs[r1], pos, len);
break;
case INDEX_op_sextract_i32:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
break;
- case INDEX_op_extract_i64:
- tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = extract64(regs[r1], pos, len);
- break;
case INDEX_op_sextract_i64:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
regs[r0] = sextract64(regs[r1], pos, len);
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
break;

- case INDEX_op_extract_i32:
- case INDEX_op_extract_i64:
+ case INDEX_op_extract:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc
|
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)

- * - extract_i32/i64 *dest*, *t1*, *pos*, *len*
+ * - extract *dest*, *t1*, *pos*, *len*

sextract_i32/i64 *dest*, *t1*, *pos*, *len*

@@ -XXX,XX +XXX,XX @@ Misc
to the left with zeros; for sextract_*, the result will be extended
to the left with copies of the bitfield sign bit at *pos* + *len* - 1.
|
- | For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field
+ | For example, "sextract dest, t1, 8, 4" indicates a 4-bit field
at bit 8. This operation would be equivalent to
|
| *dest* = (*t1* << 20) >> 28
|
- | (using an arithmetic right shift).
+ | (using an arithmetic right shift) on TCG_TYPE_I32.

* - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
TCGReg rs, unsigned pos, unsigned len)
{
- TCGOpcode opc = type == TCG_TYPE_I32 ?
- INDEX_op_extract_i32 :
- INDEX_op_extract_i64;
- tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
+ tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_extract = {
--
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 4 ++
tcg/aarch64/tcg-target.c.inc | 18 +++++----
tcg/arm/tcg-target.c.inc | 21 ++++++-----
tcg/i386/tcg-target.c.inc | 63 ++++++++++++++++----------------
tcg/loongarch64/tcg-target.c.inc | 49 ++++++++++++++-----------
tcg/mips/tcg-target.c.inc | 42 ++++++++++++---------
tcg/ppc/tcg-target.c.inc | 49 ++++++++++++++-----------
tcg/riscv/tcg-target.c.inc | 49 ++++++++++++++-----------
tcg/s390x/tcg-target.c.inc | 15 ++++----
tcg/sparc64/tcg-target.c.inc | 18 ++++---
tcg/tci/tcg-target.c.inc | 11 +++---
11 files changed, 188 insertions(+), 151 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr),
OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
OUTOP(INDEX_op_setcond, TCGOutOpSetcond, outop_setcond),
+ OUTOP(INDEX_op_sextract_i32, TCGOutOpExtract, outop_sextract),
+ OUTOP(INDEX_op_sextract_i64, TCGOutOpExtract, outop_sextract),
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;

case INDEX_op_extract:
+ case INDEX_op_sextract_i32:
+ case INDEX_op_sextract_i64:
{
const TCGOutOpExtract *out =
container_of(all_outop[op->opc], TCGOutOpExtract, base);
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_out_sbfm(s, type, a0, a1, ofs, ofs + len - 1);
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_dep(s, ext, a0, a2, args[3], args[4]);
break;

- case INDEX_op_sextract_i64:
- case INDEX_op_sextract_i32:
- tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
- break;
-
case INDEX_op_extract2_i64:
case INDEX_op_extract2_i32:
tcg_out_extr(s, ext, a0, a2, a1, args[3]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

-static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGReg rn, int ofs, int len)
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
+ unsigned ofs, unsigned len)
{
if (use_armv7_instructions) {
/* sbfx */
- tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
+ tcg_out32(s, 0x07a00050 | (COND_AL << 28) | (rd << 12) | rn
| (ofs << 7) | ((len - 1) << 16));
return;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
switch (len) {
case 8:
/* sxtb */
- tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
+ tcg_out32(s, 0x06af0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
break;
case 16:
/* sxth */
- tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
+ tcg_out32(s, 0x06bf0070 | (COND_AL << 28) |
+ (rd << 12) | (ofs << 7) | rn);
break;
default:
g_assert_not_reached();
}
}

+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
TCGReg rd, TCGReg rn, int32_t offset)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_deposit(s, COND_AL, args[0], args[2],
args[3], args[4], const_args[2]);
break;
- case INDEX_op_sextract_i32:
- tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
- break;
case INDEX_op_extract2_i32:
/* ??? These optimization vs zero should be generic. */
/* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_sextract_i32:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ } else if (ofs == 8 && len == 8) {
+ if (type == TCG_TYPE_I32 && a1 < 4 && a0 < 8) {
+ tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
+ } else {
+ tcg_out_ext16s(s, type, a0, a1);
+ tgen_sari(s, type, a0, a0, 8);
+ }
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_sextract_i64:
- if (a2 == 0 && args[3] == 8) {
- tcg_out_ext8s(s, TCG_TYPE_I64, a0, a1);
- } else if (a2 == 0 && args[3] == 16) {
- tcg_out_ext16s(s, TCG_TYPE_I64, a0, a1);
- } else if (a2 == 0 && args[3] == 32) {
- tcg_out_ext32s(s, a0, a1);
- } else {
- g_assert_not_reached();
- }
- break;
-
- case INDEX_op_sextract_i32:
- if (a2 == 0 && args[3] == 8) {
- tcg_out_ext8s(s, TCG_TYPE_I32, a0, a1);
- } else if (a2 == 0 && args[3] == 16) {
- tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
- } else if (a2 == 8 && args[3] == 8) {
- if (a1 < 4 && a0 < 8) {
- tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
- } else {
- tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
- tcg_out_shifti(s, SHIFT_SAR, a0, 8);
- }
- } else {
- g_assert_not_reached();
- }
- break;
-
OP_32_64(extract2):
/* Note that SHRD outputs to the r/m operand. */
tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

case INDEX_op_extract2_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ } else if (ofs + len == 32) {
+ tcg_out_opc_srai_w(s, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_srai_d(s, a0, a1, 32);
break;

- case INDEX_op_sextract_i64:
- if (a2 + args[3] == 32) {
- if (a2 == 0) {
- tcg_out_ext32s(s, a0, a1);
- } else {
- tcg_out_opc_srai_w(s, a0, a1, a2);
- }
- break;
- }
- /* FALLTHRU */
- case INDEX_op_sextract_i32:
- if (a2 == 0 && args[3] == 8) {
- tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
- } else if (a2 == 0 && args[3] == 16) {
- tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
- } else {
- g_assert_not_reached();
- }
- break;
-
case INDEX_op_deposit_i32:
tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
case INDEX_op_ext_i32_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
case INDEX_op_ld8u_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
args[3] + args[4] - 1, args[3]);
break;

- case INDEX_op_sextract_i64:
- if (a2 == 0 && args[3] == 32) {
- tcg_out_ext32s(s, a0, a1);
- break;
- }
- /* FALLTHRU */
- case INDEX_op_sextract_i32:
- if (a2 == 0 && args[3] == 8) {
- tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
- } else if (a2 == 0 && args[3] == 16) {
- tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
- } else {
- g_assert_not_reached();
- }
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_sextract_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
case INDEX_op_ld16u_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ } else if (ofs + len == 32) {
+ tcg_out_sari32(s, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
}
break;

- case INDEX_op_sextract_i64:
- if (args[2] + args[3] == 32) {
- if (args[2] == 0) {
- tcg_out_ext32s(s, args[0], args[1]);
- } else {
- tcg_out_sari32(s, args[0], args[1], args[2]);
- }
- break;
- }
- /* FALLTHRU */
- case INDEX_op_sextract_i32:
- if (args[2] == 0 && args[3] == 8) {
- tcg_out_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
- } else if (args[2] == 0 && args[3] == 16) {
- tcg_out_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
- } else {
- g_assert_not_reached();
- }
- break;
-
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_add2_i64:
#else
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
- case INDEX_op_sextract_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
case INDEX_op_ld16u_i64:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ if (ofs == 0) {
+ switch (len) {
+ case 8:
+ tcg_out_ext8s(s, type, a0, a1);
+ return;
+ case 16:
+ tcg_out_ext16s(s, type, a0, a1);
+ return;
+ case 32:
+ tcg_out_ext32s(s, a0, a1);
+ return;
+ }
+ } else if (ofs + len == 32) {
+ tgen_sari(s, TCG_TYPE_I32, a0, a1, ofs);
+ return;
+ }
+ g_assert_not_reached();
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_mb(s, a0);
break;

- case INDEX_op_sextract_i64:
- if (a2 + args[3] == 32) {
- if (a2 == 0) {
- tcg_out_ext32s(s, a0, a1);
- } else {
- tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
- }
- break;
- }
- /* FALLTHRU */
- case INDEX_op_sextract_i32:
- if (a2 == 0 && args[3] == 8) {
- tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
- } else if (a2 == 0 && args[3] == 16) {
- tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
- } else {
- g_assert_not_reached();
- }
- break;
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
case INDEX_op_ext_i32_i64:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
.out_rr = tgen_extract,
};

-static void tgen_sextract(TCGContext *s, TCGReg dest, TCGReg src,
- int ofs, int len)
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg src, unsigned ofs, unsigned len)
{
if (ofs == 0) {
switch (len) {
@@ -XXX,XX +XXX,XX @@ static void tgen_sextract(TCGContext *s, TCGReg dest, TCGReg src,
g_assert_not_reached();
}

+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
563
{
564
ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
565
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
566
}
567
break;
568
569
- OP_32_64(sextract):
570
- tgen_sextract(s, args[0], args[1], args[2], args[3]);
571
- break;
572
-
573
case INDEX_op_mb:
574
/* The host memory model is quite strong, we simply need to
575
serialize the instruction stream. */
576
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
577
578
case INDEX_op_ext_i32_i64:
579
case INDEX_op_extu_i32_i64:
580
- case INDEX_op_sextract_i32:
581
- case INDEX_op_sextract_i64:
582
return C_O1_I1(r, r);
583
584
case INDEX_op_qemu_ld_i32:
585
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
586
index XXXXXXX..XXXXXXX 100644
587
--- a/tcg/sparc64/tcg-target.c.inc
588
+++ b/tcg/sparc64/tcg-target.c.inc
589
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
590
.out_rr = tgen_extract,
591
};
592
593
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
594
+ unsigned ofs, unsigned len)
595
+{
596
+ tcg_debug_assert(ofs + len == 32);
597
+ tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRA);
598
+}
599
+
600
+static const TCGOutOpExtract outop_sextract = {
601
+ .base.static_constraint = C_O1_I1(r, r),
602
+ .out_rr = tgen_sextract,
603
+};
604
+
605
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
606
const TCGArg args[TCG_MAX_OP_ARGS],
607
const int const_args[TCG_MAX_OP_ARGS])
608
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
609
tcg_out_mb(s, a0);
610
break;
611
612
- case INDEX_op_sextract_i64:
613
- tcg_debug_assert(a2 + args[3] == 32);
614
- tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
615
- break;
616
-
617
case INDEX_op_call: /* Always emitted via tcg_out_call. */
618
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
619
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
620
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
621
case INDEX_op_ld_i64:
622
case INDEX_op_ext_i32_i64:
623
case INDEX_op_extu_i32_i64:
624
- case INDEX_op_sextract_i64:
625
case INDEX_op_qemu_ld_i32:
626
case INDEX_op_qemu_ld_i64:
627
return C_O1_I1(r, r);
628
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
629
index XXXXXXX..XXXXXXX 100644
630
--- a/tcg/tci/tcg-target.c.inc
631
+++ b/tcg/tci/tcg-target.c.inc
632
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
633
case INDEX_op_ld_i64:
634
case INDEX_op_ext_i32_i64:
635
case INDEX_op_extu_i32_i64:
636
- case INDEX_op_sextract_i32:
637
- case INDEX_op_sextract_i64:
638
return C_O1_I1(r, r);
639
640
case INDEX_op_st8_i32:
641
@@ -XXX,XX +XXX,XX @@ static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
642
tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
643
}
644
645
+static const TCGOutOpExtract outop_sextract = {
646
+ .base.static_constraint = C_O1_I1(r, r),
647
+ .out_rr = tcg_out_sextract,
648
+};
649
+
650
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
651
{
652
tcg_out_sextract(s, type, rd, rs, 0, 8);
653
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
654
tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
655
break;
656
657
- CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
658
- tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
659
- break;
660
-
661
CASE_32_64(add2)
662
CASE_32_64(sub2)
663
tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
664
--
665
2.43.0
diff view generated by jsdifflib
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 3 +--
 tcg/optimize.c | 22 +++-------------------
 tcg/tcg-op.c | 12 ++++++------
 tcg/tcg.c | 9 +++------
 tcg/tci.c | 12 ++++--------
 docs/devel/tcg-ops.rst | 2 +-
 tcg/tci/tcg-target.c.inc | 5 +----
 7 files changed, 19 insertions(+), 46 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(rotl, 1, 2, 0, TCG_OPF_INT)
 DEF(rotr, 1, 2, 0, TCG_OPF_INT)
 DEF(sar, 1, 2, 0, TCG_OPF_INT)
 DEF(setcond, 1, 2, 1, TCG_OPF_INT)
+DEF(sextract, 1, 1, 2, TCG_OPF_INT)
 DEF(shl, 1, 2, 0, TCG_OPF_INT)
 DEF(shr, 1, 2, 0, TCG_OPF_INT)
 DEF(sub, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* shifts/rotates */
 DEF(deposit_i32, 1, 2, 2, 0)
-DEF(sextract_i32, 1, 1, 2, 0)
 DEF(extract2_i32, 1, 2, 1, 0)

 DEF(add2_i32, 2, 4, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* shifts/rotates */
 DEF(deposit_i64, 1, 2, 2, 0)
-DEF(sextract_i64, 1, 1, 2, 0)
 DEF(extract2_i64, 1, 2, 1, 0)

 /* size changing ops */
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

 static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
 {
- TCGOpcode sext_opc = 0;
 TCGCond cond = op->args[3];
 TCGArg ret, src1, src2;
 TCGOp *op2;
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
 }
 sh = ctz64(val);

- switch (ctx->type) {
- case TCG_TYPE_I32:
- if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
- sext_opc = INDEX_op_sextract_i32;
- }
- break;
- case TCG_TYPE_I64:
- if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
- sext_opc = INDEX_op_sextract_i64;
- }
- break;
- default:
- g_assert_not_reached();
- }
-
 ret = op->args[0];
 src1 = op->args[1];
 inv = cond == TCG_COND_TSTEQ;

- if (sh && sext_opc && neg && !inv) {
- op->opc = sext_opc;
+ if (sh && neg && !inv && TCG_TARGET_sextract_valid(ctx->type, sh, 1)) {
+ op->opc = INDEX_op_sextract;
 op->args[1] = src1;
 op->args[2] = sh;
 op->args[3] = 1;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 case INDEX_op_bitsel_vec:
 done = fold_bitsel_vec(&ctx, op);
 break;
- CASE_OP_32_64(sextract):
+ case INDEX_op_sextract:
 done = fold_sextract(&ctx, op);
 break;
 case INDEX_op_sub:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
 }

 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, ofs, len)) {
- tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
+ tcg_gen_op4ii_i32(INDEX_op_sextract, ret, arg, ofs, len);
 return;
 }

 /* Assume that sign-extension, if available, is cheaper than a shift. */
 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, ofs + len)) {
- tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i32(INDEX_op_sextract, ret, arg, 0, ofs + len);
 tcg_gen_sari_i32(ret, ret, ofs);
 return;
 }
 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, 0, len)) {
 tcg_gen_shri_i32(ret, arg, ofs);
- tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, ret, 0, len);
+ tcg_gen_op4ii_i32(INDEX_op_sextract, ret, ret, 0, len);
 return;
 }

@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
 }

 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, ofs, len)) {
- tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
+ tcg_gen_op4ii_i64(INDEX_op_sextract, ret, arg, ofs, len);
 return;
 }

 /* Assume that sign-extension, if available, is cheaper than a shift. */
 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, ofs + len)) {
- tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, 0, ofs + len);
+ tcg_gen_op4ii_i64(INDEX_op_sextract, ret, arg, 0, ofs + len);
 tcg_gen_sari_i64(ret, ret, ofs);
 return;
 }
 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, 0, len)) {
 tcg_gen_shri_i64(ret, arg, ofs);
- tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, ret, 0, len);
+ tcg_gen_op4ii_i64(INDEX_op_sextract, ret, ret, 0, len);
 return;
 }

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
 OUTOP(INDEX_op_rotr, TCGOutOpBinary, outop_rotr),
 OUTOP(INDEX_op_sar, TCGOutOpBinary, outop_sar),
 OUTOP(INDEX_op_setcond, TCGOutOpSetcond, outop_setcond),
- OUTOP(INDEX_op_sextract_i32, TCGOutOpExtract, outop_sextract),
- OUTOP(INDEX_op_sextract_i64, TCGOutOpExtract, outop_sextract),
+ OUTOP(INDEX_op_sextract, TCGOutOpExtract, outop_sextract),
 OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
 OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
 OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_negsetcond:
 case INDEX_op_or:
 case INDEX_op_setcond:
+ case INDEX_op_sextract:
 case INDEX_op_xor:
 return has_type;

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_st8_i32:
 case INDEX_op_st16_i32:
 case INDEX_op_st_i32:
- case INDEX_op_sextract_i32:
 case INDEX_op_deposit_i32:
 return true;

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_st_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
- case INDEX_op_sextract_i64:
 case INDEX_op_deposit_i64:
 return TCG_TARGET_REG_BITS == 64;

@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 break;

 case INDEX_op_extract:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
+ case INDEX_op_sextract:
 {
 const TCGOutOpExtract *out =
 container_of(all_outop[op->opc], TCGOutOpExtract, base);
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@

 #define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
 #define extract_tr glue(extract, TCG_TARGET_REG_BITS)
+#define sextract_tr glue(sextract, TCG_TARGET_REG_BITS)

 /*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
 regs[r0] = extract_tr(regs[r1], pos, len);
 break;
- case INDEX_op_sextract_i32:
+ case INDEX_op_sextract:
 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = sextract32(regs[r1], pos, len);
+ regs[r0] = sextract_tr(regs[r1], pos, len);
 break;
 case INDEX_op_brcond:
 tci_args_rl(insn, tb_ptr, &r0, &ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
 regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
 break;
- case INDEX_op_sextract_i64:
- tci_args_rrbb(insn, &r0, &r1, &pos, &len);
- regs[r0] = sextract64(regs[r1], pos, len);
- break;
 case INDEX_op_ext_i32_i64:
 tci_args_rr(insn, &r0, &r1);
 regs[r0] = (int32_t)regs[r1];
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
 break;

 case INDEX_op_extract:
- case INDEX_op_sextract_i32:
- case INDEX_op_sextract_i64:
+ case INDEX_op_sextract:
 tci_args_rrbb(insn, &r0, &r1, &pos, &len);
 info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
 op_name, str_r(r0), str_r(r1), pos, len);
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc

 * - extract *dest*, *t1*, *pos*, *len*

- sextract_i32/i64 *dest*, *t1*, *pos*, *len*
+ sextract *dest*, *t1*, *pos*, *len*

 - | Extract a bitfield from *t1*, placing the result in *dest*.
 |
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_extract = {
 static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
 TCGReg rs, unsigned pos, unsigned len)
 {
- TCGOpcode opc = type == TCG_TYPE_I32 ?
- INDEX_op_sextract_i32 :
- INDEX_op_sextract_i64;
- tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
+ tcg_out_op_rrbb(s, INDEX_op_sextract, rd, rs, pos, len);
 }

 static const TCGOutOpExtract outop_sextract = {
--
2.43.0
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 22 +++++++++++++++++++---
 tcg/aarch64/tcg-target.c.inc | 2 --
 tcg/i386/tcg-target.c.inc | 2 --
 tcg/loongarch64/tcg-target.c.inc | 2 --
 tcg/mips/tcg-target.c.inc | 2 --
 tcg/ppc/tcg-target.c.inc | 2 --
 tcg/riscv/tcg-target.c.inc | 2 --
 tcg/s390x/tcg-target.c.inc | 2 --
 tcg/sparc64/tcg-target.c.inc | 2 --
 tcg/tci/tcg-target.c.inc | 2 --
 10 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
 < MIN_TLB_MASK_TABLE_OFS);
 #endif

+#if TCG_TARGET_REG_BITS == 64
+/*
+ * We require these functions for slow-path function calls.
+ * Adapt them generically for opcode output.
+ */
+
+static void tgen_exts_i32_i64(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_exts_i32_i64(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_exts_i32_i64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_exts_i32_i64,
+};
+#endif
+
 /*
 * Register V as the TCGOutOp for O.
 * This verifies that V is of type T, otherwise give a nice compiler error.
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
 OUTOP(INDEX_op_setcond2_i32, TCGOutOpSetcond2, outop_setcond2),
 #else
 OUTOP(INDEX_op_bswap64, TCGOutOpUnary, outop_bswap64),
+ OUTOP(INDEX_op_ext_i32_i64, TCGOutOpUnary, outop_exts_i32_i64),
 #endif
 };

@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 /* emit instruction */
 TCGType type = TCGOP_TYPE(op);
 switch (op->opc) {
- case INDEX_op_ext_i32_i64:
- tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
- break;
 case INDEX_op_extu_i32_i64:
 tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
 break;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 break;

 case INDEX_op_bswap64:
+ case INDEX_op_ext_i32_i64:
 assert(TCG_TARGET_REG_BITS == 64);
 /* fall through */
 case INDEX_op_ctpop:
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, 0);

- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 return C_O1_I1(r, r);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
- case INDEX_op_ext_i32_i64:
 case INDEX_op_ld8s_i32:
 case INDEX_op_ld8s_i64:
 case INDEX_op_ld8u_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
- case INDEX_op_ext_i32_i64:
 return C_O1_I1(r, r);

 case INDEX_op_st8_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_st_i64:
 return C_O0_I2(r, r);

- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 case INDEX_op_qemu_ld_i32:
 case INDEX_op_qemu_ld_i64:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
 case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
--
2.43.0
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 15 ++++++++++++---
 tcg/aarch64/tcg-target.c.inc | 2 --
 tcg/i386/tcg-target.c.inc | 2 --
 tcg/loongarch64/tcg-target.c.inc | 2 --
 tcg/mips/tcg-target.c.inc | 2 --
 tcg/ppc/tcg-target.c.inc | 2 --
 tcg/riscv/tcg-target.c.inc | 2 --
 tcg/s390x/tcg-target.c.inc | 4 ----
 tcg/sparc64/tcg-target.c.inc | 2 --
 tcg/tci/tcg-target.c.inc | 2 --
 10 files changed, 12 insertions(+), 23 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_exts_i32_i64 = {
 .base.static_constraint = C_O1_I1(r, r),
 .out_rr = tgen_exts_i32_i64,
 };
+
+static void tgen_extu_i32_i64(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_extu_i32_i64(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_extu_i32_i64 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extu_i32_i64,
+};
 #endif

 /*
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
 #else
 OUTOP(INDEX_op_bswap64, TCGOutOpUnary, outop_bswap64),
 OUTOP(INDEX_op_ext_i32_i64, TCGOutOpUnary, outop_exts_i32_i64),
+ OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
 #endif
 };

@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 /* emit instruction */
 TCGType type = TCGOP_TYPE(op);
 switch (op->opc) {
- case INDEX_op_extu_i32_i64:
- tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
- break;
 case INDEX_op_extrl_i64_i32:
 tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
 break;
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)

 case INDEX_op_bswap64:
 case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
 assert(TCG_TARGET_REG_BITS == 64);
 /* fall through */
 case INDEX_op_ctpop:
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

 case INDEX_op_st8_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, 0);

- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 return C_O1_I1(r, r);

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_qemu_st_i128:
 return C_O0_I3(r, r, r);

- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 case INDEX_op_ld8s_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, r);
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

 case INDEX_op_st8_i32:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, r);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_st_i64:
 return C_O0_I2(r, r);

- case INDEX_op_extu_i32_i64:
- return C_O1_I1(r, r);
-
 case INDEX_op_qemu_ld_i32:
 case INDEX_op_qemu_ld_i64:
 return C_O1_I1(r, r);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 default:
 g_assert_not_reached();
 }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extu_i32_i64:
 case INDEX_op_qemu_ld_i32:
 case INDEX_op_qemu_ld_i64:
 return C_O1_I1(r, r);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extu_i32_i64:
 return C_O1_I1(r, r);

 case INDEX_op_st8_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extu_i32_i64:
 case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
--
2.43.0
Deleted patch
Drop the cast from TCGv_i64 to TCGv_i32 in tcg_gen_extrl_i64_i32
and emit extrl_i64_i32 unconditionally. Move that special case
to tcg_gen_code when we find out if the output is live or dead.
In this way even hosts that canonicalize truncations can make
use of a store directly from the 64-bit host register.
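Roughly, the allocation-time decision described above amounts to the
following (a sketch only, reusing the names that appear in the tcg.c
diff below; it is not a drop-in implementation):

    /* Sketch: how tcg_gen_code decides to lower extrl_i64_i32. */
    if (TCG_TARGET_HAS_extr_i64_i32 && !IS_DEAD_ARG(0)) {
        /* Output stays live: emit the host's canonicalizing unary op. */
    } else {
        /* Output dies immediately (typically into a store): a plain
           move suffices, since the store can read the low 32 bits
           directly from the 64-bit host register. */
    }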
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-op.c | 4 +---
 tcg/tcg.c | 35 +++++++++++++++++++++++++++-----
 tcg/aarch64/tcg-target.c.inc | 1 -
 tcg/i386/tcg-target.c.inc | 4 ----
 tcg/loongarch64/tcg-target.c.inc | 2 --
 tcg/mips/tcg-target.c.inc | 2 --
 tcg/ppc/tcg-target.c.inc | 1 -
 tcg/riscv/tcg-target.c.inc | 2 --
 tcg/s390x/tcg-target.c.inc | 1 -
 tcg/tci/tcg-target.c.inc | 1 -
 10 files changed, 31 insertions(+), 22 deletions(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
 {
 if (TCG_TARGET_REG_BITS == 32) {
 tcg_gen_mov_i32(ret, TCGV_LOW(arg));
- } else if (TCG_TARGET_HAS_extr_i64_i32) {
+ } else {
 tcg_gen_op2(INDEX_op_extrl_i64_i32, TCG_TYPE_I32,
 tcgv_i32_arg(ret), tcgv_i64_arg(arg));
- } else {
- tcg_gen_mov_i32(ret, (TCGv_i32)arg);
 }
 }

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_extu_i32_i64 = {
 .base.static_constraint = C_O1_I1(r, r),
 .out_rr = tgen_extu_i32_i64,
 };
+
+static void tgen_extrl_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_extrl_i64_i32(s, a0, a1);
+}
+
+static const TCGOutOpUnary outop_extrl_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = TCG_TARGET_HAS_extr_i64_i32 ? tgen_extrl_i64_i32 : NULL,
+};
 #endif

 /*
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
 OUTOP(INDEX_op_bswap64, TCGOutOpUnary, outop_bswap64),
 OUTOP(INDEX_op_ext_i32_i64, TCGOutOpUnary, outop_exts_i32_i64),
 OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
+ OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32),
 #endif
 };

@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_st_i64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
 case INDEX_op_deposit_i64:
 return TCG_TARGET_REG_BITS == 64;

 case INDEX_op_extract2_i64:
 return TCG_TARGET_HAS_extract2_i64;
- case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 return TCG_TARGET_HAS_extr_i64_i32;
 case INDEX_op_add2_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 /* emit instruction */
 TCGType type = TCGOP_TYPE(op);
 switch (op->opc) {
- case INDEX_op_extrl_i64_i32:
- tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
- break;
-
 case INDEX_op_add:
 case INDEX_op_and:
 case INDEX_op_andc:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
 case INDEX_op_bswap64:
 case INDEX_op_ext_i32_i64:
 case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
 assert(TCG_TARGET_REG_BITS == 64);
 /* fall through */
 case INDEX_op_ctpop:
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
 TCGOpcode opc = op->opc;

 switch (opc) {
+ case INDEX_op_extrl_i64_i32:
+ assert(TCG_TARGET_REG_BITS == 64);
+ /*
+ * If TCG_TYPE_I32 is represented in some canonical form,
+ * e.g. zero or sign-extended, then emit as a unary op.
+ * Otherwise we can treat this as a plain move.
+ * If the output dies, treat this as a plain move, because
+ * this will be implemented with a store.
+ */
+ if (TCG_TARGET_HAS_extr_i64_i32) {
+ TCGLifeData arg_life = op->life;
+ if (!IS_DEAD_ARG(0)) {
+ goto do_default;
+ }
+ }
+ /* fall through */
 case INDEX_op_mov:
 case INDEX_op_mov_vec:
 tcg_reg_alloc_mov(s, op);
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
 }
 /* fall through */
 default:
+ do_default:
 /* Sanity check that we've not introduced any unhandled opcodes. */
 tcg_debug_assert(tcg_op_supported(opc, TCGOP_TYPE(op),
 TCGOP_FLAGS(op)));
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, 0);

- case INDEX_op_extrl_i64_i32:
- return C_O1_I1(r, r);
-
 case INDEX_op_extract2_i32:
 case INDEX_op_extract2_i64:
 return C_O1_I2(r, 0, r);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_qemu_st_i128:
 return C_O0_I3(r, r, r);

- case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 case INDEX_op_ld8s_i32:
 case INDEX_op_ld8s_i64:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, r);

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
 case INDEX_op_ld32s_i64:
 case INDEX_op_ld32u_i64:
 case INDEX_op_ld_i64:
- case INDEX_op_extrl_i64_i32:
 case INDEX_op_extrh_i64_i32:
 return C_O1_I1(r, r);

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
 case INDEX_op_call: /* Always emitted via tcg_out_call. */
 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_extrl_i64_i32:
 default:
 g_assert_not_reached();
 }
--
2.43.0
Deleted patch
At the same time, make extrh_i64_i32 mandatory. This closes a hole
in which move arguments could be cast between TCGv_i32 and TCGv_i64.
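The per-backend hunks below all follow the same TCGOutOp pattern
introduced earlier in this series; as a representative sketch (the
function body is host-specific, and the comment stands in for the
actual shift or extract instruction each backend emits):

    static void tgen_extrh_i64_i32(TCGContext *s, TCGType t,
                                   TCGReg a0, TCGReg a1)
    {
        /* host-specific: move bits 32..63 of a1 into bits 0..31 of a0 */
    }

    static const TCGOutOpUnary outop_extrh_i64_i32 = {
        .base.static_constraint = C_O1_I1(r, r),
        .out_rr = tgen_extrh_i64_i32,
    };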
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tcg-op.c | 7 +------
8
tcg/tcg.c | 5 +++--
9
tcg/aarch64/tcg-target.c.inc | 10 ++++++++++
10
tcg/i386/tcg-target.c.inc | 20 +++++++++++++-------
11
tcg/loongarch64/tcg-target.c.inc | 15 ++++++++++-----
12
tcg/mips/tcg-target.c.inc | 17 ++++++++++++-----
13
tcg/ppc/tcg-target.c.inc | 12 ++++++++++++
14
tcg/riscv/tcg-target.c.inc | 15 ++++++++++-----
15
tcg/s390x/tcg-target.c.inc | 10 ++++++++++
16
tcg/sparc64/tcg-target.c.inc | 10 ++++++++++
17
tcg/tci/tcg-target.c.inc | 12 ++++++++++++
18
11 files changed, 103 insertions(+), 30 deletions(-)
19
20
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/tcg-op.c
23
+++ b/tcg/tcg-op.c
24
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
25
{
26
if (TCG_TARGET_REG_BITS == 32) {
27
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
28
- } else if (TCG_TARGET_HAS_extr_i64_i32) {
29
+ } else {
30
tcg_gen_op2(INDEX_op_extrh_i64_i32, TCG_TYPE_I32,
31
tcgv_i32_arg(ret), tcgv_i64_arg(arg));
32
- } else {
33
- TCGv_i64 t = tcg_temp_ebb_new_i64();
34
- tcg_gen_shri_i64(t, arg, 32);
35
- tcg_gen_mov_i32(ret, (TCGv_i32)t);
36
- tcg_temp_free_i64(t);
37
}
38
}
39
40
diff --git a/tcg/tcg.c b/tcg/tcg.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/tcg/tcg.c
43
+++ b/tcg/tcg.c
44
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
45
OUTOP(INDEX_op_ext_i32_i64, TCGOutOpUnary, outop_exts_i32_i64),
46
OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
47
OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32),
48
+ OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
49
#endif
50
};
51
52
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
53
case INDEX_op_ext_i32_i64:
54
case INDEX_op_extu_i32_i64:
55
case INDEX_op_extrl_i64_i32:
56
+ case INDEX_op_extrh_i64_i32:
57
case INDEX_op_deposit_i64:
58
return TCG_TARGET_REG_BITS == 64;
59
60
case INDEX_op_extract2_i64:
61
return TCG_TARGET_HAS_extract2_i64;
62
- case INDEX_op_extrh_i64_i32:
63
- return TCG_TARGET_HAS_extr_i64_i32;
64
case INDEX_op_add2_i64:
65
return TCG_TARGET_HAS_add2_i64;
66
case INDEX_op_sub2_i64:
67
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
68
case INDEX_op_ext_i32_i64:
69
case INDEX_op_extu_i32_i64:
70
case INDEX_op_extrl_i64_i32:
71
+ case INDEX_op_extrh_i64_i32:
72
assert(TCG_TARGET_REG_BITS == 64);
73
/* fall through */
74
case INDEX_op_ctpop:
75
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
76
index XXXXXXX..XXXXXXX 100644
77
--- a/tcg/aarch64/tcg-target.c.inc
78
+++ b/tcg/aarch64/tcg-target.c.inc
79
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
80
.out_rrr = tgen_eqv,
81
};
82
83
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
84
+{
85
+ tcg_out_ubfm(s, TCG_TYPE_I64, a0, a1, 32, 63);
86
+}
87
+
88
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
89
+ .base.static_constraint = C_O1_I1(r, r),
90
+ .out_rr = tgen_extrh_i64_i32,
91
+};
92
+
93
static void tgen_mul(TCGContext *s, TCGType type,
94
TCGReg a0, TCGReg a1, TCGReg a2)
95
{
96
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
97
index XXXXXXX..XXXXXXX 100644
98
--- a/tcg/i386/tcg-target.c.inc
99
+++ b/tcg/i386/tcg-target.c.inc
100
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
101
.base.static_constraint = C_NotImplemented,
102
};
103
104
+#if TCG_TARGET_REG_BITS == 64
105
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
106
+{
107
+ tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
108
+}
109
+
110
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
111
+ .base.static_constraint = C_O1_I1(r, 0),
112
+ .out_rr = tgen_extrh_i64_i32,
113
+};
114
+#endif /* TCG_TARGET_REG_BITS == 64 */
115
+
116
static void tgen_mul(TCGContext *s, TCGType type,
117
TCGReg a0, TCGReg a1, TCGReg a2)
118
{
119
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
120
.out_rr = tgen_sextract,
121
};
122
123
+
124
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
125
const TCGArg args[TCG_MAX_OP_ARGS],
126
const int const_args[TCG_MAX_OP_ARGS])
127
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
128
tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
129
}
130
break;
131
-
132
- case INDEX_op_extrh_i64_i32:
133
- tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
134
- break;
135
#endif
136
137
OP_32_64(deposit):
138
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
139
case INDEX_op_st_i64:
140
return C_O0_I2(re, r);
141
142
- case INDEX_op_extrh_i64_i32:
143
- return C_O1_I1(r, 0);
144
-
145
case INDEX_op_extract2_i32:
146
case INDEX_op_extract2_i64:
147
return C_O1_I2(r, 0, r);
148
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
149
index XXXXXXX..XXXXXXX 100644
150
--- a/tcg/loongarch64/tcg-target.c.inc
151
+++ b/tcg/loongarch64/tcg-target.c.inc
152
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
153
.base.static_constraint = C_NotImplemented,
154
};
155
156
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
157
+{
158
+ tcg_out_opc_srai_d(s, a0, a1, 32);
159
+}
160
+
161
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
162
+ .base.static_constraint = C_O1_I1(r, r),
163
+ .out_rr = tgen_extrh_i64_i32,
164
+};
165
+
166
static void tgen_mul(TCGContext *s, TCGType type,
167
TCGReg a0, TCGReg a1, TCGReg a2)
168
{
169
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
170
tcg_out_opc_b(s, 0);
171
break;
172
173
- case INDEX_op_extrh_i64_i32:
- tcg_out_opc_srai_d(s, a0, a1, 32);
- break;
-
case INDEX_op_deposit_i32:
tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(r, r, r);

- case INDEX_op_extrh_i64_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
case INDEX_op_ld8u_i32:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.base.static_constraint = C_NotImplemented,
};

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_dsra(s, a0, a1, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+#endif
+
static void tgen_mul(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_extrh_i64_i32:
- tcg_out_dsra(s, a0, a1, 32);
- break;
-
case INDEX_op_deposit_i32:
tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32s_i64:
case INDEX_op_ld32u_i64:
case INDEX_op_ld_i64:
- case INDEX_op_extrh_i64_i32:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_eqv(TCGContext *s, TCGType type,
tcg_out32(s, EQV | SAB(a1, a0, a2));
}

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_shri64(s, a0, a1, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+#endif
+
static void tgen_divs(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
static void tgen_mul(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;

- case INDEX_op_extrh_i64_i32:
- tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_ld32s_i64:
case INDEX_op_ld32u_i64:
case INDEX_op_ld_i64:
- case INDEX_op_extrh_i64_i32:
return C_O1_I1(r, r);

case INDEX_op_st8_i32:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_sh64(s, RSY_SRLG, a0, a1, TCG_REG_NONE, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
static void tgen_mul(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.base.static_constraint = C_NotImplemented,
};

+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
static void tgen_mul(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_eqv = {
.out_rrr = tgen_eqv,
};

+#if TCG_TARGET_REG_BITS == 64
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_extract(s, TCG_TYPE_I64, a0, a1, 32, 32);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+#endif
+
static void tgen_mul(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
--
2.43.0
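For reference, the operation each backend implements above is simply the high half of a 64-bit value; a minimal C sketch of the semantics (the helper name here is illustrative, not a QEMU API):

    /* extrh_i64_i32: extract the high 32 bits of a 64-bit value. */
    static inline uint32_t extrh_i64_i32(uint64_t arg)
    {
        return (uint32_t)(arg >> 32);
    }

Each tgen_extrh_i64_i32 above therefore emits a single right shift by 32; whether that shift is arithmetic (LoongArch SRAI, MIPS DSRA, RISC-V SRAI) or logical (s390x SRLG, SPARC SRLX) appears to follow each backend's convention for holding 32-bit values in 64-bit host registers.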
Deleted patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 33 ++++++++++++++++
tcg/tci.c | 8 ++--
tcg/aarch64/tcg-target.c.inc | 30 +++++--------
tcg/arm/tcg-target.c.inc | 29 ++++++------
tcg/i386/tcg-target.c.inc | 76 ++++++++++++++++----------------
tcg/loongarch64/tcg-target.c.inc | 27 +++++++-----
tcg/mips/tcg-target.c.inc | 27 +++++++-----
tcg/ppc/tcg-target.c.inc | 44 +++++++++---------
tcg/riscv/tcg-target.c.inc | 4 ++
tcg/s390x/tcg-target.c.inc | 60 +++++++++++++------------
tcg/sparc64/tcg-target.c.inc | 4 ++
tcg/tci/tcg-target.c.inc | 19 ++++----
12 files changed, 206 insertions(+), 155 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpBswap {
TCGReg a0, TCGReg a1, unsigned flags);
} TCGOutOpBswap;

+typedef struct TCGOutOpDeposit {
+ TCGOutOp base;
+ void (*out_rrr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len);
+ void (*out_rri)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len);
+ void (*out_rzr)(TCGContext *s, TCGType type, TCGReg a0,
+ TCGReg a2, unsigned ofs, unsigned len);
+} TCGOutOpDeposit;
+
typedef struct TCGOutOpDivRem {
TCGOutOp base;
void (*out_rr01r)(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
+ OUTOP(INDEX_op_deposit_i32, TCGOutOpDeposit, outop_deposit),
+ OUTOP(INDEX_op_deposit_i64, TCGOutOpDeposit, outop_deposit),
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ {
+ const TCGOutOpDeposit *out = &outop_deposit;
+
+ if (const_args[2]) {
+ tcg_debug_assert(!const_args[1]);
+ out->out_rri(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3], new_args[4]);
+ } else if (const_args[1]) {
+ tcg_debug_assert(new_args[1] == 0);
+ tcg_debug_assert(!const_args[2]);
+ out->out_rzr(s, type, new_args[0], new_args[2],
+ new_args[3], new_args[4]);
+ } else {
+ out->out_rrr(s, type, new_args[0], new_args[1],
+ new_args[2], new_args[3], new_args[4]);
+ }
+ }
+ break;
+
case INDEX_op_divs2:
case INDEX_op_divu2:
{
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@

#define ctpop_tr glue(ctpop, TCG_TARGET_REG_BITS)
+#define deposit_tr glue(deposit, TCG_TARGET_REG_BITS)
#define extract_tr glue(extract, TCG_TARGET_REG_BITS)
#define sextract_tr glue(sextract, TCG_TARGET_REG_BITS)

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
regs[r0] = ror32(regs[r1], regs[r2] & 31);
break;
case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
- regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
+ regs[r0] = deposit_tr(regs[r1], pos, len, regs[r2]);
break;
case INDEX_op_extract:
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ror64(regs[r1], regs[r2] & 63);
break;
- case INDEX_op_deposit_i64:
- tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
- regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
- break;
case INDEX_op_ext_i32_i64:
tci_args_rr(insn, &r0, &r1);
regs[r0] = (int32_t)regs[r1];
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
}

-static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
- TCGReg rn, unsigned lsb, unsigned width)
-{
- unsigned size = ext ? 64 : 32;
- unsigned a = (size - lsb) & (size - 1);
- unsigned b = width - 1;
- tcg_out_bfm(s, ext, rd, rn, a, b);
-}
-
static void tgen_cmp(TCGContext *s, TCGType ext, TCGCond cond,
TCGReg a, TCGReg b)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
.out = tgen_movcond,
};

+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_bfm(s, type, a0, a2, -ofs & mask, len - 1);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rz),
+ .out_rrr = tgen_deposit,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
break;

- case INDEX_op_deposit_i64:
- case INDEX_op_deposit_i32:
- tcg_out_dep(s, ext, a0, a2, args[3], args[4]);
- break;
-
case INDEX_op_extract2_i64:
case INDEX_op_extract2_i32:
tcg_out_extr(s, ext, a0, a2, a1, args[3]);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(rz, rz, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, rz);
-
case INDEX_op_extract2_i32:
case INDEX_op_extract2_i64:
return C_O1_I2(r, rz, rz);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
g_assert_not_reached();
}

-static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
- TCGArg a1, int ofs, int len, bool const_a1)
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
{
- if (const_a1) {
- /* bfi becomes bfc with rn == 15. */
- a1 = 15;
- }
/* bfi/bfc */
- tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
+ tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a1
| (ofs << 7) | ((ofs + len - 1) << 16));
}

+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ /* bfi becomes bfc with rn == 15. */
+ tgen_deposit(s, type, a0, a1, 15, ofs, len);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rZ),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn,
unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;

- case INDEX_op_deposit_i32:
- tcg_out_deposit(s, COND_AL, args[0], args[2],
- args[3], args[4], const_args[2]);
- break;
case INDEX_op_extract2_i32:
/* ??? These optimization vs zero should be generic. */
/* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i32:
return C_O0_I2(r, r);

- case INDEX_op_deposit_i32:
- return C_O1_I2(r, 0, rZ);
case INDEX_op_extract2_i32:
return C_O1_I2(r, rZ, rZ);
case INDEX_op_add2_i32:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len == 8) {
+ tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
+ } else if (ofs == 0 && len == 16) {
+ tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
+ } else if (TCG_TARGET_REG_BITS == 32 && ofs == 8 && len == 8) {
+ tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ if (ofs == 0 && len == 8) {
+ tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0), 0, a0, 0);
+ tcg_out8(s, a2);
+ } else if (ofs == 0 && len == 16) {
+ tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0), 0, a0, 0);
+ tcg_out16(s, a2);
+ } else if (TCG_TARGET_REG_BITS == 32 && ofs == 8 && len == 8) {
+ tcg_out8(s, OPC_MOVB_Ib + a0 + 4);
+ tcg_out8(s, a2);
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(q, 0, qi),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2;
- int const_a2, rexw;
+ int rexw;

#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a0 = args[0];
a1 = args[1];
a2 = args[2];
- const_a2 = const_args[2];
rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;

switch (opc) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
break;
#endif

- OP_32_64(deposit):
- if (args[3] == 0 && args[4] == 8) {
- /* load bits 0..7 */
- if (const_a2) {
- tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0),
- 0, a0, 0);
- tcg_out8(s, a2);
- } else {
- tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
- }
- } else if (TCG_TARGET_REG_BITS == 32 && args[3] == 8 && args[4] == 8) {
- /* load bits 8..15 */
- if (const_a2) {
- tcg_out8(s, OPC_MOVB_Ib + a0 + 4);
- tcg_out8(s, a2);
- } else {
- tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
- }
- } else if (args[3] == 0 && args[4] == 16) {
- /* load bits 0..15 */
- if (const_a2) {
- tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0),
- 0, a0, 0);
- tcg_out16(s, a2);
- } else {
- tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
- }
- } else {
- g_assert_not_reached();
- }
- break;
-
OP_32_64(extract2):
/* Note that SHRD outputs to the r/m operand. */
tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extract2_i64:
return C_O1_I2(r, 0, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(q, 0, qi);
-
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bstrins_w(s, a0, a2, ofs, ofs + len - 1);
+ } else {
+ tcg_out_opc_bstrins_d(s, a0, a2, ofs, ofs + len - 1);
+ }
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rz),
+ .out_rrr = tgen_deposit,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_opc_b(s, 0);
break;

- case INDEX_op_deposit_i32:
- tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
- break;
- case INDEX_op_deposit_i64:
- tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
- break;
-
case INDEX_op_ld8s_i32:
case INDEX_op_ld8s_i64:
tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- /* Must deposit into the same register as input */
- return C_O1_I2(r, 0, rz);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
case INDEX_op_dup_vec:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_opc_bf(s, OPC_INS, a0, a2, ofs + len - 1, ofs);
+ } else {
+ tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
+ ofs + len - 1, ofs);
+ }
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rz),
+ .out_rrr = tgen_deposit,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, i1, a0, a1, a2);
break;

- case INDEX_op_deposit_i32:
- tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
- break;
- case INDEX_op_deposit_i64:
- tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
- args[3] + args[4] - 1, args[3]);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(rz, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, rz);
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return C_O2_I4(r, r, rz, rz, rN, rN);
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_rlw(s, RLWIMI, a0, a2, ofs, 32 - ofs - len, 31 - ofs);
+ } else {
+ tcg_out_rld(s, RLDIMI, a0, a2, ofs, 64 - ofs - len);
+ }
+}
+
+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len));
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, 0, rZ),
+ .out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

- case INDEX_op_deposit_i32:
- if (const_args[2]) {
- uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
- tcg_out_andi32(s, args[0], args[0], ~mask);
- } else {
- tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
- 32 - args[3] - args[4], 31 - args[3]);
- }
- break;
- case INDEX_op_deposit_i64:
- if (const_args[2]) {
- uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
- tcg_out_andi64(s, args[0], args[0], ~mask);
- } else {
- tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
- 64 - args[3] - args[4]);
- }
- break;
-
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_add2_i64:
#else
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, 0, rZ);
case INDEX_op_add2_i64:
case INDEX_op_add2_i32:
return C_O2_I4(r, r, r, r, rI, rZM);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
.out = tgen_movcond,
};

-static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
- int ofs, int len, int z)
+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
{
- int lsb = (63 - ofs);
- int msb = lsb - (len - 1);
- tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
+ unsigned lsb = (63 - ofs);
+ unsigned msb = lsb - (len - 1);
+
+ /*
+ * Since we can't support "0Z" as a constraint, we allow a1 in
+ * any register. Fix things up as if a matching constraint.
+ */
+ if (a0 != a1) {
+ if (a0 == a2) {
+ tcg_out_mov(s, type, TCG_TMP0, a2);
+ a2 = TCG_TMP0;
+ }
+ tcg_out_mov(s, type, a0, a1);
+ }
+ tcg_out_risbg(s, a0, a2, msb, lsb, ofs, false);
}

+static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2,
+ unsigned ofs, unsigned len)
+{
+ unsigned lsb = (63 - ofs);
+ unsigned msb = lsb - (len - 1);
+ tcg_out_risbg(s, a0, a2, msb, lsb, ofs, true);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, rZ, r),
+ .out_rrr = tgen_deposit,
+ .out_rzr = tgen_depositz,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg dest,
TCGReg src, unsigned ofs, unsigned len)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0, a1, a2;
+ TCGArg a0;

switch (opc) {
case INDEX_op_goto_ptr:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
break;

- OP_32_64(deposit):
- a0 = args[0], a1 = args[1], a2 = args[2];
- if (const_args[1]) {
- tgen_deposit(s, a0, a2, args[3], args[4], 1);
- } else {
- /* Since we can't support "0Z" as a constraint, we allow a1 in
- any register. Fix things up as if a matching constraint. */
- if (a0 != a1) {
- if (a0 == a2) {
- tcg_out_mov(s, type, TCG_TMP0, a2);
- a2 = TCG_TMP0;
- }
- tcg_out_mov(s, type, a0, a1);
- }
- tgen_deposit(s, a0, a2, args[3], args[4], 0);
- }
- break;
-
case INDEX_op_mb:
/* The host memory model is quite strong, we simply need to
serialize the instruction stream. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(o, m, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, rZ, r);
-
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return C_N1_O1_I4(r, r, 0, 1, ri, r);
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_not = {
.out_rr = tgen_not,
};

+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
unsigned ofs, unsigned len)
{
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
- return C_O1_I2(r, r, r);
-
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_ctz = {
.out_rrr = tgen_ctz,
};

+static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ TCGReg a2, unsigned ofs, unsigned len)
+{
+ tcg_out_op_rrrbb(s, INDEX_op_deposit_i64, a0, a1, a2, ofs, len);
+}
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_deposit,
+};
+
static void tgen_divs(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_ldst(s, opc, args[0], args[1], args[2]);
break;

- CASE_32_64(deposit)
- tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
- break;
-
CASE_32_64(add2)
CASE_32_64(sub2)
tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
--
2.43.0
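For reference, the operation behind the new TCGOutOpDeposit hooks follows the semantics of the deposit32/deposit64 helpers that tci.c uses above; a minimal C sketch:

    /* Insert the low 'len' bits of 'field' into 'val' at bit 'pos'. */
    static inline uint64_t deposit64(uint64_t val, int pos, int len, uint64_t field)
    {
        uint64_t mask = (~0ULL >> (64 - len)) << pos;
        return (val & ~mask) | ((field << pos) & mask);
    }

The three hooks split this one operation by operand kind: out_rrr takes a register field, out_rri a constant field, and out_rzr a zero first input, matching the const_args dispatch added to tcg_reg_alloc_op in this patch.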
Deleted patch

Use ANDI for deposit 0 into a register.
Use UBFIZ, aka UBFM, for deposit register into 0.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-con-set.h | 2 +-
tcg/aarch64/tcg-target.c.inc | 29 ++++++++++++++++++++++++++++-
2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
C_O1_I1(w, wr)
-C_O1_I2(r, 0, rz)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rA)
C_O1_I2(r, r, rAL)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
C_O1_I2(r, rz, rz)
+C_O1_I2(r, rZ, rZ)
C_O1_I2(w, 0, w)
C_O1_I2(w, w, w)
C_O1_I2(w, w, wN)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
TCGReg a2, unsigned ofs, unsigned len)
{
unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
+
+ /*
+ * Since we can't support "0Z" as a constraint, we allow a1 in
+ * any register. Fix things up as if a matching constraint.
+ */
+ if (a0 != a1) {
+ if (a0 == a2) {
+ tcg_out_mov(s, type, TCG_REG_TMP0, a2);
+ a2 = TCG_REG_TMP0;
+ }
+ tcg_out_mov(s, type, a0, a1);
+ }
tcg_out_bfm(s, type, a0, a2, -ofs & mask, len - 1);
}

+static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ tcg_target_long a2, unsigned ofs, unsigned len)
+{
+ tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len));
+}
+
+static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2,
+ unsigned ofs, unsigned len)
+{
+ int max = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_ubfm(s, type, a0, a2, -ofs & max, len - 1);
+}
+
static const TCGOutOpDeposit outop_deposit = {
- .base.static_constraint = C_O1_I2(r, 0, rz),
+ .base.static_constraint = C_O1_I2(r, rZ, rZ),
.out_rrr = tgen_deposit,
+ .out_rri = tgen_depositi,
+ .out_rzr = tgen_depositz,
};

static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
--
2.43.0
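The two new aarch64 special cases are algebraic reductions of the same deposit operation; a hedged C sketch of what the single ANDI or UBFIZ computes (function names here are illustrative only):

    /* out_rri with constant 0: depositing zero just clears the field. */
    static inline uint64_t deposit_const_zero(uint64_t a1, unsigned ofs, unsigned len)
    {
        return a1 & ~((~0ULL >> (64 - len)) << ofs);    /* one ANDI */
    }

    /* out_rzr: depositing into zero isolates and positions the field. */
    static inline uint64_t deposit_into_zero(uint64_t a2, unsigned ofs, unsigned len)
    {
        return (a2 & (~0ULL >> (64 - len))) << ofs;     /* one UBFIZ */
    }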
Deleted patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 3 +--
tcg/optimize.c | 2 +-
tcg/tcg-op.c | 8 ++++----
tcg/tcg.c | 9 +++------
tcg/tci.c | 6 ++----
docs/devel/tcg-ops.rst | 6 ++++--
tcg/tci/tcg-target.c.inc | 2 +-
7 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(bswap64, 1, 1, 1, TCG_OPF_INT)
DEF(clz, 1, 2, 0, TCG_OPF_INT)
DEF(ctpop, 1, 1, 0, TCG_OPF_INT)
DEF(ctz, 1, 2, 0, TCG_OPF_INT)
+DEF(deposit, 1, 2, 2, TCG_OPF_INT)
DEF(divs, 1, 2, 0, TCG_OPF_INT)
DEF(divs2, 2, 3, 0, TCG_OPF_INT)
DEF(divu, 1, 2, 0, TCG_OPF_INT)
@@ -XXX,XX +XXX,XX @@ DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* shifts/rotates */
-DEF(deposit_i32, 1, 2, 2, 0)
DEF(extract2_i32, 1, 2, 1, 0)

DEF(add2_i32, 2, 4, 0, 0)
@@ -XXX,XX +XXX,XX @@ DEF(st16_i64, 0, 2, 1, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* shifts/rotates */
-DEF(deposit_i64, 1, 2, 2, 0)
DEF(extract2_i64, 1, 2, 1, 0)

/* size changing ops */
diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_ctpop:
done = fold_ctpop(&ctx, op);
break;
- CASE_OP_32_64(deposit):
+ case INDEX_op_deposit:
done = fold_deposit(&ctx, op);
break;
case INDEX_op_divs:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
return;
}
if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) {
- tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
+ tcg_gen_op5ii_i32(INDEX_op_deposit, ret, arg1, arg2, ofs, len);
return;
}

@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
} else if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) {
TCGv_i32 zero = tcg_constant_i32(0);
- tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
+ tcg_gen_op5ii_i32(INDEX_op_deposit, ret, zero, arg, ofs, len);
} else {
/*
* To help two-operand hosts we prefer to zero-extend first,
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,

if (TCG_TARGET_REG_BITS == 64) {
if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) {
- tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
+ tcg_gen_op5ii_i64(INDEX_op_deposit, ret, arg1, arg2, ofs, len);
return;
}
} else {
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
} else if (TCG_TARGET_REG_BITS == 64 &&
TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) {
TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
+ tcg_gen_op5ii_i64(INDEX_op_deposit, ret, zero, arg, ofs, len);
} else {
if (TCG_TARGET_REG_BITS == 32) {
if (ofs >= 32) {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_clz, TCGOutOpBinary, outop_clz),
OUTOP(INDEX_op_ctpop, TCGOutOpUnary, outop_ctpop),
OUTOP(INDEX_op_ctz, TCGOutOpBinary, outop_ctz),
- OUTOP(INDEX_op_deposit_i32, TCGOutOpDeposit, outop_deposit),
- OUTOP(INDEX_op_deposit_i64, TCGOutOpDeposit, outop_deposit),
+ OUTOP(INDEX_op_deposit, TCGOutOpDeposit, outop_deposit),
OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
OUTOP(INDEX_op_divu, TCGOutOpBinary, outop_divu),
OUTOP(INDEX_op_divs2, TCGOutOpDivRem, outop_divs2),
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_add:
case INDEX_op_and:
case INDEX_op_brcond:
+ case INDEX_op_deposit:
case INDEX_op_extract:
case INDEX_op_mov:
case INDEX_op_movcond:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_deposit_i32:
return true;

case INDEX_op_extract2_i32:
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_extu_i32_i64:
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
- case INDEX_op_deposit_i64:
return TCG_TARGET_REG_BITS == 64;

case INDEX_op_extract2_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
break;

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
+ case INDEX_op_deposit:
{
const TCGOutOpDeposit *out = &outop_deposit;

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = ror32(regs[r1], regs[r2] & 31);
break;
- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
+ case INDEX_op_deposit:
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
regs[r0] = deposit_tr(regs[r1], pos, len, regs[r2]);
break;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1), str_r(r2));
break;

- case INDEX_op_deposit_i32:
- case INDEX_op_deposit_i64:
+ case INDEX_op_deposit:
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index XXXXXXX..XXXXXXX 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -XXX,XX +XXX,XX @@ Misc
- | Indicate that the value of *t0* won't be used later. It is useful to
force dead code elimination.

- * - deposit_i32/i64 *dest*, *t1*, *t2*, *pos*, *len*
+ * - deposit *dest*, *t1*, *t2*, *pos*, *len*

- | Deposit *t2* as a bitfield into *t1*, placing the result in *dest*.
|
@@ -XXX,XX +XXX,XX @@ Misc
| *len* - the length of the bitfield
| *pos* - the position of the first bit, counting from the LSB
|
- | For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field
+ | For example, "deposit dest, t1, t2, 8, 4" indicates a 4-bit field
at bit 8. This operation would be equivalent to
|
| *dest* = (*t1* & ~0x0f00) | ((*t2* << 8) & 0x0f00)
+ |
+ | on TCG_TYPE_I32.

* - extract *dest*, *t1*, *pos*, *len*

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_ctz = {
static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
TCGReg a2, unsigned ofs, unsigned len)
{
- tcg_out_op_rrrbb(s, INDEX_op_deposit_i64, a0, a1, a2, ofs, len);
+ tcg_out_op_rrrbb(s, INDEX_op_deposit, a0, a1, a2, ofs, len);
}

static const TCGOutOpDeposit outop_deposit = {
--
2.43.0
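The worked example in the tcg-ops.rst hunk above can be checked with concrete values; for instance, with t1 = 0xffffffff and t2 = 0x5, "deposit dest, t1, t2, 8, 4" computes:

    dest = (0xffffffff & ~0x0f00) | ((0x5 << 8) & 0x0f00)
         = 0xfffff0ff | 0x00000500
         = 0xfffff5ff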
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/aarch64/tcg-target-has.h | 2 --
5
tcg/arm/tcg-target-has.h | 1 -
6
tcg/i386/tcg-target-has.h | 2 --
7
tcg/loongarch64/tcg-target-has.h | 2 --
8
tcg/mips/tcg-target-has.h | 6 -----
9
tcg/ppc/tcg-target-has.h | 2 --
10
tcg/riscv/tcg-target-has.h | 2 --
11
tcg/s390x/tcg-target-has.h | 2 --
12
tcg/sparc64/tcg-target-has.h | 2 --
13
tcg/tcg-has.h | 1 -
14
tcg/tci/tcg-target-has.h | 2 --
15
tcg/tcg-op.c | 12 +++++-----
16
tcg/tcg.c | 24 ++++++++++++++++----
17
tcg/aarch64/tcg-target.c.inc | 20 +++++++++--------
18
tcg/arm/tcg-target.c.inc | 38 ++++++++++++--------------------
19
tcg/i386/tcg-target.c.inc | 25 ++++++++++++---------
20
tcg/loongarch64/tcg-target.c.inc | 5 +++++
21
tcg/mips/tcg-target.c.inc | 5 +++++
22
tcg/ppc/tcg-target.c.inc | 4 ++++
23
tcg/riscv/tcg-target.c.inc | 5 +++++
24
tcg/s390x/tcg-target.c.inc | 4 ++++
25
tcg/sparc64/tcg-target.c.inc | 4 ++++
26
tcg/tci/tcg-target.c.inc | 4 ++++
27
23 files changed, 97 insertions(+), 77 deletions(-)
28
1
29
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/aarch64/tcg-target-has.h
32
+++ b/tcg/aarch64/tcg-target-has.h
33
@@ -XXX,XX +XXX,XX @@
34
#define have_lse2 (cpuinfo & CPUINFO_LSE2)
35
36
/* optional instructions */
37
-#define TCG_TARGET_HAS_extract2_i32 1
38
#define TCG_TARGET_HAS_add2_i32 1
39
#define TCG_TARGET_HAS_sub2_i32 1
40
#define TCG_TARGET_HAS_extr_i64_i32 0
41
#define TCG_TARGET_HAS_qemu_st8_i32 0
42
43
-#define TCG_TARGET_HAS_extract2_i64 1
44
#define TCG_TARGET_HAS_add2_i64 1
45
#define TCG_TARGET_HAS_sub2_i64 1
46
47
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
48
index XXXXXXX..XXXXXXX 100644
49
--- a/tcg/arm/tcg-target-has.h
50
+++ b/tcg/arm/tcg-target-has.h
51
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
52
#endif
53
54
/* optional instructions */
55
-#define TCG_TARGET_HAS_extract2_i32 1
56
#define TCG_TARGET_HAS_qemu_st8_i32 0
57
58
#define TCG_TARGET_HAS_qemu_ldst_i128 0
59
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
60
index XXXXXXX..XXXXXXX 100644
61
--- a/tcg/i386/tcg-target-has.h
62
+++ b/tcg/i386/tcg-target-has.h
63
@@ -XXX,XX +XXX,XX @@
64
#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
65
66
/* optional instructions */
67
-#define TCG_TARGET_HAS_extract2_i32 1
68
#define TCG_TARGET_HAS_add2_i32 1
69
#define TCG_TARGET_HAS_sub2_i32 1
70
71
#if TCG_TARGET_REG_BITS == 64
72
/* Keep 32-bit values zero-extended in a register. */
73
#define TCG_TARGET_HAS_extr_i64_i32 1
74
-#define TCG_TARGET_HAS_extract2_i64 1
75
#define TCG_TARGET_HAS_add2_i64 1
76
#define TCG_TARGET_HAS_sub2_i64 1
77
#define TCG_TARGET_HAS_qemu_st8_i32 0
78
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
79
index XXXXXXX..XXXXXXX 100644
80
--- a/tcg/loongarch64/tcg-target-has.h
81
+++ b/tcg/loongarch64/tcg-target-has.h
82
@@ -XXX,XX +XXX,XX @@
83
#include "host/cpuinfo.h"
84
85
/* optional instructions */
86
-#define TCG_TARGET_HAS_extract2_i32 0
87
#define TCG_TARGET_HAS_add2_i32 0
88
#define TCG_TARGET_HAS_sub2_i32 0
89
#define TCG_TARGET_HAS_qemu_st8_i32 0
90
91
/* 64-bit operations */
92
-#define TCG_TARGET_HAS_extract2_i64 0
93
#define TCG_TARGET_HAS_extr_i64_i32 1
94
#define TCG_TARGET_HAS_add2_i64 0
95
#define TCG_TARGET_HAS_sub2_i64 0
96
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
97
index XXXXXXX..XXXXXXX 100644
98
--- a/tcg/mips/tcg-target-has.h
99
+++ b/tcg/mips/tcg-target-has.h
100
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
101
#endif
102
103
/* optional instructions detected at runtime */
104
-#define TCG_TARGET_HAS_extract2_i32 0
105
#define TCG_TARGET_HAS_qemu_st8_i32 0
106
-
107
-#if TCG_TARGET_REG_BITS == 64
108
-#define TCG_TARGET_HAS_extract2_i64 0
109
-#endif
110
-
111
#define TCG_TARGET_HAS_qemu_ldst_i128 0
112
#define TCG_TARGET_HAS_tst 0
113
114
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
115
index XXXXXXX..XXXXXXX 100644
116
--- a/tcg/ppc/tcg-target-has.h
117
+++ b/tcg/ppc/tcg-target-has.h
118
@@ -XXX,XX +XXX,XX @@
119
#define have_vsx (cpuinfo & CPUINFO_VSX)
120
121
/* optional instructions */
122
-#define TCG_TARGET_HAS_extract2_i32 0
123
#define TCG_TARGET_HAS_qemu_st8_i32 0
124
125
#if TCG_TARGET_REG_BITS == 64
126
#define TCG_TARGET_HAS_add2_i32 0
127
#define TCG_TARGET_HAS_sub2_i32 0
128
#define TCG_TARGET_HAS_extr_i64_i32 0
129
-#define TCG_TARGET_HAS_extract2_i64 0
130
#define TCG_TARGET_HAS_add2_i64 1
131
#define TCG_TARGET_HAS_sub2_i64 1
132
#endif
133
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
134
index XXXXXXX..XXXXXXX 100644
135
--- a/tcg/riscv/tcg-target-has.h
136
+++ b/tcg/riscv/tcg-target-has.h
137
@@ -XXX,XX +XXX,XX @@
138
#include "host/cpuinfo.h"
139
140
/* optional instructions */
141
-#define TCG_TARGET_HAS_extract2_i32 0
142
#define TCG_TARGET_HAS_add2_i32 1
143
#define TCG_TARGET_HAS_sub2_i32 1
144
#define TCG_TARGET_HAS_qemu_st8_i32 0
145
146
-#define TCG_TARGET_HAS_extract2_i64 0
147
#define TCG_TARGET_HAS_extr_i64_i32 1
148
#define TCG_TARGET_HAS_add2_i64 1
149
#define TCG_TARGET_HAS_sub2_i64 1
150
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
151
index XXXXXXX..XXXXXXX 100644
152
--- a/tcg/s390x/tcg-target-has.h
153
+++ b/tcg/s390x/tcg-target-has.h
154
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
155
((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
156
157
/* optional instructions */
158
-#define TCG_TARGET_HAS_extract2_i32 0
159
#define TCG_TARGET_HAS_add2_i32 1
160
#define TCG_TARGET_HAS_sub2_i32 1
161
#define TCG_TARGET_HAS_extr_i64_i32 0
162
#define TCG_TARGET_HAS_qemu_st8_i32 0
163
164
-#define TCG_TARGET_HAS_extract2_i64 0
165
#define TCG_TARGET_HAS_add2_i64 1
166
#define TCG_TARGET_HAS_sub2_i64 1
167
168
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
169
index XXXXXXX..XXXXXXX 100644
170
--- a/tcg/sparc64/tcg-target-has.h
171
+++ b/tcg/sparc64/tcg-target-has.h
172
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
173
#endif
174
175
/* optional instructions */
176
-#define TCG_TARGET_HAS_extract2_i32 0
177
#define TCG_TARGET_HAS_add2_i32 1
178
#define TCG_TARGET_HAS_sub2_i32 1
179
#define TCG_TARGET_HAS_qemu_st8_i32 0
180
181
#define TCG_TARGET_HAS_extr_i64_i32 0
182
-#define TCG_TARGET_HAS_extract2_i64 0
183
#define TCG_TARGET_HAS_add2_i64 1
184
#define TCG_TARGET_HAS_sub2_i64 1
185
186
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/tcg/tcg-has.h
189
+++ b/tcg/tcg-has.h
190
@@ -XXX,XX +XXX,XX @@
191
#if TCG_TARGET_REG_BITS == 32
192
/* Turn some undef macros into false macros. */
193
#define TCG_TARGET_HAS_extr_i64_i32 0
194
-#define TCG_TARGET_HAS_extract2_i64 0
195
#define TCG_TARGET_HAS_add2_i64 0
196
#define TCG_TARGET_HAS_sub2_i64 0
197
/* Turn some undef macros into true macros. */
198
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
199
index XXXXXXX..XXXXXXX 100644
200
--- a/tcg/tci/tcg-target-has.h
201
+++ b/tcg/tci/tcg-target-has.h
202
@@ -XXX,XX +XXX,XX @@
203
#ifndef TCG_TARGET_HAS_H
204
#define TCG_TARGET_HAS_H
205
206
-#define TCG_TARGET_HAS_extract2_i32 0
207
#define TCG_TARGET_HAS_qemu_st8_i32 0
208
209
#if TCG_TARGET_REG_BITS == 64
210
#define TCG_TARGET_HAS_extr_i64_i32 0
211
-#define TCG_TARGET_HAS_extract2_i64 0
212
#define TCG_TARGET_HAS_add2_i32 1
213
#define TCG_TARGET_HAS_sub2_i32 1
214
#define TCG_TARGET_HAS_add2_i64 1
215
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
216
index XXXXXXX..XXXXXXX 100644
217
--- a/tcg/tcg-op.c
218
+++ b/tcg/tcg-op.c
219
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
220
221
t1 = tcg_temp_ebb_new_i32();
222
223
- if (TCG_TARGET_HAS_extract2_i32) {
224
+ if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
225
if (ofs + len == 32) {
226
tcg_gen_shli_i32(t1, arg1, len);
227
tcg_gen_extract2_i32(ret, t1, arg2, len);
228
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
229
tcg_gen_mov_i32(ret, ah);
230
} else if (al == ah) {
231
tcg_gen_rotri_i32(ret, al, ofs);
232
- } else if (TCG_TARGET_HAS_extract2_i32) {
233
+ } else if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
234
tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
235
} else {
236
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
237
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
238
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
239
}
240
} else if (right) {
241
- if (TCG_TARGET_HAS_extract2_i32) {
242
+ if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
243
tcg_gen_extract2_i32(TCGV_LOW(ret),
244
TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
245
} else {
246
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
247
tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
248
}
249
} else {
250
- if (TCG_TARGET_HAS_extract2_i32) {
251
+ if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
252
tcg_gen_extract2_i32(TCGV_HIGH(ret),
253
TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
254
} else {
255
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
256
257
t1 = tcg_temp_ebb_new_i64();
258
259
- if (TCG_TARGET_HAS_extract2_i64) {
260
+ if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
261
if (ofs + len == 64) {
262
tcg_gen_shli_i64(t1, arg1, len);
263
tcg_gen_extract2_i64(ret, t1, arg2, len);
264
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
265
tcg_gen_mov_i64(ret, ah);
266
} else if (al == ah) {
267
tcg_gen_rotri_i64(ret, al, ofs);
268
- } else if (TCG_TARGET_HAS_extract2_i64) {
269
+ } else if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
270
tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
271
} else {
272
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
273
diff --git a/tcg/tcg.c b/tcg/tcg.c
274
index XXXXXXX..XXXXXXX 100644
275
--- a/tcg/tcg.c
276
+++ b/tcg/tcg.c
277
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpExtract {
278
unsigned ofs, unsigned len);
279
} TCGOutOpExtract;
280
281
+typedef struct TCGOutOpExtract2 {
282
+ TCGOutOp base;
283
+ void (*out_rrr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
284
+ TCGReg a2, unsigned shr);
285
+} TCGOutOpExtract2;
286
+
287
typedef struct TCGOutOpMovcond {
288
TCGOutOp base;
289
void (*out)(TCGContext *s, TCGType type, TCGCond cond,
290
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
291
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
292
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
293
OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
294
+ OUTOP(INDEX_op_extract2_i32, TCGOutOpExtract2, outop_extract2),
295
+ OUTOP(INDEX_op_extract2_i64, TCGOutOpExtract2, outop_extract2),
296
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
297
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
298
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
299
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
300
case INDEX_op_st_i32:
301
return true;
302
303
- case INDEX_op_extract2_i32:
304
- return TCG_TARGET_HAS_extract2_i32;
305
case INDEX_op_add2_i32:
306
return TCG_TARGET_HAS_add2_i32;
307
case INDEX_op_sub2_i32:
308
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
309
case INDEX_op_extrh_i64_i32:
310
return TCG_TARGET_REG_BITS == 64;
311
312
- case INDEX_op_extract2_i64:
313
- return TCG_TARGET_HAS_extract2_i64;
314
case INDEX_op_add2_i64:
315
return TCG_TARGET_HAS_add2_i64;
316
case INDEX_op_sub2_i64:
317
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
318
}
319
break;
320
321
+ case INDEX_op_extract2_i32:
322
+ case INDEX_op_extract2_i64:
323
+ {
324
+ const TCGOutOpExtract2 *out = &outop_extract2;
325
+
326
+ tcg_debug_assert(!const_args[1]);
327
+ tcg_debug_assert(!const_args[2]);
328
+ out->out_rrr(s, type, new_args[0], new_args[1],
329
+ new_args[2], new_args[3]);
330
+ }
331
+ break;
332
+
333
case INDEX_op_muls2:
334
case INDEX_op_mulu2:
335
{
336
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
337
index XXXXXXX..XXXXXXX 100644
338
--- a/tcg/aarch64/tcg-target.c.inc
339
+++ b/tcg/aarch64/tcg-target.c.inc
340
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
341
.out_rr = tgen_sextract,
342
};
343
344
+static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
345
+ TCGReg a1, TCGReg a2, unsigned shr)
346
+{
347
+ tcg_out_extr(s, type, a0, a2, a1, shr);
348
+}
349
+
350
+static const TCGOutOpExtract2 outop_extract2 = {
351
+ .base.static_constraint = C_O1_I2(r, rz, rz),
352
+ .out_rrr = tgen_extract2,
353
+};
354
+
355
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
356
const TCGArg args[TCG_MAX_OP_ARGS],
357
const int const_args[TCG_MAX_OP_ARGS])
358
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
359
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
360
break;
361
362
- case INDEX_op_extract2_i64:
363
- case INDEX_op_extract2_i32:
364
- tcg_out_extr(s, ext, a0, a2, a1, args[3]);
365
- break;
366
-
367
case INDEX_op_add2_i32:
368
tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
369
(int32_t)args[4], args[5], const_args[4],
370
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
371
case INDEX_op_qemu_st_i128:
372
return C_O0_I3(rz, rz, r);
373
374
- case INDEX_op_extract2_i32:
375
- case INDEX_op_extract2_i64:
376
- return C_O1_I2(r, rz, rz);
377
-
378
case INDEX_op_add2_i32:
379
case INDEX_op_add2_i64:
380
case INDEX_op_sub2_i32:
381
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
382
index XXXXXXX..XXXXXXX 100644
383
--- a/tcg/arm/tcg-target.c.inc
384
+++ b/tcg/arm/tcg-target.c.inc
385
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond2 outop_setcond2 = {
386
.out = tgen_setcond2,
387
};
388
389
+static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
390
+ TCGReg a1, TCGReg a2, unsigned shr)
391
+{
392
+ /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
393
+ tgen_shli(s, TCG_TYPE_I32, TCG_REG_TMP, a2, 32 - shr);
394
+ tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, TCG_REG_TMP,
395
+ a1, SHIFT_IMM_LSR(shr));
396
+}
397
+
398
+static const TCGOutOpExtract2 outop_extract2 = {
399
+ .base.static_constraint = C_O1_I2(r, r, r),
400
+ .out_rrr = tgen_extract2,
401
+};
402
+
403
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
404
const TCGArg args[TCG_MAX_OP_ARGS],
405
const int const_args[TCG_MAX_OP_ARGS])
406
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
407
tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
408
break;
409
410
- case INDEX_op_extract2_i32:
411
- /* ??? These optimization vs zero should be generic. */
412
- /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
413
- if (const_args[1]) {
414
- if (const_args[2]) {
415
- tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
416
- } else {
417
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
418
- args[2], SHIFT_IMM_LSL(32 - args[3]));
419
- }
420
- } else if (const_args[2]) {
421
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
422
- args[1], SHIFT_IMM_LSR(args[3]));
423
- } else {
424
- /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
425
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
426
- args[2], SHIFT_IMM_LSL(32 - args[3]));
427
- tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
428
- args[1], SHIFT_IMM_LSR(args[3]));
429
- }
430
- break;
431
-
432
case INDEX_op_mb:
433
tcg_out_mb(s, args[0]);
434
break;
435
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
436
case INDEX_op_st_i32:
437
return C_O0_I2(r, r);
438
439
- case INDEX_op_extract2_i32:
440
- return C_O1_I2(r, rZ, rZ);
441
case INDEX_op_add2_i32:
442
return C_O2_I4(r, r, r, r, rIN, rIK);
443
case INDEX_op_sub2_i32:
444
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
445
index XXXXXXX..XXXXXXX 100644
446
--- a/tcg/i386/tcg-target.c.inc
447
+++ b/tcg/i386/tcg-target.c.inc
448
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
449
.out_rr = tgen_sextract,
450
};
451
452
+static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
453
+ TCGReg a1, TCGReg a2, unsigned shr)
454
+{
455
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
456
+
457
+ /* Note that SHRD outputs to the r/m operand. */
458
+ tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
459
+ tcg_out8(s, shr);
460
+}
461
+
462
+static const TCGOutOpExtract2 outop_extract2 = {
463
+ .base.static_constraint = C_O1_I2(r, 0, r),
464
+ .out_rrr = tgen_extract2,
465
+};
466
+
467
468
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
469
const TCGArg args[TCG_MAX_OP_ARGS],
470
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
471
break;
472
#endif
473
474
- OP_32_64(extract2):
475
- /* Note that SHRD outputs to the r/m operand. */
476
- tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
477
- tcg_out8(s, args[3]);
478
- break;
479
-
480
case INDEX_op_mb:
481
tcg_out_mb(s, a0);
482
break;
483
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
484
case INDEX_op_st_i64:
485
return C_O0_I2(re, r);
486
487
- case INDEX_op_extract2_i32:
488
- case INDEX_op_extract2_i64:
489
- return C_O1_I2(r, 0, r);
490
-
491
case INDEX_op_add2_i32:
492
case INDEX_op_add2_i64:
493
case INDEX_op_sub2_i32:
494
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
495
index XXXXXXX..XXXXXXX 100644
496
--- a/tcg/loongarch64/tcg-target.c.inc
497
+++ b/tcg/loongarch64/tcg-target.c.inc
498
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
499
.out_rr = tgen_sextract,
500
};
501
502
+static const TCGOutOpExtract2 outop_extract2 = {
503
+ .base.static_constraint = C_NotImplemented,
504
+};
505
+
506
+
507
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
508
const TCGArg args[TCG_MAX_OP_ARGS],
509
const int const_args[TCG_MAX_OP_ARGS])
510
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
511
index XXXXXXX..XXXXXXX 100644
512
--- a/tcg/mips/tcg-target.c.inc
513
+++ b/tcg/mips/tcg-target.c.inc
514
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
515
.out_rr = tgen_sextract,
516
};
517
518
+static const TCGOutOpExtract2 outop_extract2 = {
519
+ .base.static_constraint = C_NotImplemented,
520
+};
521
+
522
+
523
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
524
const TCGArg args[TCG_MAX_OP_ARGS],
525
const int const_args[TCG_MAX_OP_ARGS])
526
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
527
index XXXXXXX..XXXXXXX 100644
528
--- a/tcg/ppc/tcg-target.c.inc
529
+++ b/tcg/ppc/tcg-target.c.inc
530
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
531
.out_rr = tgen_sextract,
532
};
533
534
+static const TCGOutOpExtract2 outop_extract2 = {
535
+ .base.static_constraint = C_NotImplemented,
536
+};
537
+
538
539
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
540
const TCGArg args[TCG_MAX_OP_ARGS],
541
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
542
index XXXXXXX..XXXXXXX 100644
543
--- a/tcg/riscv/tcg-target.c.inc
544
+++ b/tcg/riscv/tcg-target.c.inc
545
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
546
.out_rr = tgen_sextract,
547
};
548
549
+static const TCGOutOpExtract2 outop_extract2 = {
550
+ .base.static_constraint = C_NotImplemented,
551
+};
552
+
553
+
554
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
555
const TCGArg args[TCG_MAX_OP_ARGS],
556
const int const_args[TCG_MAX_OP_ARGS])
557
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
558
index XXXXXXX..XXXXXXX 100644
559
--- a/tcg/s390x/tcg-target.c.inc
560
+++ b/tcg/s390x/tcg-target.c.inc
561
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
562
.out_rr = tgen_sextract,
563
};
564
565
+static const TCGOutOpExtract2 outop_extract2 = {
566
+ .base.static_constraint = C_NotImplemented,
567
+};
568
+
569
static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
570
{
571
ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
572
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
573
index XXXXXXX..XXXXXXX 100644
574
--- a/tcg/sparc64/tcg-target.c.inc
575
+++ b/tcg/sparc64/tcg-target.c.inc
576
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
577
.out_rr = tgen_sextract,
578
};
579
580
+static const TCGOutOpExtract2 outop_extract2 = {
581
+ .base.static_constraint = C_NotImplemented,
582
+};
583
+
584
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
585
const TCGArg args[TCG_MAX_OP_ARGS],
586
const int const_args[TCG_MAX_OP_ARGS])
587
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
588
index XXXXXXX..XXXXXXX 100644
589
--- a/tcg/tci/tcg-target.c.inc
590
+++ b/tcg/tci/tcg-target.c.inc
591
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract outop_sextract = {
592
.out_rr = tcg_out_sextract,
593
};
594
595
+static const TCGOutOpExtract2 outop_extract2 = {
596
+ .base.static_constraint = C_NotImplemented,
597
+};
598
+
599
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
600
{
601
tcg_out_sextract(s, type, rd, rs, 0, 8);
602
--
603
2.43.0
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/tcg/tcg-opc.h | 5 +----
6
tcg/optimize.c | 10 +++++-----
7
tcg/tcg-op.c | 16 ++++++++--------
8
tcg/tcg.c | 6 ++----
9
docs/devel/tcg-ops.rst | 4 ++--
10
target/i386/tcg/emit.c.inc | 12 +-----------
11
6 files changed, 19 insertions(+), 34 deletions(-)
12
1
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(divu, 1, 2, 0, TCG_OPF_INT)
18
DEF(divu2, 2, 3, 0, TCG_OPF_INT)
19
DEF(eqv, 1, 2, 0, TCG_OPF_INT)
20
DEF(extract, 1, 1, 2, TCG_OPF_INT)
21
+DEF(extract2, 1, 2, 1, TCG_OPF_INT)
22
DEF(movcond, 1, 4, 1, TCG_OPF_INT)
23
DEF(mul, 1, 2, 0, TCG_OPF_INT)
24
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
25
@@ -XXX,XX +XXX,XX @@ DEF(ld_i32, 1, 1, 1, 0)
26
DEF(st8_i32, 0, 2, 1, 0)
27
DEF(st16_i32, 0, 2, 1, 0)
28
DEF(st_i32, 0, 2, 1, 0)
29
-/* shifts/rotates */
30
-DEF(extract2_i32, 1, 2, 1, 0)
31
32
DEF(add2_i32, 2, 4, 0, 0)
33
DEF(sub2_i32, 2, 4, 0, 0)
34
@@ -XXX,XX +XXX,XX @@ DEF(st8_i64, 0, 2, 1, 0)
35
DEF(st16_i64, 0, 2, 1, 0)
36
DEF(st32_i64, 0, 2, 1, 0)
37
DEF(st_i64, 0, 2, 1, 0)
38
-/* shifts/rotates */
39
-DEF(extract2_i64, 1, 2, 1, 0)
40
41
/* size changing ops */
42
DEF(ext_i32_i64, 1, 1, 0, 0)
43
diff --git a/tcg/optimize.c b/tcg/optimize.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/optimize.c
46
+++ b/tcg/optimize.c
47
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
48
uint64_t v2 = arg_info(op->args[2])->val;
49
int shr = op->args[3];
50
51
- if (op->opc == INDEX_op_extract2_i64) {
52
- v1 >>= shr;
53
- v2 <<= 64 - shr;
54
- } else {
55
+ if (ctx->type == TCG_TYPE_I32) {
56
v1 = (uint32_t)v1 >> shr;
57
v2 = (uint64_t)((int32_t)v2 << (32 - shr));
58
+ } else {
59
+ v1 >>= shr;
60
+ v2 <<= 64 - shr;
61
}
62
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
63
}
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
65
case INDEX_op_extract:
66
done = fold_extract(&ctx, op);
67
break;
68
- CASE_OP_32_64(extract2):
69
+ case INDEX_op_extract2:
70
done = fold_extract2(&ctx, op);
71
break;
72
case INDEX_op_ext_i32_i64:
73
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/tcg/tcg-op.c
76
+++ b/tcg/tcg-op.c
77
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
78
79
t1 = tcg_temp_ebb_new_i32();
80
81
- if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
82
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
83
if (ofs + len == 32) {
84
tcg_gen_shli_i32(t1, arg1, len);
85
tcg_gen_extract2_i32(ret, t1, arg2, len);
86
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
87
tcg_gen_mov_i32(ret, ah);
88
} else if (al == ah) {
89
tcg_gen_rotri_i32(ret, al, ofs);
90
- } else if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
91
- tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
92
+ } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
93
+ tcg_gen_op4i_i32(INDEX_op_extract2, ret, al, ah, ofs);
94
} else {
95
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
96
tcg_gen_shri_i32(t0, al, ofs);
97
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
98
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
99
}
100
} else if (right) {
101
- if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
102
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
103
tcg_gen_extract2_i32(TCGV_LOW(ret),
104
TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
105
} else {
106
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
107
tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
108
}
109
} else {
110
- if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
111
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
112
tcg_gen_extract2_i32(TCGV_HIGH(ret),
113
TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
114
} else {
115
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
116
117
t1 = tcg_temp_ebb_new_i64();
118
119
- if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
120
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
121
if (ofs + len == 64) {
122
tcg_gen_shli_i64(t1, arg1, len);
123
tcg_gen_extract2_i64(ret, t1, arg2, len);
124
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
125
tcg_gen_mov_i64(ret, ah);
126
} else if (al == ah) {
127
tcg_gen_rotri_i64(ret, al, ofs);
128
- } else if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
129
- tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
130
+ } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
131
+ tcg_gen_op4i_i64(INDEX_op_extract2, ret, al, ah, ofs);
132
} else {
133
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
134
tcg_gen_shri_i64(t0, al, ofs);
135
diff --git a/tcg/tcg.c b/tcg/tcg.c
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/tcg.c
138
+++ b/tcg/tcg.c
139
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
140
OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
141
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
142
OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
143
- OUTOP(INDEX_op_extract2_i32, TCGOutOpExtract2, outop_extract2),
144
- OUTOP(INDEX_op_extract2_i64, TCGOutOpExtract2, outop_extract2),
145
+ OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
146
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
147
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
148
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
149
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
150
}
151
break;
152
153
- case INDEX_op_extract2_i32:
154
- case INDEX_op_extract2_i64:
155
+ case INDEX_op_extract2:
156
{
157
const TCGOutOpExtract2 *out = &outop_extract2;
158
159
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
160
index XXXXXXX..XXXXXXX 100644
161
--- a/docs/devel/tcg-ops.rst
162
+++ b/docs/devel/tcg-ops.rst
163
@@ -XXX,XX +XXX,XX @@ Misc
164
|
165
| (using an arithmetic right shift) on TCG_TYPE_I32.
166
167
- * - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
168
+ * - extract2 *dest*, *t1*, *t2*, *pos*
169
170
- - | For N = {32,64}, extract an N-bit quantity from the concatenation
171
+ - | For TCG_TYPE_I{N}, extract an N-bit quantity from the concatenation
172
of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander
173
accepts 0 <= *pos* <= N as inputs. The backend code generator will
174
not see either 0 or N as inputs for these opcodes.
175
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
176
index XXXXXXX..XXXXXXX 100644
177
--- a/target/i386/tcg/emit.c.inc
178
+++ b/target/i386/tcg/emit.c.inc
179
@@ -XXX,XX +XXX,XX @@
180
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
181
*/
182
183
-/*
184
- * Sometimes, knowing what the backend has can produce better code.
185
- * The exact opcode to check depends on 32- vs. 64-bit.
186
- */
187
-#ifdef TARGET_X86_64
188
-#define INDEX_op_extract2_tl INDEX_op_extract2_i64
189
-#else
190
-#define INDEX_op_extract2_tl INDEX_op_extract2_i32
191
-#endif
192
-
193
#define MMX_OFFSET(reg) \
194
({ assert((reg) >= 0 && (reg) <= 7); \
195
offsetof(CPUX86State, fpregs[reg].mmx); })
196
@@ -XXX,XX +XXX,XX @@ static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
197
tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
198
while (vec_len > 8) {
199
vec_len -= 8;
200
- if (tcg_op_supported(INDEX_op_extract2_tl, TCG_TYPE_TL, 0)) {
201
+ if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_TL, 0)) {
202
/*
203
* Load the next byte of the result into the high byte of T.
204
* TCG does a similar expansion of deposit to shl+extract2; by
205
--
206
2.43.0
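
For reference, the semantics being unified above can be written as a
minimal C sketch; the helper name and the 64-bit specialization are
illustrative only, not QEMU API:

    #include <stdint.h>

    /*
     * extract2 for TCG_TYPE_I64: take 64 bits from the 128-bit
     * concatenation t2:t1, starting at bit position pos.  The expander
     * guarantees 0 < pos < 64, so both shifts are well defined.
     */
    static uint64_t extract2_i64(uint64_t t1, uint64_t t2, unsigned pos)
    {
        return (t1 >> pos) | (t2 << (64 - pos));
    }

This is the same computation performed by the constant-folding case in
fold_extract2 above.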
207
208
Deleted patch
1
No need to expand to i64 to perform the add.
2
This is smaller on a loongarch64 host, e.g.
3
1
4
    bstrpick_d r28, r27, 31, 0
5
    bstrpick_d r29, r24, 31, 0
6
    add_d r28, r28, r29
7
    addi_w r29, r28, 0
8
    srai_d r28, r28, 32
9
---
10
    add_w r28, r27, r24
11
    sltu r29, r28, r24
12
13
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
16
tcg/tcg-op.c | 17 +++++++++--------
17
1 file changed, 9 insertions(+), 8 deletions(-)
18
19
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/tcg-op.c
22
+++ b/tcg/tcg-op.c
23
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
24
if (TCG_TARGET_HAS_add2_i32) {
25
tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
26
} else {
27
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
28
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
29
- tcg_gen_concat_i32_i64(t0, al, ah);
30
- tcg_gen_concat_i32_i64(t1, bl, bh);
31
- tcg_gen_add_i64(t0, t0, t1);
32
- tcg_gen_extr_i64_i32(rl, rh, t0);
33
- tcg_temp_free_i64(t0);
34
- tcg_temp_free_i64(t1);
35
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
36
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
37
+ tcg_gen_add_i32(t0, al, bl);
38
+ tcg_gen_setcond_i32(TCG_COND_LTU, t1, t0, al);
39
+ tcg_gen_add_i32(rh, ah, bh);
40
+ tcg_gen_add_i32(rh, rh, t1);
41
+ tcg_gen_mov_i32(rl, t0);
42
+ tcg_temp_free_i32(t0);
43
+ tcg_temp_free_i32(t1);
44
}
45
}
46
47
--
48
2.43.0
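
The expansion above derives the carry from an unsigned comparison
instead of widening to i64. As a standalone sketch (the function name
is illustrative, not QEMU API):

    #include <stdint.h>

    /*
     * 64-bit add from 32-bit halves.  After t0 = al + bl, a carry
     * occurred iff the truncated sum wrapped around, i.e. t0 < al.
     * The sub2 expansion in the next patch uses the analogous borrow
     * test al < bl.
     */
    static void add2_i32_sketch(uint32_t *rl, uint32_t *rh,
                                uint32_t al, uint32_t ah,
                                uint32_t bl, uint32_t bh)
    {
        uint32_t t0 = al + bl;
        uint32_t carry = t0 < al;   /* mirrors setcond LTU t0, al */

        *rh = ah + bh + carry;
        *rl = t0;
    }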
Deleted patch
1
No need to expand to i64 to perform the subtract.
2
1
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/tcg-op.c | 17 +++++++++--------
7
1 file changed, 9 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tcg-op.c
12
+++ b/tcg/tcg-op.c
13
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
14
if (TCG_TARGET_HAS_sub2_i32) {
15
tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
16
} else {
17
- TCGv_i64 t0 = tcg_temp_ebb_new_i64();
18
- TCGv_i64 t1 = tcg_temp_ebb_new_i64();
19
- tcg_gen_concat_i32_i64(t0, al, ah);
20
- tcg_gen_concat_i32_i64(t1, bl, bh);
21
- tcg_gen_sub_i64(t0, t0, t1);
22
- tcg_gen_extr_i64_i32(rl, rh, t0);
23
- tcg_temp_free_i64(t0);
24
- tcg_temp_free_i64(t1);
25
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
26
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
27
+ tcg_gen_sub_i32(t0, al, bl);
28
+ tcg_gen_setcond_i32(TCG_COND_LTU, t1, al, bl);
29
+ tcg_gen_sub_i32(rh, ah, bh);
30
+ tcg_gen_sub_i32(rh, rh, t1);
31
+ tcg_gen_mov_i32(rl, t0);
32
+ tcg_temp_free_i32(t0);
33
+ tcg_temp_free_i32(t1);
34
}
35
}
36
37
--
38
2.43.0
Deleted patch
1
Require TCG_TARGET_HAS_{add2,sub2}_i32 be defined,
2
one way or another.
3
1
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/arm/tcg-target-has.h | 2 ++
8
tcg/mips/tcg-target-has.h | 3 +++
9
tcg/ppc/tcg-target-has.h | 3 +++
10
tcg/tcg-has.h | 3 ---
11
tcg/tci/tcg-target-has.h | 4 ++--
12
5 files changed, 10 insertions(+), 5 deletions(-)
13
14
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/arm/tcg-target-has.h
17
+++ b/tcg/arm/tcg-target-has.h
18
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
19
#endif
20
21
/* optional instructions */
22
+#define TCG_TARGET_HAS_add2_i32 1
23
+#define TCG_TARGET_HAS_sub2_i32 1
24
#define TCG_TARGET_HAS_qemu_st8_i32 0
25
26
#define TCG_TARGET_HAS_qemu_ldst_i128 0
27
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/tcg/mips/tcg-target-has.h
30
+++ b/tcg/mips/tcg-target-has.h
31
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
32
#define TCG_TARGET_HAS_sub2_i64 0
33
#define TCG_TARGET_HAS_ext32s_i64 1
34
#define TCG_TARGET_HAS_ext32u_i64 1
35
+#else
36
+#define TCG_TARGET_HAS_add2_i32 1
37
+#define TCG_TARGET_HAS_sub2_i32 1
38
#endif
39
40
/* optional instructions detected at runtime */
41
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/ppc/tcg-target-has.h
44
+++ b/tcg/ppc/tcg-target-has.h
45
@@ -XXX,XX +XXX,XX @@
46
#define TCG_TARGET_HAS_extr_i64_i32 0
47
#define TCG_TARGET_HAS_add2_i64 1
48
#define TCG_TARGET_HAS_sub2_i64 1
49
+#else
50
+#define TCG_TARGET_HAS_add2_i32 1
51
+#define TCG_TARGET_HAS_sub2_i32 1
52
#endif
53
54
#define TCG_TARGET_HAS_qemu_ldst_i128 \
55
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/tcg-has.h
58
+++ b/tcg/tcg-has.h
59
@@ -XXX,XX +XXX,XX @@
60
#define TCG_TARGET_HAS_extr_i64_i32 0
61
#define TCG_TARGET_HAS_add2_i64 0
62
#define TCG_TARGET_HAS_sub2_i64 0
63
-/* Turn some undef macros into true macros. */
64
-#define TCG_TARGET_HAS_add2_i32 1
65
-#define TCG_TARGET_HAS_sub2_i32 1
66
#endif
67
68
#if !defined(TCG_TARGET_HAS_v64) \
69
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/tci/tcg-target-has.h
72
+++ b/tcg/tci/tcg-target-has.h
73
@@ -XXX,XX +XXX,XX @@
74
#define TCG_TARGET_HAS_H
75
76
#define TCG_TARGET_HAS_qemu_st8_i32 0
77
+#define TCG_TARGET_HAS_add2_i32 1
78
+#define TCG_TARGET_HAS_sub2_i32 1
79
80
#if TCG_TARGET_REG_BITS == 64
81
#define TCG_TARGET_HAS_extr_i64_i32 0
82
-#define TCG_TARGET_HAS_add2_i32 1
83
-#define TCG_TARGET_HAS_sub2_i32 1
84
#define TCG_TARGET_HAS_add2_i64 1
85
#define TCG_TARGET_HAS_sub2_i64 1
86
#endif /* TCG_TARGET_REG_BITS == 64 */
87
--
88
2.43.0
diff view generated by jsdifflib
Deleted patch
1
We now produce exactly the same code via generic expansion.
2
1
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/mips/tcg-target-con-set.h | 1 -
7
tcg/mips/tcg-target-con-str.h | 1 -
8
tcg/mips/tcg-target-has.h | 7 ++--
9
tcg/mips/tcg-target.c.inc | 67 +----------------------------------
10
4 files changed, 3 insertions(+), 73 deletions(-)
11
12
diff --git a/tcg/mips/tcg-target-con-set.h b/tcg/mips/tcg-target-con-set.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/mips/tcg-target-con-set.h
15
+++ b/tcg/mips/tcg-target-con-set.h
16
@@ -XXX,XX +XXX,XX @@ C_O1_I4(r, r, rz, rz, rz)
17
C_O1_I4(r, r, r, rz, rz)
18
C_O2_I1(r, r, r)
19
C_O2_I2(r, r, r, r)
20
-C_O2_I4(r, r, rz, rz, rN, rN)
21
diff --git a/tcg/mips/tcg-target-con-str.h b/tcg/mips/tcg-target-con-str.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/mips/tcg-target-con-str.h
24
+++ b/tcg/mips/tcg-target-con-str.h
25
@@ -XXX,XX +XXX,XX @@ REGS('r', ALL_GENERAL_REGS)
26
CONST('I', TCG_CT_CONST_U16)
27
CONST('J', TCG_CT_CONST_S16)
28
CONST('K', TCG_CT_CONST_P2M1)
29
-CONST('N', TCG_CT_CONST_N16)
30
CONST('W', TCG_CT_CONST_WSZ)
31
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/mips/tcg-target-has.h
34
+++ b/tcg/mips/tcg-target-has.h
35
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
36
#endif
37
38
/* optional instructions */
39
-
40
-#if TCG_TARGET_REG_BITS == 64
41
#define TCG_TARGET_HAS_add2_i32 0
42
#define TCG_TARGET_HAS_sub2_i32 0
43
+
44
+#if TCG_TARGET_REG_BITS == 64
45
#define TCG_TARGET_HAS_extr_i64_i32 1
46
#define TCG_TARGET_HAS_add2_i64 0
47
#define TCG_TARGET_HAS_sub2_i64 0
48
#define TCG_TARGET_HAS_ext32s_i64 1
49
#define TCG_TARGET_HAS_ext32u_i64 1
50
-#else
51
-#define TCG_TARGET_HAS_add2_i32 1
52
-#define TCG_TARGET_HAS_sub2_i32 1
53
#endif
54
55
/* optional instructions detected at runtime */
56
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
57
index XXXXXXX..XXXXXXX 100644
58
--- a/tcg/mips/tcg-target.c.inc
59
+++ b/tcg/mips/tcg-target.c.inc
60
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
61
#define TCG_CT_CONST_U16 0x100 /* Unsigned 16-bit: 0 - 0xffff. */
62
#define TCG_CT_CONST_S16 0x200 /* Signed 16-bit: -32768 - 32767 */
63
#define TCG_CT_CONST_P2M1 0x400 /* Power of 2 minus 1. */
64
-#define TCG_CT_CONST_N16 0x800 /* "Negatable" 16-bit: -32767 - 32767 */
65
-#define TCG_CT_CONST_WSZ 0x1000 /* word size */
66
+#define TCG_CT_CONST_WSZ 0x800 /* word size */
67
68
#define ALL_GENERAL_REGS 0xffffffffu
69
70
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
71
return 1;
72
} else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
73
return 1;
74
- } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) {
75
- return 1;
76
} else if ((ct & TCG_CT_CONST_P2M1)
77
&& use_mips32r2_instructions && is_p2m1(val)) {
78
return 1;
79
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
80
return false;
81
}
82
83
-static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
84
- TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
85
- bool cbh, bool is_sub)
86
-{
87
- TCGReg th = TCG_TMP1;
88
-
89
- /* If we have a negative constant such that negating it would
90
- make the high part zero, we can (usually) eliminate one insn. */
91
- if (cbl && cbh && bh == -1 && bl != 0) {
92
- bl = -bl;
93
- bh = 0;
94
- is_sub = !is_sub;
95
- }
96
-
97
- /* By operating on the high part first, we get to use the final
98
- carry operation to move back from the temporary. */
99
- if (!cbh) {
100
- tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh);
101
- } else if (bh != 0 || ah == rl) {
102
- tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh));
103
- } else {
104
- th = ah;
105
- }
106
-
107
- /* Note that tcg optimization should eliminate the bl == 0 case. */
108
- if (is_sub) {
109
- if (cbl) {
110
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl);
111
- tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl);
112
- } else {
113
- tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl);
114
- tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl);
115
- }
116
- tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0);
117
- } else {
118
- if (cbl) {
119
- tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl);
120
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl);
121
- } else if (rl == al && rl == bl) {
122
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, al, TCG_TARGET_REG_BITS - 1);
123
- tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
124
- } else {
125
- tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl);
126
- tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl));
127
- }
128
- tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0);
129
- }
130
-}
131
-
132
#define SETCOND_INV TCG_TARGET_NB_REGS
133
#define SETCOND_NEZ (SETCOND_INV << 1)
134
#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
135
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
136
}
137
break;
138
139
- case INDEX_op_add2_i32:
140
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
141
- const_args[4], const_args[5], false);
142
- break;
143
- case INDEX_op_sub2_i32:
144
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
145
- const_args[4], const_args[5], true);
146
- break;
147
-
148
case INDEX_op_mb:
149
tcg_out_mb(s, a0);
150
break;
151
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
152
case INDEX_op_st_i64:
153
return C_O0_I2(rz, r);
154
155
- case INDEX_op_add2_i32:
156
- case INDEX_op_sub2_i32:
157
- return C_O2_I4(r, r, rz, rz, rN, rN);
158
-
159
case INDEX_op_qemu_ld_i32:
160
return C_O1_I1(r, r);
161
case INDEX_op_qemu_st_i32:
162
--
163
2.43.0
Deleted patch
1
We now produce exactly the same code via generic expansion.
2
1
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/riscv/tcg-target-con-set.h | 1 -
7
tcg/riscv/tcg-target-has.h | 6 +--
8
tcg/riscv/tcg-target.c.inc | 86 +---------------------------------
9
3 files changed, 3 insertions(+), 90 deletions(-)
10
11
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/riscv/tcg-target-con-set.h
14
+++ b/tcg/riscv/tcg-target-con-set.h
15
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, ri)
16
C_O1_I2(r, r, rI)
17
C_N1_I2(r, r, rM)
18
C_O1_I4(r, r, rI, rM, rM)
19
-C_O2_I4(r, r, rz, rz, rM, rM)
20
C_O0_I2(v, r)
21
C_O1_I1(v, r)
22
C_O1_I1(v, v)
23
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/riscv/tcg-target-has.h
26
+++ b/tcg/riscv/tcg-target-has.h
27
@@ -XXX,XX +XXX,XX @@
28
#include "host/cpuinfo.h"
29
30
/* optional instructions */
31
-#define TCG_TARGET_HAS_add2_i32 1
32
-#define TCG_TARGET_HAS_sub2_i32 1
33
#define TCG_TARGET_HAS_qemu_st8_i32 0
34
35
#define TCG_TARGET_HAS_extr_i64_i32 1
36
-#define TCG_TARGET_HAS_add2_i64 1
37
-#define TCG_TARGET_HAS_sub2_i64 1
38
+#define TCG_TARGET_HAS_add2_i64 0
39
+#define TCG_TARGET_HAS_sub2_i64 0
40
41
#define TCG_TARGET_HAS_qemu_ldst_i128 0
42
43
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/riscv/tcg-target.c.inc
46
+++ b/tcg/riscv/tcg-target.c.inc
47
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
48
}
49
/*
50
* Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
51
- * Used by addsub2 and movcond, which may need the negative value,
52
+ * Used by movcond, which may need the negative value,
53
* and requires the modified constant to be representable.
54
*/
55
if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
56
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
57
return false;
58
}
59
60
-static void tcg_out_addsub2(TCGContext *s,
61
- TCGReg rl, TCGReg rh,
62
- TCGReg al, TCGReg ah,
63
- TCGArg bl, TCGArg bh,
64
- bool cbl, bool cbh, bool is_sub, bool is32bit)
65
-{
66
- const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
67
- const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
68
- const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
69
- TCGReg th = TCG_REG_TMP1;
70
-
71
- /* If we have a negative constant such that negating it would
72
- make the high part zero, we can (usually) eliminate one insn. */
73
- if (cbl && cbh && bh == -1 && bl != 0) {
74
- bl = -bl;
75
- bh = 0;
76
- is_sub = !is_sub;
77
- }
78
-
79
- /* By operating on the high part first, we get to use the final
80
- carry operation to move back from the temporary. */
81
- if (!cbh) {
82
- tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
83
- } else if (bh != 0 || ah == rl) {
84
- tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
85
- } else {
86
- th = ah;
87
- }
88
-
89
- /* Note that tcg optimization should eliminate the bl == 0 case. */
90
- if (is_sub) {
91
- if (cbl) {
92
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
93
- tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
94
- } else {
95
- tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
96
- tcg_out_opc_reg(s, opc_sub, rl, al, bl);
97
- }
98
- tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
99
- } else {
100
- if (cbl) {
101
- tcg_out_opc_imm(s, opc_addi, rl, al, bl);
102
- tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
103
- } else if (al == bl) {
104
- /*
105
- * If the input regs overlap, this is a simple doubling
106
- * and carry-out is the input msb. This special case is
107
- * required when the output reg overlaps the input,
108
- * but we might as well use it always.
109
- */
110
- tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
111
- tcg_out_opc_reg(s, opc_add, rl, al, al);
112
- } else {
113
- tcg_out_opc_reg(s, opc_add, rl, al, bl);
114
- tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
115
- rl, (rl == bl ? al : bl));
116
- }
117
- tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
118
- }
119
-}
120
-
121
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
122
TCGReg dst, TCGReg src)
123
{
124
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
125
tcg_out_ldst(s, OPC_SD, a0, a1, a2);
126
break;
127
128
- case INDEX_op_add2_i32:
129
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
130
- const_args[4], const_args[5], false, true);
131
- break;
132
- case INDEX_op_add2_i64:
133
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
134
- const_args[4], const_args[5], false, false);
135
- break;
136
- case INDEX_op_sub2_i32:
137
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
138
- const_args[4], const_args[5], true, true);
139
- break;
140
- case INDEX_op_sub2_i64:
141
- tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
142
- const_args[4], const_args[5], true, false);
143
- break;
144
-
145
case INDEX_op_qemu_ld_i32:
146
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
147
break;
148
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
149
case INDEX_op_st_i64:
150
return C_O0_I2(rz, r);
151
152
- case INDEX_op_add2_i32:
153
- case INDEX_op_add2_i64:
154
- case INDEX_op_sub2_i32:
155
- case INDEX_op_sub2_i64:
156
- return C_O2_I4(r, r, rz, rz, rM, rM);
157
-
158
case INDEX_op_qemu_ld_i32:
159
case INDEX_op_qemu_ld_i64:
160
return C_O1_I1(r, r);
161
--
162
2.43.0
Deleted patch
1
Use per-loop variables instead of one 'i' for the function.
2
1
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tcg.c | 23 +++++++++++------------
8
1 file changed, 11 insertions(+), 12 deletions(-)
9
10
diff --git a/tcg/tcg.c b/tcg/tcg.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg.c
13
+++ b/tcg/tcg.c
14
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
15
int nb_temps = s->nb_temps;
16
TCGOp *op, *op_prev;
17
TCGRegSet *prefs;
18
- int i;
19
20
prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
21
- for (i = 0; i < nb_temps; ++i) {
22
+ for (int i = 0; i < nb_temps; ++i) {
23
s->temps[i].state_ptr = prefs + i;
24
}
25
26
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
27
28
/* pure functions can be removed if their result is unused */
29
if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
30
- for (i = 0; i < nb_oargs; i++) {
31
+ for (int i = 0; i < nb_oargs; i++) {
32
ts = arg_temp(op->args[i]);
33
if (ts->state != TS_DEAD) {
34
goto do_not_remove_call;
35
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
36
do_not_remove_call:
37
38
/* Output args are dead. */
39
- for (i = 0; i < nb_oargs; i++) {
40
+ for (int i = 0; i < nb_oargs; i++) {
41
ts = arg_temp(op->args[i]);
42
if (ts->state & TS_DEAD) {
43
arg_life |= DEAD_ARG << i;
44
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
45
}
46
47
/* Record arguments that die in this helper. */
48
- for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
49
+ for (int i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
50
ts = arg_temp(op->args[i]);
51
if (ts->state & TS_DEAD) {
52
arg_life |= DEAD_ARG << i;
53
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
54
* order so that if a temp is used more than once, the stack
55
* reset to max happens before the register reset to 0.
56
*/
57
- for (i = nb_iargs - 1; i >= 0; i--) {
58
+ for (int i = nb_iargs - 1; i >= 0; i--) {
59
const TCGCallArgumentLoc *loc = &info->in[i];
60
ts = arg_temp(op->args[nb_oargs + i]);
61
62
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
63
* If a temp is used once, this produces a single set bit;
64
* if a temp is used multiple times, this produces a set.
65
*/
66
- for (i = 0; i < nb_iargs; i++) {
67
+ for (int i = 0; i < nb_iargs; i++) {
68
const TCGCallArgumentLoc *loc = &info->in[i];
69
ts = arg_temp(op->args[nb_oargs + i]);
70
71
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
72
its outputs are dead. We assume that nb_oargs == 0
73
implies side effects */
74
if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
75
- for (i = 0; i < nb_oargs; i++) {
76
+ for (int i = 0; i < nb_oargs; i++) {
77
if (arg_temp(op->args[i])->state != TS_DEAD) {
78
goto do_not_remove;
79
}
80
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
81
break;
82
83
do_not_remove:
84
- for (i = 0; i < nb_oargs; i++) {
85
+ for (int i = 0; i < nb_oargs; i++) {
86
ts = arg_temp(op->args[i]);
87
88
/* Remember the preference of the uses that followed. */
89
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
90
}
91
92
/* Record arguments that die in this opcode. */
93
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
94
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
95
ts = arg_temp(op->args[i]);
96
if (ts->state & TS_DEAD) {
97
arg_life |= DEAD_ARG << i;
98
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
99
}
100
101
/* Input arguments are live for preceding opcodes. */
102
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
103
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
104
ts = arg_temp(op->args[i]);
105
if (ts->state & TS_DEAD) {
106
/* For operands that were dead, initially allow
107
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
108
109
default:
110
args_ct = opcode_args_ct(op);
111
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
112
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
113
const TCGArgConstraint *ct = &args_ct[i];
114
TCGRegSet set, *pset;
115
116
--
117
2.43.0
118
119
Deleted patch
1
Sink the sets of the def, nb_iargs, nb_oargs variables to
2
the default and do_not_remove labels. They're not really
3
needed beforehand, and it avoids preceding code from having
4
to keep them up-to-date. Note that def had *not* been kept
5
up-to-date; thankfully only def->flags had been used and
6
those bits were constant between opcode changes.
7
1
8
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/tcg.c | 20 +++++++-------------
12
1 file changed, 7 insertions(+), 13 deletions(-)
13
14
diff --git a/tcg/tcg.c b/tcg/tcg.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/tcg.c
17
+++ b/tcg/tcg.c
18
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
19
case INDEX_op_sub2_i64:
20
opc_new = INDEX_op_sub;
21
do_addsub2:
22
- nb_iargs = 4;
23
- nb_oargs = 2;
24
/* Test if the high part of the operation is dead, but not
25
the low part. The result can be optimized to a simple
26
add or sub. This happens often for x86_64 guest when the
27
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
28
op->args[1] = op->args[2];
29
op->args[2] = op->args[4];
30
/* Fall through and mark the single-word operation live. */
31
- nb_iargs = 2;
32
- nb_oargs = 1;
33
}
34
goto do_not_remove;
35
36
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
37
opc_new = INDEX_op_mul;
38
opc_new2 = INDEX_op_muluh;
39
do_mul2:
40
- nb_iargs = 2;
41
- nb_oargs = 2;
42
if (arg_temp(op->args[1])->state == TS_DEAD) {
43
if (arg_temp(op->args[0])->state == TS_DEAD) {
44
/* Both parts of the operation are dead. */
45
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
46
goto do_not_remove;
47
}
48
/* Mark the single-word operation live. */
49
- nb_oargs = 1;
50
goto do_not_remove;
51
52
default:
53
- /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
54
- nb_iargs = def->nb_iargs;
55
- nb_oargs = def->nb_oargs;
56
-
57
/* Test if the operation can be removed because all
58
its outputs are dead. We assume that nb_oargs == 0
59
implies side effects */
60
- if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
61
- for (int i = 0; i < nb_oargs; i++) {
62
+ def = &tcg_op_defs[opc];
63
+ if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && def->nb_oargs != 0) {
64
+ for (int i = def->nb_oargs - 1; i >= 0; i--) {
65
if (arg_temp(op->args[i])->state != TS_DEAD) {
66
goto do_not_remove;
67
}
68
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
69
break;
70
71
do_not_remove:
72
+ def = &tcg_op_defs[opc];
73
+ nb_iargs = def->nb_iargs;
74
+ nb_oargs = def->nb_oargs;
75
+
76
for (int i = 0; i < nb_oargs; i++) {
77
ts = arg_temp(op->args[i]);
78
79
--
80
2.43.0
Deleted patch
1
Liveness needs to track carry-live state in order to
2
determine if the (hidden) output of the opcode is used.
3
Code generation needs to track carry-live state in order
4
to avoid clobbering cpu flags when loading constants.
5
1
6
So far, output routines and backends are unchanged.
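
A rough sketch of the liveness rule added below; Op, op_flags() and
lower_to_plain_form() are hypothetical stand-ins for the actual tcg.c
structures:

    /* Reverse scan over the ops of a TB.  An opcode that produces the
     * carry can be downgraded to its plain form when nothing after it
     * consumed the carry; an opcode that consumes the carry makes it
     * live for everything that precedes it. */
    static void liveness_carry_sketch(Op *last_op)
    {
        bool carry_live = false;

        for (Op *op = last_op; op != NULL; op = op->prev) {
            if (op_flags(op) & TCG_OPF_CARRY_OUT) {
                if (!carry_live) {
                    lower_to_plain_form(op);  /* e.g. addco -> add */
                }
                carry_live = false;   /* the carry is defined here */
            }
            if (op_flags(op) & TCG_OPF_CARRY_IN) {
                carry_live = true;    /* carry consumed; live above */
            }
        }
    }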
7
8
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
include/tcg/tcg-opc.h | 10 +++
12
include/tcg/tcg.h | 13 +++-
13
tcg/optimize.c | 11 +++
14
tcg/tcg.c | 150 ++++++++++++++++++++++++++++++++++++++---
15
docs/devel/tcg-ops.rst | 61 +++++++++++++++++
16
5 files changed, 235 insertions(+), 10 deletions(-)
17
18
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/include/tcg/tcg-opc.h
21
+++ b/include/tcg/tcg-opc.h
22
@@ -XXX,XX +XXX,XX @@ DEF(shr, 1, 2, 0, TCG_OPF_INT)
23
DEF(sub, 1, 2, 0, TCG_OPF_INT)
24
DEF(xor, 1, 2, 0, TCG_OPF_INT)
25
26
+DEF(addco, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
27
+DEF(addc1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
28
+DEF(addci, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
29
+DEF(addcio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
30
+
31
+DEF(subbo, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
32
+DEF(subb1o, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_OUT)
33
+DEF(subbi, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN)
34
+DEF(subbio, 1, 2, 0, TCG_OPF_INT | TCG_OPF_CARRY_IN | TCG_OPF_CARRY_OUT)
35
+
36
/* load/store */
37
DEF(ld8u_i32, 1, 1, 1, 0)
38
DEF(ld8s_i32, 1, 1, 1, 0)
39
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/include/tcg/tcg.h
42
+++ b/include/tcg/tcg.h
43
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
44
MemOp riscv_cur_vsew;
45
TCGType riscv_cur_type;
46
#endif
47
+ /*
48
+ * During the tcg_reg_alloc_op loop, we are within a sequence of
49
+ * carry-using opcodes like addco+addci.
50
+ */
51
+ bool carry_live;
52
53
GHashTable *const_table[TCG_TYPE_COUNT];
54
TCGTempSet free_temps[TCG_TYPE_COUNT];
55
@@ -XXX,XX +XXX,XX @@ enum {
56
/* Instruction operands are vectors. */
57
TCG_OPF_VECTOR = 0x40,
58
/* Instruction is a conditional branch. */
59
- TCG_OPF_COND_BRANCH = 0x80
60
+ TCG_OPF_COND_BRANCH = 0x80,
61
+ /* Instruction produces carry out. */
62
+ TCG_OPF_CARRY_OUT = 0x100,
63
+ /* Instruction consumes carry in. */
64
+ TCG_OPF_CARRY_IN = 0x200,
65
};
66
67
typedef struct TCGOpDef {
68
const char *name;
69
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
70
- uint8_t flags;
71
+ uint16_t flags;
72
} TCGOpDef;
73
74
extern const TCGOpDef tcg_op_defs[];
75
diff --git a/tcg/optimize.c b/tcg/optimize.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/tcg/optimize.c
78
+++ b/tcg/optimize.c
79
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
80
return finish_folding(ctx, op);
81
}
82
83
+static bool fold_add_carry(OptContext *ctx, TCGOp *op)
84
+{
85
+ fold_commutative(ctx, op);
86
+ return finish_folding(ctx, op);
87
+}
88
+
89
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
90
{
91
bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
92
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
93
case INDEX_op_add_vec:
94
done = fold_add_vec(&ctx, op);
95
break;
96
+ case INDEX_op_addci:
97
+ case INDEX_op_addco:
98
+ case INDEX_op_addcio:
99
+ done = fold_add_carry(&ctx, op);
100
+ break;
101
CASE_OP_32_64(add2):
102
done = fold_add2(&ctx, op);
103
break;
104
diff --git a/tcg/tcg.c b/tcg/tcg.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/tcg/tcg.c
107
+++ b/tcg/tcg.c
108
@@ -XXX,XX +XXX,XX @@ liveness_pass_0(TCGContext *s)
109
}
110
}
111
112
+static void assert_carry_dead(TCGContext *s)
113
+{
114
+ /*
115
+ * Carry operations can be separated by a few insns like mov,
116
+ * load or store, but they should always be "close", and
117
+ * carry-out operations should always be paired with carry-in.
118
+ * At various boundaries, carry must have been consumed.
119
+ */
120
+ tcg_debug_assert(!s->carry_live);
121
+}
122
+
123
/* Liveness analysis : update the opc_arg_life array to tell if a
124
given input argument is dead. Instructions updating dead
125
temporaries are removed. */
126
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
127
/* ??? Should be redundant with the exit_tb that ends the TB. */
128
la_func_end(s, nb_globals, nb_temps);
129
130
+ s->carry_live = false;
131
QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
132
int nb_iargs, nb_oargs;
133
TCGOpcode opc_new, opc_new2;
134
TCGLifeData arg_life = 0;
135
TCGTemp *ts;
136
TCGOpcode opc = op->opc;
137
- const TCGOpDef *def = &tcg_op_defs[opc];
138
+ const TCGOpDef *def;
139
const TCGArgConstraint *args_ct;
140
141
switch (opc) {
142
case INDEX_op_call:
143
+ assert_carry_dead(s);
144
{
145
const TCGHelperInfo *info = tcg_call_info(op);
146
int call_flags = tcg_call_flags(op);
147
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
148
}
149
break;
150
case INDEX_op_insn_start:
151
+ assert_carry_dead(s);
152
break;
153
case INDEX_op_discard:
154
/* mark the temporary as dead */
155
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
156
case INDEX_op_sub2_i64:
157
opc_new = INDEX_op_sub;
158
do_addsub2:
159
+ assert_carry_dead(s);
160
/* Test if the high part of the operation is dead, but not
161
the low part. The result can be optimized to a simple
162
add or sub. This happens often for x86_64 guest when the
163
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
164
opc_new = INDEX_op_mul;
165
opc_new2 = INDEX_op_muluh;
166
do_mul2:
167
+ assert_carry_dead(s);
168
if (arg_temp(op->args[1])->state == TS_DEAD) {
169
if (arg_temp(op->args[0])->state == TS_DEAD) {
170
/* Both parts of the operation are dead. */
171
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
172
/* Mark the single-word operation live. */
173
goto do_not_remove;
174
175
+ case INDEX_op_addco:
176
+ if (s->carry_live) {
177
+ goto do_not_remove;
178
+ }
179
+ op->opc = opc = INDEX_op_add;
180
+ goto do_default;
181
+
182
+ case INDEX_op_addcio:
183
+ if (s->carry_live) {
184
+ goto do_not_remove;
185
+ }
186
+ op->opc = opc = INDEX_op_addci;
187
+ goto do_default;
188
+
189
+ case INDEX_op_subbo:
190
+ if (s->carry_live) {
191
+ goto do_not_remove;
192
+ }
193
+ /* Lower to sub, but this may also require canonicalization. */
194
+ op->opc = opc = INDEX_op_sub;
195
+ ts = arg_temp(op->args[2]);
196
+ if (ts->kind == TEMP_CONST) {
197
+ ts = tcg_constant_internal(ts->type, -ts->val);
198
+ if (ts->state_ptr == NULL) {
199
+ tcg_debug_assert(temp_idx(ts) == nb_temps);
200
+ nb_temps++;
201
+ ts->state_ptr = tcg_malloc(sizeof(TCGRegSet));
202
+ ts->state = TS_DEAD;
203
+ la_reset_pref(ts);
204
+ }
205
+ op->args[2] = temp_arg(ts);
206
+ op->opc = opc = INDEX_op_add;
207
+ }
208
+ goto do_default;
209
+
210
+ case INDEX_op_subbio:
211
+ if (s->carry_live) {
212
+ goto do_not_remove;
213
+ }
214
+ op->opc = opc = INDEX_op_subbi;
215
+ goto do_default;
216
+
217
+ case INDEX_op_addc1o:
218
+ if (s->carry_live) {
219
+ goto do_not_remove;
220
+ }
221
+ /* Lower to add, add +1. */
222
+ op_prev = tcg_op_insert_before(s, op, INDEX_op_add,
223
+ TCGOP_TYPE(op), 3);
224
+ op_prev->args[0] = op->args[0];
225
+ op_prev->args[1] = op->args[1];
226
+ op_prev->args[2] = op->args[2];
227
+ op->opc = opc = INDEX_op_add;
228
+ op->args[1] = op->args[0];
229
+ ts = arg_temp(op->args[0]);
230
+ ts = tcg_constant_internal(ts->type, 1);
231
+ op->args[2] = temp_arg(ts);
232
+ goto do_default;
233
+
234
+ case INDEX_op_subb1o:
235
+ if (s->carry_live) {
236
+ goto do_not_remove;
237
+ }
238
+ /* Lower to sub, add -1. */
239
+ op_prev = tcg_op_insert_before(s, op, INDEX_op_sub,
240
+ TCGOP_TYPE(op), 3);
241
+ op_prev->args[0] = op->args[0];
242
+ op_prev->args[1] = op->args[1];
243
+ op_prev->args[2] = op->args[2];
244
+ op->opc = opc = INDEX_op_add;
245
+ op->args[1] = op->args[0];
246
+ ts = arg_temp(op->args[0]);
247
+ ts = tcg_constant_internal(ts->type, -1);
248
+ op->args[2] = temp_arg(ts);
249
+ goto do_default;
250
+
251
default:
252
- /* Test if the operation can be removed because all
253
- its outputs are dead. We assume that nb_oargs == 0
254
- implies side effects */
255
+ do_default:
256
+ /*
257
+ * Test if the operation can be removed because all
258
+ * its outputs are dead. We assume that nb_oargs == 0
259
+ * implies side effects.
260
+ */
261
def = &tcg_op_defs[opc];
262
if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && def->nb_oargs != 0) {
263
for (int i = def->nb_oargs - 1; i >= 0; i--) {
264
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
265
266
/* If end of basic block, update. */
267
if (def->flags & TCG_OPF_BB_EXIT) {
268
+ assert_carry_dead(s);
269
la_func_end(s, nb_globals, nb_temps);
270
} else if (def->flags & TCG_OPF_COND_BRANCH) {
271
+ assert_carry_dead(s);
272
la_bb_sync(s, nb_globals, nb_temps);
273
} else if (def->flags & TCG_OPF_BB_END) {
274
+ assert_carry_dead(s);
275
la_bb_end(s, nb_globals, nb_temps);
276
} else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
277
+ assert_carry_dead(s);
278
la_global_sync(s, nb_globals);
279
if (def->flags & TCG_OPF_CALL_CLOBBER) {
280
la_cross_call(s, nb_temps);
281
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
282
arg_life |= DEAD_ARG << i;
283
}
284
}
285
+ if (def->flags & TCG_OPF_CARRY_OUT) {
286
+ s->carry_live = false;
287
+ }
288
289
/* Input arguments are live for preceding opcodes. */
290
for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
291
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
292
ts->state &= ~TS_DEAD;
293
}
294
}
295
+ if (def->flags & TCG_OPF_CARRY_IN) {
296
+ s->carry_live = true;
297
+ }
298
299
/* Incorporate constraints for this operand. */
300
switch (opc) {
301
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
302
}
303
op->life = arg_life;
304
}
305
+ assert_carry_dead(s);
306
}
307
308
/* Liveness analysis: Convert indirect regs to direct temporaries. */
309
@@ -XXX,XX +XXX,XX @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
310
all globals are stored at their canonical location. */
311
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
312
{
313
- int i;
314
-
315
- for (i = s->nb_globals; i < s->nb_temps; i++) {
316
+ assert_carry_dead(s);
317
+ for (int i = s->nb_globals; i < s->nb_temps; i++) {
318
TCGTemp *ts = &s->temps[i];
319
320
switch (ts->kind) {
321
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
322
*/
323
static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
324
{
325
+ assert_carry_dead(s);
326
sync_globals(s, allocated_regs);
327
328
for (int i = s->nb_globals; i < s->nb_temps; i++) {
329
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
330
int const_args[TCG_MAX_OP_ARGS];
331
TCGCond op_cond;
332
333
+ if (def->flags & TCG_OPF_CARRY_IN) {
334
+ tcg_debug_assert(s->carry_live);
335
+ }
336
+
337
nb_oargs = def->nb_oargs;
338
nb_iargs = def->nb_iargs;
339
340
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
341
tcg_reg_alloc_bb_end(s, i_allocated_regs);
342
} else {
343
if (def->flags & TCG_OPF_CALL_CLOBBER) {
344
+ assert_carry_dead(s);
345
/* XXX: permit generic clobber register list ? */
346
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
347
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
348
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
349
350
case INDEX_op_sub:
351
{
352
- const TCGOutOpSubtract *out = &outop_sub;
353
+ const TCGOutOpSubtract *out =
354
+ container_of(all_outop[op->opc], TCGOutOpSubtract, base);
355
356
/*
357
* Constants should never appear in the second source operand.
358
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
359
}
360
break;
361
362
+ case INDEX_op_addco:
363
+ case INDEX_op_subbo:
364
+ case INDEX_op_addci:
365
+ case INDEX_op_subbi:
366
+ case INDEX_op_addcio:
367
+ case INDEX_op_subbio:
368
+ case INDEX_op_addc1o:
369
+ case INDEX_op_subb1o:
370
+ g_assert_not_reached();
371
+
372
case INDEX_op_bswap64:
373
case INDEX_op_ext_i32_i64:
374
case INDEX_op_extu_i32_i64:
375
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
376
break;
377
}
378
379
+ if (def->flags & TCG_OPF_CARRY_IN) {
380
+ s->carry_live = false;
381
+ }
382
+ if (def->flags & TCG_OPF_CARRY_OUT) {
383
+ s->carry_live = true;
384
+ }
385
+
386
/* move the outputs in the correct register if needed */
387
for(i = 0; i < nb_oargs; i++) {
388
ts = arg_temp(op->args[i]);
389
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
390
tcg_out_tb_start(s);
391
392
num_insns = -1;
393
+ s->carry_live = false;
394
QTAILQ_FOREACH(op, &s->ops, link) {
395
TCGOpcode opc = op->opc;
396
397
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
398
tcg_reg_alloc_dup(s, op);
399
break;
400
case INDEX_op_insn_start:
401
+ assert_carry_dead(s);
402
if (num_insns >= 0) {
403
size_t off = tcg_current_code_size(s);
404
s->gen_insn_end_off[num_insns] = off;
405
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
406
tcg_out_label(s, arg_label(op->args[0]));
407
break;
408
case INDEX_op_call:
409
+ assert_carry_dead(s);
410
tcg_reg_alloc_call(s, op);
411
break;
412
case INDEX_op_exit_tb:
413
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
414
return -2;
415
}
416
}
417
+ assert_carry_dead(s);
418
+
419
tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
420
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
421
422
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
423
index XXXXXXX..XXXXXXX 100644
424
--- a/docs/devel/tcg-ops.rst
425
+++ b/docs/devel/tcg-ops.rst
426
@@ -XXX,XX +XXX,XX @@ Multiword arithmetic support
427
428
.. list-table::
429
430
+ * - addco *t0*, *t1*, *t2*
431
+
432
+ - | Compute *t0* = *t1* + *t2* and in addition output to the
433
+ carry bit provided by the host architecture.
434
+
435
+ * - addci *t0*, *t1*, *t2*
436
+
437
+ - | Compute *t0* = *t1* + *t2* + *C*, where *C* is the
438
+ input carry bit provided by the host architecture.
439
+ The output carry bit need not be computed.
440
+
441
+ * - addcio *t0*, *t1*, *t2*
442
+
443
+ - | Compute *t0* = *t1* + *t2* + *C*, where *C* is the
444
+ input carry bit provided by the host architecture,
445
+ and also compute the output carry bit.
446
+
447
+ * - addc1o *t0*, *t1*, *t2*
448
+
449
+ - | Compute *t0* = *t1* + *t2* + 1, and in addition output to the
450
+ carry bit provided by the host architecture. This is akin to
451
+ *addcio* with a fixed carry-in value of 1.
452
+ | This is intended to be used by the optimization pass,
453
+ as an intermediate step toward complete folding of the addition chain.
454
+ In some cases complete folding is not possible and this
455
+ opcode will remain until output. If this happens, the
456
+ code generator will use ``tcg_out_set_carry`` and then
457
+ the output routine for *addcio*.
458
+
459
+ * - subbo *t0*, *t1*, *t2*
460
+
461
+ - | Compute *t0* = *t1* - *t2* and in addition output to the
462
+ borrow bit provided by the host architecture.
463
+ | Depending on the host architecture, the carry bit may or may not be
464
+ identical to the borrow bit. Thus the addc\* and subb\*
465
+ opcodes must not be mixed.
466
+
467
+ * - subbi *t0*, *t1*, *t2*
468
+
469
+ - | Compute *t0* = *t1* - *t2* - *B*, where *B* is the
470
+ input borrow bit provided by the host architecture.
471
+ The output borrow bit need not be computed.
472
+
473
+ * - subbio *t0*, *t1*, *t2*
474
+
475
+ - | Compute *t0* = *t1* - *t2* - *B*, where *B* is the
476
+ input borrow bit provided by the host architecture,
477
+ and also compute the output borrow bit.
478
+
479
+ * - subb1o *t0*, *t1*, *t2*
480
+
481
+ - | Compute *t0* = *t1* - *t2* - 1, and in addition output to the
482
+ borrow bit provided by the host architecture. This is akin to
483
+ *subbio* with a fixed borrow-in value of 1.
484
+ | This is intended to be used by the optimization pass,
485
+ as an intermediate step toward complete folding of the subtraction chain.
486
+ In some cases complete folding is not possible and this
487
+ opcode will remain until output. If this happens, the
488
+ code generator will use ``tcg_out_set_borrow`` and then
489
+ the output routine for *subbio*.
490
+
491
* - add2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
492
493
sub2_i32/i64 *t0_low*, *t0_high*, *t1_low*, *t1_high*, *t2_low*, *t2_high*
494
--
495
2.43.0
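
To make the new opcode semantics concrete, here is a hedged C model of
a few of them, with the host carry/borrow bit represented as an
explicit bool. These helpers are illustrative only; the real opcodes
read and write the host flags register:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t addco(uint64_t a, uint64_t b, bool *c)
    {
        uint64_t r = a + b;
        *c = r < a;                      /* carry out */
        return r;
    }

    static uint64_t addcio(uint64_t a, uint64_t b, bool *c)
    {
        uint64_t r = a + b + *c;         /* carry in */
        *c = r < a || (r == a && *c);    /* carry out */
        return r;
    }

    static uint64_t subbo(uint64_t a, uint64_t b, bool *c)
    {
        *c = a < b;                      /* borrow out */
        return a - b;
    }

Chaining addco on the low halves with addcio on the high halves
reproduces add2; the separate subb* family exists because, as the
documentation above notes, the host's borrow convention may differ
from its carry convention.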
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/tcg.c | 61 +++++++++++++++++++++++++++-----
5
tcg/aarch64/tcg-target.c.inc | 34 ++++++++++++++++++
6
tcg/arm/tcg-target.c.inc | 34 ++++++++++++++++++
7
tcg/i386/tcg-target.c.inc | 34 ++++++++++++++++++
8
tcg/loongarch64/tcg-target.c.inc | 34 ++++++++++++++++++
9
tcg/mips/tcg-target.c.inc | 34 ++++++++++++++++++
10
tcg/ppc/tcg-target.c.inc | 34 ++++++++++++++++++
11
tcg/riscv/tcg-target.c.inc | 34 ++++++++++++++++++
12
tcg/s390x/tcg-target.c.inc | 34 ++++++++++++++++++
13
tcg/sparc64/tcg-target.c.inc | 34 ++++++++++++++++++
14
tcg/tci/tcg-target.c.inc | 34 ++++++++++++++++++
15
11 files changed, 393 insertions(+), 8 deletions(-)
16
1
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
20
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
22
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
23
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
24
static void tcg_out_goto_tb(TCGContext *s, int which);
25
+static void tcg_out_set_carry(TCGContext *s);
26
+static void tcg_out_set_borrow(TCGContext *s);
27
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
28
const TCGArg args[TCG_MAX_OP_ARGS],
29
const int const_args[TCG_MAX_OP_ARGS]);
30
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOp {
31
TCGConstraintSetIndex (*dynamic_constraint)(TCGType type, unsigned flags);
32
} TCGOutOp;
33
34
+typedef struct TCGOutOpAddSubCarry {
35
+ TCGOutOp base;
36
+ void (*out_rrr)(TCGContext *s, TCGType type,
37
+ TCGReg a0, TCGReg a1, TCGReg a2);
38
+ void (*out_rri)(TCGContext *s, TCGType type,
39
+ TCGReg a0, TCGReg a1, tcg_target_long a2);
40
+ void (*out_rir)(TCGContext *s, TCGType type,
41
+ TCGReg a0, tcg_target_long a1, TCGReg a2);
42
+ void (*out_rii)(TCGContext *s, TCGType type,
43
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2);
44
+} TCGOutOpAddSubCarry;
45
+
46
typedef struct TCGOutOpBinary {
47
TCGOutOp base;
48
void (*out_rrr)(TCGContext *s, TCGType type,
49
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpUnary outop_extrl_i64_i32 = {
50
/* Register allocation descriptions for every TCGOpcode. */
51
static const TCGOutOp * const all_outop[NB_OPS] = {
52
OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
53
+ OUTOP(INDEX_op_addci, TCGOutOpAddSubCarry, outop_addci),
54
+ OUTOP(INDEX_op_addcio, TCGOutOpBinary, outop_addcio),
55
+ OUTOP(INDEX_op_addco, TCGOutOpBinary, outop_addco),
56
+ /* addc1o is implemented with set_carry + addcio */
57
+ OUTOP(INDEX_op_addc1o, TCGOutOpBinary, outop_addcio),
58
OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
59
OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
60
OUTOP(INDEX_op_brcond, TCGOutOpBrcond, outop_brcond),
61
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
62
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
63
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
64
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
65
+ OUTOP(INDEX_op_subbi, TCGOutOpAddSubCarry, outop_subbi),
66
+ OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio),
67
+ OUTOP(INDEX_op_subbo, TCGOutOpAddSubCarry, outop_subbo),
68
+ /* subb1o is implemented with set_borrow + subbio */
69
+ OUTOP(INDEX_op_subb1o, TCGOutOpAddSubCarry, outop_subbio),
70
OUTOP(INDEX_op_xor, TCGOutOpBinary, outop_xor),
71
72
#if TCG_TARGET_REG_BITS == 32
73
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
74
/* emit instruction */
75
TCGType type = TCGOP_TYPE(op);
76
switch (op->opc) {
77
+ case INDEX_op_addc1o:
78
+ tcg_out_set_carry(s);
79
+ /* fall through */
80
case INDEX_op_add:
81
+ case INDEX_op_addcio:
82
+ case INDEX_op_addco:
83
case INDEX_op_and:
84
case INDEX_op_andc:
85
case INDEX_op_clz:
86
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
87
88
case INDEX_op_sub:
89
{
90
- const TCGOutOpSubtract *out =
91
- container_of(all_outop[op->opc], TCGOutOpSubtract, base);
92
+ const TCGOutOpSubtract *out = &outop_sub;
93
94
/*
95
* Constants should never appear in the second source operand.
96
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
97
}
98
break;
99
100
- case INDEX_op_addco:
101
- case INDEX_op_subbo:
102
+ case INDEX_op_subb1o:
103
+ tcg_out_set_borrow(s);
104
+ /* fall through */
105
case INDEX_op_addci:
106
case INDEX_op_subbi:
107
- case INDEX_op_addcio:
108
case INDEX_op_subbio:
109
- case INDEX_op_addc1o:
110
- case INDEX_op_subb1o:
111
- g_assert_not_reached();
112
+ case INDEX_op_subbo:
113
+ {
114
+ const TCGOutOpAddSubCarry *out =
115
+ container_of(all_outop[op->opc], TCGOutOpAddSubCarry, base);
116
+
117
+ if (const_args[2]) {
118
+ if (const_args[1]) {
119
+ out->out_rii(s, type, new_args[0],
120
+ new_args[1], new_args[2]);
121
+ } else {
122
+ out->out_rri(s, type, new_args[0],
123
+ new_args[1], new_args[2]);
124
+ }
125
+ } else if (const_args[1]) {
126
+ out->out_rir(s, type, new_args[0], new_args[1], new_args[2]);
127
+ } else {
128
+ out->out_rrr(s, type, new_args[0], new_args[1], new_args[2]);
129
+ }
130
+ }
131
+ break;
132
133
case INDEX_op_bswap64:
134
case INDEX_op_ext_i32_i64:
135
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/aarch64/tcg-target.c.inc
138
+++ b/tcg/aarch64/tcg-target.c.inc
139
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
140
.out_rri = tgen_addi,
141
};
142
143
+static const TCGOutOpBinary outop_addco = {
144
+ .base.static_constraint = C_NotImplemented,
145
+};
146
+
147
+static const TCGOutOpAddSubCarry outop_addci = {
148
+ .base.static_constraint = C_NotImplemented,
149
+};
150
+
151
+static const TCGOutOpBinary outop_addcio = {
152
+ .base.static_constraint = C_NotImplemented,
153
+};
154
+
155
+static void tcg_out_set_carry(TCGContext *s)
156
+{
157
+ g_assert_not_reached();
158
+}
159
+
160
static void tgen_and(TCGContext *s, TCGType type,
161
TCGReg a0, TCGReg a1, TCGReg a2)
162
{
163
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
164
.out_rrr = tgen_sub,
165
};
166
167
+static const TCGOutOpAddSubCarry outop_subbo = {
168
+ .base.static_constraint = C_NotImplemented,
169
+};
170
+
171
+static const TCGOutOpAddSubCarry outop_subbi = {
172
+ .base.static_constraint = C_NotImplemented,
173
+};
174
+
175
+static const TCGOutOpAddSubCarry outop_subbio = {
176
+ .base.static_constraint = C_NotImplemented,
177
+};
178
+
179
+static void tcg_out_set_borrow(TCGContext *s)
180
+{
181
+ g_assert_not_reached();
182
+}
183
+
184
static void tgen_xor(TCGContext *s, TCGType type,
185
TCGReg a0, TCGReg a1, TCGReg a2)
186
{
187
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
188
index XXXXXXX..XXXXXXX 100644
189
--- a/tcg/arm/tcg-target.c.inc
190
+++ b/tcg/arm/tcg-target.c.inc
191
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
192
.out_rri = tgen_addi,
193
};
194
195
+static const TCGOutOpBinary outop_addco = {
196
+ .base.static_constraint = C_NotImplemented,
197
+};
198
+
199
+static const TCGOutOpAddSubCarry outop_addci = {
200
+ .base.static_constraint = C_NotImplemented,
201
+};
202
+
203
+static const TCGOutOpBinary outop_addcio = {
204
+ .base.static_constraint = C_NotImplemented,
205
+};
206
+
207
+static void tcg_out_set_carry(TCGContext *s)
208
+{
209
+ g_assert_not_reached();
210
+}
211
+
212
static void tgen_and(TCGContext *s, TCGType type,
213
TCGReg a0, TCGReg a1, TCGReg a2)
214
{
215
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
216
.out_rir = tgen_subfi,
217
};
218
219
+static const TCGOutOpAddSubCarry outop_subbo = {
220
+ .base.static_constraint = C_NotImplemented,
221
+};
222
+
223
+static const TCGOutOpAddSubCarry outop_subbi = {
224
+ .base.static_constraint = C_NotImplemented,
225
+};
226
+
227
+static const TCGOutOpAddSubCarry outop_subbio = {
228
+ .base.static_constraint = C_NotImplemented,
229
+};
230
+
231
+static void tcg_out_set_borrow(TCGContext *s)
232
+{
233
+ g_assert_not_reached();
234
+}
235
+
236
static void tgen_xor(TCGContext *s, TCGType type,
237
TCGReg a0, TCGReg a1, TCGReg a2)
238
{
239
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
240
index XXXXXXX..XXXXXXX 100644
241
--- a/tcg/i386/tcg-target.c.inc
242
+++ b/tcg/i386/tcg-target.c.inc
243
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
244
.out_rri = tgen_addi,
245
};
246
247
+static const TCGOutOpBinary outop_addco = {
248
+ .base.static_constraint = C_NotImplemented,
249
+};
250
+
251
+static const TCGOutOpAddSubCarry outop_addci = {
252
+ .base.static_constraint = C_NotImplemented,
253
+};
254
+
255
+static const TCGOutOpBinary outop_addcio = {
256
+ .base.static_constraint = C_NotImplemented,
257
+};
258
+
259
+static void tcg_out_set_carry(TCGContext *s)
260
+{
261
+ g_assert_not_reached();
262
+}
263
+
264
static void tgen_and(TCGContext *s, TCGType type,
265
TCGReg a0, TCGReg a1, TCGReg a2)
266
{
267
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
268
.out_rrr = tgen_sub,
269
};
270
271
+static const TCGOutOpAddSubCarry outop_subbo = {
272
+ .base.static_constraint = C_NotImplemented,
273
+};
274
+
275
+static const TCGOutOpAddSubCarry outop_subbi = {
276
+ .base.static_constraint = C_NotImplemented,
277
+};
278
+
279
+static const TCGOutOpAddSubCarry outop_subbio = {
280
+ .base.static_constraint = C_NotImplemented,
281
+};
282
+
283
+static void tcg_out_set_borrow(TCGContext *s)
284
+{
285
+ g_assert_not_reached();
286
+}
287
+
288
static void tgen_xor(TCGContext *s, TCGType type,
289
TCGReg a0, TCGReg a1, TCGReg a2)
290
{
291
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
292
index XXXXXXX..XXXXXXX 100644
293
--- a/tcg/loongarch64/tcg-target.c.inc
294
+++ b/tcg/loongarch64/tcg-target.c.inc
295
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
296
.out_rri = tcg_out_addi,
297
};
298
299
+static const TCGOutOpBinary outop_addco = {
300
+ .base.static_constraint = C_NotImplemented,
301
+};
302
+
303
+static const TCGOutOpAddSubCarry outop_addci = {
304
+ .base.static_constraint = C_NotImplemented,
305
+};
306
+
307
+static const TCGOutOpBinary outop_addcio = {
308
+ .base.static_constraint = C_NotImplemented,
309
+};
310
+
311
+static void tcg_out_set_carry(TCGContext *s)
312
+{
313
+ g_assert_not_reached();
314
+}
315
+
316
static void tgen_and(TCGContext *s, TCGType type,
317
TCGReg a0, TCGReg a1, TCGReg a2)
318
{
319
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
320
.out_rrr = tgen_sub,
321
};
322
323
+static const TCGOutOpAddSubCarry outop_subbo = {
324
+ .base.static_constraint = C_NotImplemented,
325
+};
326
+
327
+static const TCGOutOpAddSubCarry outop_subbi = {
328
+ .base.static_constraint = C_NotImplemented,
329
+};
330
+
331
+static const TCGOutOpAddSubCarry outop_subbio = {
332
+ .base.static_constraint = C_NotImplemented,
333
+};
334
+
335
+static void tcg_out_set_borrow(TCGContext *s)
336
+{
337
+ g_assert_not_reached();
338
+}
339
+
340
static void tgen_xor(TCGContext *s, TCGType type,
341
TCGReg a0, TCGReg a1, TCGReg a2)
342
{
343
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
344
index XXXXXXX..XXXXXXX 100644
345
--- a/tcg/mips/tcg-target.c.inc
346
+++ b/tcg/mips/tcg-target.c.inc
347
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
348
.out_rri = tgen_addi,
349
};
350
351
+static const TCGOutOpBinary outop_addco = {
352
+ .base.static_constraint = C_NotImplemented,
353
+};
354
+
355
+static const TCGOutOpAddSubCarry outop_addci = {
356
+ .base.static_constraint = C_NotImplemented,
357
+};
358
+
359
+static const TCGOutOpBinary outop_addcio = {
360
+ .base.static_constraint = C_NotImplemented,
361
+};
362
+
363
+static void tcg_out_set_carry(TCGContext *s)
364
+{
365
+ g_assert_not_reached();
366
+}
367
+
368
static void tgen_and(TCGContext *s, TCGType type,
369
TCGReg a0, TCGReg a1, TCGReg a2)
370
{
371
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
372
.out_rrr = tgen_sub,
373
};
374
375
+static const TCGOutOpAddSubCarry outop_subbo = {
376
+ .base.static_constraint = C_NotImplemented,
377
+};
378
+
379
+static const TCGOutOpAddSubCarry outop_subbi = {
380
+ .base.static_constraint = C_NotImplemented,
381
+};
382
+
383
+static const TCGOutOpAddSubCarry outop_subbio = {
384
+ .base.static_constraint = C_NotImplemented,
385
+};
386
+
387
+static void tcg_out_set_borrow(TCGContext *s)
388
+{
389
+ g_assert_not_reached();
390
+}
391
+
392
static void tgen_xor(TCGContext *s, TCGType type,
393
TCGReg a0, TCGReg a1, TCGReg a2)
394
{
395
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
396
index XXXXXXX..XXXXXXX 100644
397
--- a/tcg/ppc/tcg-target.c.inc
398
+++ b/tcg/ppc/tcg-target.c.inc
399
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
400
.out_rri = tgen_addi,
401
};
402
403
+static const TCGOutOpBinary outop_addco = {
404
+ .base.static_constraint = C_NotImplemented,
405
+};
406
+
407
+static const TCGOutOpAddSubCarry outop_addci = {
408
+ .base.static_constraint = C_NotImplemented,
409
+};
410
+
411
+static const TCGOutOpBinary outop_addcio = {
412
+ .base.static_constraint = C_NotImplemented,
413
+};
414
+
415
+static void tcg_out_set_carry(TCGContext *s)
416
+{
417
+ g_assert_not_reached();
418
+}
419
+
420
static void tgen_and(TCGContext *s, TCGType type,
421
TCGReg a0, TCGReg a1, TCGReg a2)
422
{
423
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
424
.out_rir = tgen_subfi,
425
};
426
427
+static const TCGOutOpAddSubCarry outop_subbo = {
428
+ .base.static_constraint = C_NotImplemented,
429
+};
430
+
431
+static const TCGOutOpAddSubCarry outop_subbi = {
432
+ .base.static_constraint = C_NotImplemented,
433
+};
434
+
435
+static const TCGOutOpAddSubCarry outop_subbio = {
436
+ .base.static_constraint = C_NotImplemented,
437
+};
438
+
439
+static void tcg_out_set_borrow(TCGContext *s)
440
+{
441
+ g_assert_not_reached();
442
+}
443
+
444
static void tgen_xor(TCGContext *s, TCGType type,
445
TCGReg a0, TCGReg a1, TCGReg a2)
446
{
447
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
448
index XXXXXXX..XXXXXXX 100644
449
--- a/tcg/riscv/tcg-target.c.inc
450
+++ b/tcg/riscv/tcg-target.c.inc
451
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
452
.out_rri = tgen_addi,
453
};
454
455
+static const TCGOutOpBinary outop_addco = {
456
+ .base.static_constraint = C_NotImplemented,
457
+};
458
+
459
+static const TCGOutOpAddSubCarry outop_addci = {
460
+ .base.static_constraint = C_NotImplemented,
461
+};
462
+
463
+static const TCGOutOpBinary outop_addcio = {
464
+ .base.static_constraint = C_NotImplemented,
465
+};
466
+
467
+static void tcg_out_set_carry(TCGContext *s)
468
+{
469
+ g_assert_not_reached();
470
+}
471
+
472
static void tgen_and(TCGContext *s, TCGType type,
473
TCGReg a0, TCGReg a1, TCGReg a2)
474
{
475
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
476
.out_rrr = tgen_sub,
477
};
478
479
+static const TCGOutOpAddSubCarry outop_subbo = {
480
+ .base.static_constraint = C_NotImplemented,
481
+};
482
+
483
+static const TCGOutOpAddSubCarry outop_subbi = {
484
+ .base.static_constraint = C_NotImplemented,
485
+};
486
+
487
+static const TCGOutOpAddSubCarry outop_subbio = {
488
+ .base.static_constraint = C_NotImplemented,
489
+};
490
+
491
+static void tcg_out_set_borrow(TCGContext *s)
492
+{
493
+ g_assert_not_reached();
494
+}
495
+
496
static void tgen_xor(TCGContext *s, TCGType type,
497
TCGReg a0, TCGReg a1, TCGReg a2)
498
{
499
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
500
index XXXXXXX..XXXXXXX 100644
501
--- a/tcg/s390x/tcg-target.c.inc
502
+++ b/tcg/s390x/tcg-target.c.inc
503
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
504
.out_rri = tgen_addi,
505
};
506
507
+static const TCGOutOpBinary outop_addco = {
508
+ .base.static_constraint = C_NotImplemented,
509
+};
510
+
511
+static const TCGOutOpAddSubCarry outop_addci = {
512
+ .base.static_constraint = C_NotImplemented,
513
+};
514
+
515
+static const TCGOutOpBinary outop_addcio = {
516
+ .base.static_constraint = C_NotImplemented,
517
+};
518
+
519
+static void tcg_out_set_carry(TCGContext *s)
520
+{
521
+ g_assert_not_reached();
522
+}
523
+
524
static void tgen_and(TCGContext *s, TCGType type,
525
TCGReg a0, TCGReg a1, TCGReg a2)
526
{
527
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
528
.out_rrr = tgen_sub,
529
};
530
531
+static const TCGOutOpAddSubCarry outop_subbo = {
532
+ .base.static_constraint = C_NotImplemented,
533
+};
534
+
535
+static const TCGOutOpAddSubCarry outop_subbi = {
536
+ .base.static_constraint = C_NotImplemented,
537
+};
538
+
539
+static const TCGOutOpAddSubCarry outop_subbio = {
540
+ .base.static_constraint = C_NotImplemented,
541
+};
542
+
543
+static void tcg_out_set_borrow(TCGContext *s)
544
+{
545
+ g_assert_not_reached();
546
+}
547
+
548
static void tgen_xor(TCGContext *s, TCGType type,
549
TCGReg a0, TCGReg a1, TCGReg a2)
550
{
551
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
552
index XXXXXXX..XXXXXXX 100644
553
--- a/tcg/sparc64/tcg-target.c.inc
554
+++ b/tcg/sparc64/tcg-target.c.inc
555
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
556
.out_rri = tgen_addi,
557
};
558
559
+static const TCGOutOpBinary outop_addco = {
560
+ .base.static_constraint = C_NotImplemented,
561
+};
562
+
563
+static const TCGOutOpAddSubCarry outop_addci = {
564
+ .base.static_constraint = C_NotImplemented,
565
+};
566
+
567
+static const TCGOutOpBinary outop_addcio = {
568
+ .base.static_constraint = C_NotImplemented,
569
+};
570
+
571
+static void tcg_out_set_carry(TCGContext *s)
572
+{
573
+ g_assert_not_reached();
574
+}
575
+
576
static void tgen_and(TCGContext *s, TCGType type,
577
TCGReg a0, TCGReg a1, TCGReg a2)
578
{
579
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
580
.out_rrr = tgen_sub,
581
};
582
583
+static const TCGOutOpAddSubCarry outop_subbo = {
584
+ .base.static_constraint = C_NotImplemented,
585
+};
586
+
587
+static const TCGOutOpAddSubCarry outop_subbi = {
588
+ .base.static_constraint = C_NotImplemented,
589
+};
590
+
591
+static const TCGOutOpAddSubCarry outop_subbio = {
592
+ .base.static_constraint = C_NotImplemented,
593
+};
594
+
595
+static void tcg_out_set_borrow(TCGContext *s)
596
+{
597
+ g_assert_not_reached();
598
+}
599
+
600
static void tgen_xor(TCGContext *s, TCGType type,
601
TCGReg a0, TCGReg a1, TCGReg a2)
602
{
603
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
604
index XXXXXXX..XXXXXXX 100644
605
--- a/tcg/tci/tcg-target.c.inc
606
+++ b/tcg/tci/tcg-target.c.inc
607
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
608
.out_rrr = tgen_add,
609
};
610
611
+static const TCGOutOpBinary outop_addco = {
612
+ .base.static_constraint = C_NotImplemented,
613
+};
614
+
615
+static const TCGOutOpAddSubCarry outop_addci = {
616
+ .base.static_constraint = C_NotImplemented,
617
+};
618
+
619
+static const TCGOutOpBinary outop_addcio = {
620
+ .base.static_constraint = C_NotImplemented,
621
+};
622
+
623
+static void tcg_out_set_carry(TCGContext *s)
624
+{
625
+ g_assert_not_reached();
626
+}
627
+
628
static void tgen_and(TCGContext *s, TCGType type,
629
TCGReg a0, TCGReg a1, TCGReg a2)
630
{
631
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
632
.out_rrr = tgen_sub,
633
};
634
635
+static const TCGOutOpAddSubCarry outop_subbo = {
636
+ .base.static_constraint = C_NotImplemented,
637
+};
638
+
639
+static const TCGOutOpAddSubCarry outop_subbi = {
640
+ .base.static_constraint = C_NotImplemented,
641
+};
642
+
643
+static const TCGOutOpAddSubCarry outop_subbio = {
644
+ .base.static_constraint = C_NotImplemented,
645
+};
646
+
647
+static void tcg_out_set_borrow(TCGContext *s)
648
+{
649
+ g_assert_not_reached();
650
+}
651
+
652
static void tgen_xor(TCGContext *s, TCGType type,
653
TCGReg a0, TCGReg a1, TCGReg a2)
654
{
655
--
656
2.43.0
Deleted patch
1
Propagate the known carry when possible, and simplify the opcodes
2
so that they do not require a carry-in when it is known. The result
3
will be cleaned up further by the subsequent liveness analysis pass.
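
As a rough standalone illustration (plain C, not QEMU code; the names
are invented for this sketch), a known carry lets the add-with-carry
collapse into an ordinary add:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Low-part add returning the boolean carry out. */
    static int add_co(uint64_t a, uint64_t b, uint64_t *sum)
    {
        *sum = a + b;
        return *sum < a;
    }

    int main(void)
    {
        uint64_t lo, hi;
        /* Both low inputs constant: carry out is known when folding. */
        int c = add_co(5, 7, &lo);                  /* c == 0 */

        /* With c known, "addci hi, ah, bh" folds to a plain add. */
        uint64_t ah = 1, bh = UINT64_MAX;
        hi = ah + bh + (uint64_t)c;

        printf("lo=%" PRIu64 " hi=%" PRIu64 " c=%d\n", lo, hi, c);
        return 0;
    }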
4
1
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 319 ++++++++++++++++++++++++++++++++++++++++++++++++-
9
1 file changed, 316 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
16
17
/* In flight values from optimization. */
18
TCGType type;
19
+ int carry_state; /* -1 = non-constant, {0,1} = constant carry-in */
20
} OptContext;
21
22
static inline TempOptInfo *ts_info(TCGTemp *ts)
23
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
24
* 3) those that produce information about the result value.
25
*/
26
27
+static bool fold_addco(OptContext *ctx, TCGOp *op);
28
static bool fold_or(OptContext *ctx, TCGOp *op);
29
static bool fold_orc(OptContext *ctx, TCGOp *op);
30
+static bool fold_subbo(OptContext *ctx, TCGOp *op);
31
static bool fold_xor(OptContext *ctx, TCGOp *op);
32
33
static bool fold_add(OptContext *ctx, TCGOp *op)
34
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
35
return finish_folding(ctx, op);
36
}
37
38
-static bool fold_add_carry(OptContext *ctx, TCGOp *op)
39
+static void squash_prev_carryout(OptContext *ctx, TCGOp *op)
40
+{
41
+ TempOptInfo *t2;
42
+
43
+ op = QTAILQ_PREV(op, link);
44
+ switch (op->opc) {
45
+ case INDEX_op_addco:
46
+ op->opc = INDEX_op_add;
47
+ fold_add(ctx, op);
48
+ break;
49
+ case INDEX_op_addcio:
50
+ op->opc = INDEX_op_addci;
51
+ break;
52
+ case INDEX_op_addc1o:
53
+ op->opc = INDEX_op_add;
54
+ t2 = arg_info(op->args[2]);
55
+ if (ti_is_const(t2)) {
56
+ op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
57
+ /* Perform other constant folding, if needed. */
58
+ fold_add(ctx, op);
59
+ } else {
60
+ TCGArg ret = op->args[0];
61
+ op = opt_insert_after(ctx, op, INDEX_op_add, 3);
62
+ op->args[0] = ret;
63
+ op->args[1] = ret;
64
+ op->args[2] = arg_new_constant(ctx, 1);
65
+ }
66
+ break;
67
+ default:
68
+ g_assert_not_reached();
69
+ }
70
+}
71
+
72
+static bool fold_addci(OptContext *ctx, TCGOp *op)
73
{
74
fold_commutative(ctx, op);
75
+
76
+ if (ctx->carry_state < 0) {
77
+ return finish_folding(ctx, op);
78
+ }
79
+
80
+ squash_prev_carryout(ctx, op);
81
+ op->opc = INDEX_op_add;
82
+
83
+ if (ctx->carry_state > 0) {
84
+ TempOptInfo *t2 = arg_info(op->args[2]);
85
+
86
+ /*
87
+ * Propagate the known carry-in into a constant, if possible.
88
+ * Otherwise emit a second add +1.
89
+ */
90
+ if (ti_is_const(t2)) {
91
+ op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
92
+ } else {
93
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_add, 3);
94
+
95
+ op2->args[0] = op->args[0];
96
+ op2->args[1] = op->args[1];
97
+ op2->args[2] = op->args[2];
98
+ fold_add(ctx, op2);
99
+
100
+ op->args[1] = op->args[0];
101
+ op->args[2] = arg_new_constant(ctx, 1);
102
+ }
103
+ }
104
+
105
+ ctx->carry_state = -1;
106
+ return fold_add(ctx, op);
107
+}
108
+
109
+static bool fold_addcio(OptContext *ctx, TCGOp *op)
110
+{
111
+ TempOptInfo *t1, *t2;
112
+ int carry_out = -1;
113
+ uint64_t sum, max;
114
+
115
+ fold_commutative(ctx, op);
116
+ t1 = arg_info(op->args[1]);
117
+ t2 = arg_info(op->args[2]);
118
+
119
+ /*
120
+ * The z_mask value is >= the maximum value that can be represented
121
+ * with the known zero bits. So adding the z_mask values will not
122
+ * overflow if and only if the true values cannot overflow.
123
+ */
124
+ if (!uadd64_overflow(t1->z_mask, t2->z_mask, &sum) &&
125
+ !uadd64_overflow(sum, ctx->carry_state != 0, &sum)) {
126
+ carry_out = 0;
127
+ }
128
+
129
+ if (ctx->carry_state < 0) {
130
+ ctx->carry_state = carry_out;
131
+ return finish_folding(ctx, op);
132
+ }
133
+
134
+ squash_prev_carryout(ctx, op);
135
+ if (ctx->carry_state == 0) {
136
+ goto do_addco;
137
+ }
138
+
139
+ /* Propagate the known carry-in into a constant, if possible. */
140
+ max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
141
+ if (ti_is_const(t2)) {
142
+ uint64_t v = ti_const_val(t2) & max;
143
+ if (v < max) {
144
+ op->args[2] = arg_new_constant(ctx, v + 1);
145
+ goto do_addco;
146
+ }
147
+ /* max + known carry in produces known carry out. */
148
+ carry_out = 1;
149
+ }
150
+ if (ti_is_const(t1)) {
151
+ uint64_t v = ti_const_val(t1) & max;
152
+ if (v < max) {
153
+ op->args[1] = arg_new_constant(ctx, v + 1);
154
+ goto do_addco;
155
+ }
156
+ carry_out = 1;
157
+ }
158
+
159
+ /* Adjust the opcode to remember the known carry-in. */
160
+ op->opc = INDEX_op_addc1o;
161
+ ctx->carry_state = carry_out;
162
+ return finish_folding(ctx, op);
163
+
164
+ do_addco:
165
+ op->opc = INDEX_op_addco;
166
+ return fold_addco(ctx, op);
167
+}
168
+
169
+static bool fold_addco(OptContext *ctx, TCGOp *op)
170
+{
171
+ TempOptInfo *t1, *t2;
172
+ int carry_out = -1;
173
+ uint64_t ign;
174
+
175
+ fold_commutative(ctx, op);
176
+ t1 = arg_info(op->args[1]);
177
+ t2 = arg_info(op->args[2]);
178
+
179
+ if (ti_is_const(t2)) {
180
+ uint64_t v2 = ti_const_val(t2);
181
+
182
+ if (ti_is_const(t1)) {
183
+ uint64_t v1 = ti_const_val(t1);
184
+ /* Given sign-extension of z_mask for I32, we need not truncate. */
185
+ carry_out = uadd64_overflow(v1, v2, &ign);
186
+ } else if (v2 == 0) {
187
+ carry_out = 0;
188
+ }
189
+ } else {
190
+ /*
191
+ * The z_mask value is >= the maximum value that can be represented
192
+ * with the known zero bits. So adding the z_mask values will not
193
+ * overflow if and only if the true values cannot overflow.
194
+ */
195
+ if (!uadd64_overflow(t1->z_mask, t2->z_mask, &ign)) {
196
+ carry_out = 0;
197
+ }
198
+ }
199
+ ctx->carry_state = carry_out;
200
return finish_folding(ctx, op);
201
}
202
203
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
204
return fold_addsub2(ctx, op, false);
205
}
206
207
+static void squash_prev_borrowout(OptContext *ctx, TCGOp *op)
208
+{
209
+ TempOptInfo *t2;
210
+
211
+ op = QTAILQ_PREV(op, link);
212
+ switch (op->opc) {
213
+ case INDEX_op_subbo:
214
+ op->opc = INDEX_op_sub;
215
+ fold_sub(ctx, op);
216
+ break;
217
+ case INDEX_op_subbio:
218
+ op->opc = INDEX_op_subbi;
219
+ break;
220
+ case INDEX_op_subb1o:
221
+ t2 = arg_info(op->args[2]);
222
+ if (ti_is_const(t2)) {
223
+ op->opc = INDEX_op_add;
224
+ op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
225
+ /* Perform other constant folding, if needed. */
226
+ fold_add(ctx, op);
227
+ } else {
228
+ TCGArg ret = op->args[0];
229
+ op->opc = INDEX_op_sub;
230
+ op = opt_insert_after(ctx, op, INDEX_op_add, 3);
231
+ op->args[0] = ret;
232
+ op->args[1] = ret;
233
+ op->args[2] = arg_new_constant(ctx, -1);
234
+ }
235
+ break;
236
+ default:
237
+ g_assert_not_reached();
238
+ }
239
+}
240
+
241
+static bool fold_subbi(OptContext *ctx, TCGOp *op)
242
+{
243
+ TempOptInfo *t2;
244
+ int borrow_in = ctx->carry_state;
245
+
246
+ if (borrow_in < 0) {
247
+ return finish_folding(ctx, op);
248
+ }
249
+ ctx->carry_state = -1;
250
+
251
+ squash_prev_borrowout(ctx, op);
252
+ if (borrow_in == 0) {
253
+ op->opc = INDEX_op_sub;
254
+ return fold_sub(ctx, op);
255
+ }
256
+
257
+ /*
258
+ * Propagate the known carry-in into any constant, then negate to
259
+ * transform from sub to add. If there is no constant, emit a
260
+ * separate add -1.
261
+ */
262
+ t2 = arg_info(op->args[2]);
263
+ if (ti_is_const(t2)) {
264
+ op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
265
+ } else {
266
+ TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_sub, 3);
267
+
268
+ op2->args[0] = op->args[0];
269
+ op2->args[1] = op->args[1];
270
+ op2->args[2] = op->args[2];
271
+ fold_sub(ctx, op2);
272
+
273
+ op->args[1] = op->args[0];
274
+ op->args[2] = arg_new_constant(ctx, -1);
275
+ }
276
+ op->opc = INDEX_op_add;
277
+ return fold_add(ctx, op);
278
+}
279
+
280
+static bool fold_subbio(OptContext *ctx, TCGOp *op)
281
+{
282
+ TempOptInfo *t1, *t2;
283
+ int borrow_out = -1;
284
+
285
+ if (ctx->carry_state < 0) {
286
+ return finish_folding(ctx, op);
287
+ }
288
+
289
+ squash_prev_borrowout(ctx, op);
290
+ if (ctx->carry_state == 0) {
291
+ goto do_subbo;
292
+ }
293
+
294
+ t1 = arg_info(op->args[1]);
295
+ t2 = arg_info(op->args[2]);
296
+
297
+ /* Propagate the known borrow-in into a constant, if possible. */
298
+ if (ti_is_const(t2)) {
299
+ uint64_t max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
300
+ uint64_t v = ti_const_val(t2) & max;
301
+
302
+ if (v < max) {
303
+ op->args[2] = arg_new_constant(ctx, v + 1);
304
+ goto do_subbo;
305
+ }
306
+ /* subtracting max + 1 produces known borrow out. */
307
+ borrow_out = 1;
308
+ }
309
+ if (ti_is_const(t1)) {
310
+ uint64_t v = ti_const_val(t1);
311
+ if (v != 0) {
312
+ op->args[2] = arg_new_constant(ctx, v - 1);
313
+ goto do_subbo;
314
+ }
315
+ }
316
+
317
+ /* Adjust the opcode to remember the known carry-in. */
318
+ op->opc = INDEX_op_subb1o;
319
+ ctx->carry_state = borrow_out;
320
+ return finish_folding(ctx, op);
321
+
322
+ do_subbo:
323
+ op->opc = INDEX_op_subbo;
324
+ return fold_subbo(ctx, op);
325
+}
326
+
327
+static bool fold_subbo(OptContext *ctx, TCGOp *op)
328
+{
329
+ TempOptInfo *t1 = arg_info(op->args[1]);
330
+ TempOptInfo *t2 = arg_info(op->args[2]);
331
+ int borrow_out = -1;
332
+
333
+ if (ti_is_const(t2)) {
334
+ uint64_t v2 = ti_const_val(t2);
335
+ if (v2 == 0) {
336
+ borrow_out = 0;
337
+ } else if (ti_is_const(t1)) {
338
+ uint64_t v1 = ti_const_val(t1);
339
+ borrow_out = v1 < v2;
340
+ }
341
+ }
342
+ ctx->carry_state = borrow_out;
343
+ return finish_folding(ctx, op);
344
+}
345
+
346
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
347
{
348
uint64_t z_mask = -1, s_mask = 0;
349
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
350
done = fold_add_vec(&ctx, op);
351
break;
352
case INDEX_op_addci:
353
- case INDEX_op_addco:
354
+ done = fold_addci(&ctx, op);
355
+ break;
356
case INDEX_op_addcio:
357
- done = fold_add_carry(&ctx, op);
358
+ done = fold_addcio(&ctx, op);
359
+ break;
360
+ case INDEX_op_addco:
361
+ done = fold_addco(&ctx, op);
362
break;
363
CASE_OP_32_64(add2):
364
done = fold_add2(&ctx, op);
365
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
366
case INDEX_op_sub:
367
done = fold_sub(&ctx, op);
368
break;
369
+ case INDEX_op_subbi:
370
+ done = fold_subbi(&ctx, op);
371
+ break;
372
+ case INDEX_op_subbio:
373
+ done = fold_subbio(&ctx, op);
374
+ break;
375
+ case INDEX_op_subbo:
376
+ done = fold_subbo(&ctx, op);
377
+ break;
378
case INDEX_op_sub_vec:
379
done = fold_sub_vec(&ctx, op);
380
break;
381
--
382
2.43.0
Deleted patch
1
For most binary operands, two const operands fold.
2
However, the add/sub carry opcodes have a third input.
3
Prefer "reg, zero, const" since many risc hosts have a
4
zero register that can fit a "reg, reg, const" insn format.
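
A minimal stand-in for the scoring this implies (plain C; "pref" is an
invented name for the sketch): non-constants sort first, a constant
zero next, any other constant last, which leaves three-input carry ops
in the "reg, zero, const" shape described above:

    /* 0 = non-constant, 2 = constant zero, 3 = other constant.
       Swapping whenever the first operand outscores the second
       pushes constants last and zero ahead of non-zero. */
    static int pref(int is_const, long val)
    {
        return !is_const ? 0 : (val ? 3 : 2);
    }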
5
1
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 18 ++++++++++++------
10
1 file changed, 12 insertions(+), 6 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
17
18
#define NO_DEST temp_arg(NULL)
19
20
+static int pref_commutative(TempOptInfo *ti)
21
+{
22
+ /* Slight preference for non-zero constants second. */
23
+ return !ti_is_const(ti) ? 0 : ti_const_val(ti) ? 3 : 2;
24
+}
25
+
26
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
27
{
28
TCGArg a1 = *p1, a2 = *p2;
29
int sum = 0;
30
- sum += arg_is_const(a1);
31
- sum -= arg_is_const(a2);
32
+ sum += pref_commutative(arg_info(a1));
33
+ sum -= pref_commutative(arg_info(a2));
34
35
/* Prefer the constant in second argument, and then the form
36
op a, a, b, which is better handled on non-RISC hosts. */
37
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
38
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
39
{
40
int sum = 0;
41
- sum += arg_is_const(p1[0]);
42
- sum += arg_is_const(p1[1]);
43
- sum -= arg_is_const(p2[0]);
44
- sum -= arg_is_const(p2[1]);
45
+ sum += pref_commutative(arg_info(p1[0]));
46
+ sum += pref_commutative(arg_info(p1[1]));
47
+ sum -= pref_commutative(arg_info(p2[0]));
48
+ sum -= pref_commutative(arg_info(p2[1]));
49
if (sum > 0) {
50
TCGArg t;
51
t = p1[0], p1[0] = p2[0], p2[0] = t;
52
--
53
2.43.0
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/tcg-op.c | 29 +++++++++++++++++++++++++++--
5
1 file changed, 27 insertions(+), 2 deletions(-)
6
1
7
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/tcg-op.c
10
+++ b/tcg/tcg-op.c
11
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
12
void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
13
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
14
{
15
- if (TCG_TARGET_HAS_add2_i32) {
16
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) {
17
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
18
+ tcg_gen_op3_i32(INDEX_op_addco, t0, al, bl);
19
+ tcg_gen_op3_i32(INDEX_op_addci, rh, ah, bh);
20
+ tcg_gen_mov_i32(rl, t0);
21
+ tcg_temp_free_i32(t0);
22
+ } else if (TCG_TARGET_HAS_add2_i32) {
23
tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh);
24
} else {
25
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
26
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
27
void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
28
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
29
{
30
- if (TCG_TARGET_HAS_add2_i64) {
31
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_REG, 0)) {
32
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
33
+
34
+ if (TCG_TARGET_REG_BITS == 32) {
35
+ tcg_gen_op3_i32(INDEX_op_addco, TCGV_LOW(t0),
36
+ TCGV_LOW(al), TCGV_LOW(bl));
37
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_HIGH(t0),
38
+ TCGV_HIGH(al), TCGV_HIGH(bl));
39
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_LOW(rh),
40
+ TCGV_LOW(ah), TCGV_LOW(bh));
41
+ tcg_gen_op3_i32(INDEX_op_addci, TCGV_HIGH(rh),
42
+ TCGV_HIGH(ah), TCGV_HIGH(bh));
43
+ } else {
44
+ tcg_gen_op3_i64(INDEX_op_addco, t0, al, bl);
45
+ tcg_gen_op3_i64(INDEX_op_addci, rh, ah, bh);
46
+ }
47
+
48
+ tcg_gen_mov_i64(rl, t0);
49
+ tcg_temp_free_i64(t0);
50
+ } else if (TCG_TARGET_HAS_add2_i64) {
51
tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh);
52
} else {
53
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
54
--
55
2.43.0
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/tcg-op.c | 29 +++++++++++++++++++++++++++--
5
1 file changed, 27 insertions(+), 2 deletions(-)
6
1
7
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/tcg-op.c
10
+++ b/tcg/tcg-op.c
11
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
12
void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
13
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
14
{
15
- if (TCG_TARGET_HAS_sub2_i32) {
16
+ if (tcg_op_supported(INDEX_op_subbi, TCG_TYPE_I32, 0)) {
17
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
18
+ tcg_gen_op3_i32(INDEX_op_subbo, t0, al, bl);
19
+ tcg_gen_op3_i32(INDEX_op_subbi, rh, ah, bh);
20
+ tcg_gen_mov_i32(rl, t0);
21
+ tcg_temp_free_i32(t0);
22
+ } else if (TCG_TARGET_HAS_sub2_i32) {
23
tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh);
24
} else {
25
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
26
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
27
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
28
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
29
{
30
- if (TCG_TARGET_HAS_sub2_i64) {
31
+ if (tcg_op_supported(INDEX_op_subbi, TCG_TYPE_REG, 0)) {
32
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
33
+
34
+ if (TCG_TARGET_REG_BITS == 32) {
35
+ tcg_gen_op3_i32(INDEX_op_subbo, TCGV_LOW(t0),
36
+ TCGV_LOW(al), TCGV_LOW(bl));
37
+ tcg_gen_op3_i32(INDEX_op_subbio, TCGV_HIGH(t0),
38
+ TCGV_HIGH(al), TCGV_HIGH(bl));
39
+ tcg_gen_op3_i32(INDEX_op_subbio, TCGV_LOW(rh),
40
+ TCGV_LOW(ah), TCGV_LOW(bh));
41
+ tcg_gen_op3_i32(INDEX_op_subbi, TCGV_HIGH(rh),
42
+ TCGV_HIGH(ah), TCGV_HIGH(bh));
43
+ } else {
44
+ tcg_gen_op3_i64(INDEX_op_subbo, t0, al, bl);
45
+ tcg_gen_op3_i64(INDEX_op_subbi, rh, ah, bh);
46
+ }
47
+
48
+ tcg_gen_mov_i64(rl, t0);
49
+ tcg_temp_free_i64(t0);
50
+ } else if (TCG_TARGET_HAS_sub2_i64) {
51
tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh);
52
} else {
53
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
54
--
55
2.43.0
Deleted patch
1
Do not clobber flags if they're live. Required in order
2
to perform register allocation on add/sub carry opcodes.
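
For intuition, a small GCC inline-asm sketch (hypothetical, x86 only,
not part of the patch) of why the guard matters: "xor r,r" zeroes a
register but clobbers EFLAGS, while "mov $0,r" leaves a live carry
intact:

    /* Returns 1: the carry set by stc survives the flag-neutral mov
       and is still visible to adc.  An xor in place of the mov
       would have cleared it. */
    static unsigned carry_survives_mov(void)
    {
        unsigned r;
        __asm__("stc\n\t"
                "movl $0, %0\n\t"
                "adcl $0, %0"
                : "=r"(r) : : "cc");
        return r;
    }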
3
1
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/i386/tcg-target.c.inc | 2 +-
8
1 file changed, 1 insertion(+), 1 deletion(-)
9
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
13
+++ b/tcg/i386/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type,
15
{
16
tcg_target_long diff;
17
18
- if (arg == 0) {
19
+ if (arg == 0 && !s->carry_live) {
20
tgen_arithr(s, ARITH_XOR, ret, ret);
21
return;
22
}
23
--
24
2.43.0
Deleted patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/i386/tcg-target-con-set.h | 1 -
5
tcg/i386/tcg-target-has.h | 8 +--
6
tcg/i386/tcg-target.c.inc | 117 +++++++++++++++++++++-------------
7
3 files changed, 76 insertions(+), 50 deletions(-)
8
1
9
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/i386/tcg-target-con-set.h
12
+++ b/tcg/i386/tcg-target-con-set.h
13
@@ -XXX,XX +XXX,XX @@ C_O2_I1(r, r, L)
14
C_O2_I2(a, d, a, r)
15
C_O2_I2(r, r, L, L)
16
C_O2_I3(a, d, 0, 1, r)
17
-C_N1_O1_I4(r, r, 0, 1, re, re)
18
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/tcg/i386/tcg-target-has.h
21
+++ b/tcg/i386/tcg-target-has.h
22
@@ -XXX,XX +XXX,XX @@
23
#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
24
25
/* optional instructions */
26
-#define TCG_TARGET_HAS_add2_i32 1
27
-#define TCG_TARGET_HAS_sub2_i32 1
28
+#define TCG_TARGET_HAS_add2_i32 0
29
+#define TCG_TARGET_HAS_sub2_i32 0
30
31
#if TCG_TARGET_REG_BITS == 64
32
/* Keep 32-bit values zero-extended in a register. */
33
#define TCG_TARGET_HAS_extr_i64_i32 1
34
-#define TCG_TARGET_HAS_add2_i64 1
35
-#define TCG_TARGET_HAS_sub2_i64 1
36
+#define TCG_TARGET_HAS_add2_i64 0
37
+#define TCG_TARGET_HAS_sub2_i64 0
38
#define TCG_TARGET_HAS_qemu_st8_i32 0
39
#else
40
#define TCG_TARGET_HAS_qemu_st8_i32 1
41
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/i386/tcg-target.c.inc
44
+++ b/tcg/i386/tcg-target.c.inc
45
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
46
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
47
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
48
#define OPC_SHRD_Ib (0xac | P_EXT)
49
+#define OPC_STC (0xf9)
50
#define OPC_TESTB    (0x84)
51
#define OPC_TESTL    (0x85)
52
#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
53
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
54
.out_rri = tgen_addi,
55
};
56
57
+static void tgen_addco(TCGContext *s, TCGType type,
58
+ TCGReg a0, TCGReg a1, TCGReg a2)
59
+{
60
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
61
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a2);
62
+}
63
+
64
+static void tgen_addco_imm(TCGContext *s, TCGType type,
65
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
66
+{
67
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
68
+ tgen_arithi(s, ARITH_ADD + rexw, a0, a2, true);
69
+}
70
+
71
static const TCGOutOpBinary outop_addco = {
72
- .base.static_constraint = C_NotImplemented,
73
+ .base.static_constraint = C_O1_I2(r, 0, re),
74
+ .out_rrr = tgen_addco,
75
+ .out_rri = tgen_addco_imm,
76
+};
77
+
78
+static void tgen_addcio(TCGContext *s, TCGType type,
79
+ TCGReg a0, TCGReg a1, TCGReg a2)
80
+{
81
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
82
+ tgen_arithr(s, ARITH_ADC + rexw, a0, a2);
83
+}
84
+
85
+static void tgen_addcio_imm(TCGContext *s, TCGType type,
86
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
87
+{
88
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
89
+ tgen_arithi(s, ARITH_ADC + rexw, a0, a2, true);
90
+}
91
+
92
+static const TCGOutOpBinary outop_addcio = {
93
+ .base.static_constraint = C_O1_I2(r, 0, re),
94
+ .out_rrr = tgen_addcio,
95
+ .out_rri = tgen_addcio_imm,
96
};
97
98
static const TCGOutOpAddSubCarry outop_addci = {
99
- .base.static_constraint = C_NotImplemented,
100
-};
101
-
102
-static const TCGOutOpBinary outop_addcio = {
103
- .base.static_constraint = C_NotImplemented,
104
+ .base.static_constraint = C_O1_I2(r, 0, re),
105
+ .out_rrr = tgen_addcio,
106
+ .out_rri = tgen_addcio_imm,
107
};
108
109
static void tcg_out_set_carry(TCGContext *s)
110
{
111
- g_assert_not_reached();
112
+ tcg_out8(s, OPC_STC);
113
}
114
115
static void tgen_and(TCGContext *s, TCGType type,
116
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_shr = {
117
};
118
119
static void tgen_sub(TCGContext *s, TCGType type,
120
- TCGReg a0, TCGReg a1, TCGReg a2)
121
+ TCGReg a0, TCGReg a1, TCGReg a2)
122
{
123
int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
124
tgen_arithr(s, ARITH_SUB + rexw, a0, a2);
125
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
126
.out_rrr = tgen_sub,
127
};
128
129
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
130
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
131
+{
132
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
133
+ tgen_arithi(s, ARITH_SUB + rexw, a0, a2, 1);
134
+}
135
+
136
static const TCGOutOpAddSubCarry outop_subbo = {
137
- .base.static_constraint = C_NotImplemented,
138
+ .base.static_constraint = C_O1_I2(r, 0, re),
139
+ .out_rrr = tgen_sub,
140
+ .out_rri = tgen_subbo_rri,
141
};
142
143
-static const TCGOutOpAddSubCarry outop_subbi = {
144
- .base.static_constraint = C_NotImplemented,
145
-};
146
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
147
+ TCGReg a0, TCGReg a1, TCGReg a2)
148
+{
149
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
150
+ tgen_arithr(s, ARITH_SBB + rexw, a0, a2);
151
+}
152
+
153
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
154
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
155
+{
156
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
157
+ tgen_arithi(s, ARITH_SBB + rexw, a0, a2, 1);
158
+}
159
160
static const TCGOutOpAddSubCarry outop_subbio = {
161
- .base.static_constraint = C_NotImplemented,
162
+ .base.static_constraint = C_O1_I2(r, 0, re),
163
+ .out_rrr = tgen_subbio_rrr,
164
+ .out_rri = tgen_subbio_rri,
165
};
166
167
+#define outop_subbi outop_subbio
168
+
169
static void tcg_out_set_borrow(TCGContext *s)
170
{
171
- g_assert_not_reached();
172
+ tcg_out8(s, OPC_STC);
173
}
174
175
static void tgen_xor(TCGContext *s, TCGType type,
176
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
177
tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
178
break;
179
180
- OP_32_64(add2):
181
- if (const_args[4]) {
182
- tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
183
- } else {
184
- tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
185
- }
186
- if (const_args[5]) {
187
- tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
188
- } else {
189
- tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
190
- }
191
- break;
192
- OP_32_64(sub2):
193
- if (const_args[4]) {
194
- tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
195
- } else {
196
- tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
197
- }
198
- if (const_args[5]) {
199
- tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
200
- } else {
201
- tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
202
- }
203
- break;
204
-
205
#if TCG_TARGET_REG_BITS == 64
206
case INDEX_op_ld32s_i64:
207
tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
208
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
209
case INDEX_op_st_i64:
210
return C_O0_I2(re, r);
211
212
- case INDEX_op_add2_i32:
213
- case INDEX_op_add2_i64:
214
- case INDEX_op_sub2_i32:
215
- case INDEX_op_sub2_i64:
216
- return C_N1_O1_I4(r, r, 0, 1, re, re);
217
-
218
case INDEX_op_qemu_ld_i32:
219
return C_O1_I1(r, L);
220
221
--
222
2.43.0
Deleted patch
1
Using addci with two zeros as input in order to capture the value
2
of the carry-in bit is common. Special-case this with sbb+neg so
3
that we do not have to load 0 into a register first.
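
In C terms, a sketch of the idiom (standalone, not the patch code):
with carry-in C, "sbb r,r" computes r - r - C = -C, and "neg r" then
yields C itself, so no zero needs to be materialized:

    /* carry_in is 0 or 1; the result is the same 0 or 1. */
    static unsigned carry_to_reg(unsigned r, unsigned carry_in)
    {
        r = r - r - carry_in;   /* sbb r,r: 0 or 0xffffffff */
        return 0u - r;          /* neg r:   0 or 1 */
    }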
4
1
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/i386/tcg-target-con-set.h | 1 +
9
tcg/i386/tcg-target.c.inc | 46 ++++++++++++++++++++++++++++++++---
10
2 files changed, 44 insertions(+), 3 deletions(-)
11
12
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/i386/tcg-target-con-set.h
15
+++ b/tcg/i386/tcg-target-con-set.h
16
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, L, L)
17
C_O1_I2(r, r, r)
18
C_O1_I2(r, r, re)
19
C_O1_I2(r, r, ri)
20
+C_O1_I2(r, rO, re)
21
C_O1_I2(x, x, x)
22
C_N1_I2(r, r, r)
23
C_N1_I2(r, r, rW)
24
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
25
index XXXXXXX..XXXXXXX 100644
26
--- a/tcg/i386/tcg-target.c.inc
27
+++ b/tcg/i386/tcg-target.c.inc
28
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_addcio = {
29
.out_rri = tgen_addcio_imm,
30
};
31
32
+static void tgen_addci_rrr(TCGContext *s, TCGType type,
33
+ TCGReg a0, TCGReg a1, TCGReg a2)
34
+{
35
+ /* Because "0O" is not a valid constraint, we must match ourselves. */
36
+ if (a0 == a2) {
37
+ tgen_addcio(s, type, a0, a0, a1);
38
+ } else {
39
+ tcg_out_mov(s, type, a0, a1);
40
+ tgen_addcio(s, type, a0, a0, a2);
41
+ }
42
+}
43
+
44
+static void tgen_addci_rri(TCGContext *s, TCGType type,
45
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
46
+{
47
+ tcg_out_mov(s, type, a0, a1);
48
+ tgen_addcio_imm(s, type, a0, a0, a2);
49
+}
50
+
51
+static void tgen_addci_rir(TCGContext *s, TCGType type,
52
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
53
+{
54
+ tgen_addci_rri(s, type, a0, a2, a1);
55
+}
56
+
57
+static void tgen_addci_rii(TCGContext *s, TCGType type, TCGReg a0,
58
+ tcg_target_long a1, tcg_target_long a2)
59
+{
60
+ if (a2 == 0) {
61
+ /* Implement 0 + 0 + C with -(x - x - c). */
62
+ tgen_arithr(s, ARITH_SBB, a0, a0);
63
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, a0);
64
+ } else {
65
+ tcg_out_movi(s, type, a0, a2);
66
+ tgen_addcio_imm(s, type, a0, a0, a1);
67
+ }
68
+}
69
+
70
static const TCGOutOpAddSubCarry outop_addci = {
71
- .base.static_constraint = C_O1_I2(r, 0, re),
72
- .out_rrr = tgen_addcio,
73
- .out_rri = tgen_addcio_imm,
74
+ .base.static_constraint = C_O1_I2(r, rO, re),
75
+ .out_rrr = tgen_addci_rrr,
76
+ .out_rri = tgen_addci_rri,
77
+ .out_rir = tgen_addci_rir,
78
+ .out_rii = tgen_addci_rii,
79
};
80
81
static void tcg_out_set_carry(TCGContext *s)
82
--
83
2.43.0
Deleted patch
1
Create a function for performing an add with carry-in
2
and producing carry out. The carry-out result is boolean.
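
A reference model in plain C (mirroring the setcond-based fallback
added below, not the TCG API itself):

    #include <stdint.h>

    /* r = a + b + ci; co is the boolean carry out.  At most one of
       the two partial carries can be set, so OR-ing them is exact. */
    static void addcio32(uint32_t a, uint32_t b, uint32_t ci,
                         uint32_t *r, uint32_t *co)
    {
        uint32_t t0 = a + b;
        uint32_t c0 = t0 < a;   /* carry out of a + b */
        *r = t0 + ci;
        uint32_t c1 = *r < t0;  /* carry out of adding ci */
        *co = c0 | c1;
    }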
3
1
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-op-common.h | 4 ++
8
include/tcg/tcg-op.h | 2 +
9
tcg/tcg-op.c | 95 +++++++++++++++++++++++++++++++++++++
10
3 files changed, 101 insertions(+)
11
12
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/tcg/tcg-op-common.h
15
+++ b/include/tcg/tcg-op-common.h
16
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
17
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
18
void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
19
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
20
+void tcg_gen_addcio_i32(TCGv_i32 r, TCGv_i32 co,
21
+ TCGv_i32 a, TCGv_i32 b, TCGv_i32 ci);
22
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
23
void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
24
void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
25
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
26
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
27
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
28
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
29
+void tcg_gen_addcio_i64(TCGv_i64 r, TCGv_i64 co,
30
+ TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci);
31
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
32
void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
33
void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
34
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
35
index XXXXXXX..XXXXXXX 100644
36
--- a/include/tcg/tcg-op.h
37
+++ b/include/tcg/tcg-op.h
38
@@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
39
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
40
#define tcg_gen_add2_tl tcg_gen_add2_i64
41
#define tcg_gen_sub2_tl tcg_gen_sub2_i64
42
+#define tcg_gen_addcio_tl tcg_gen_addcio_i64
43
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64
44
#define tcg_gen_muls2_tl tcg_gen_muls2_i64
45
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i64
46
@@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
47
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
48
#define tcg_gen_add2_tl tcg_gen_add2_i32
49
#define tcg_gen_sub2_tl tcg_gen_sub2_i32
50
+#define tcg_gen_addcio_tl tcg_gen_addcio_i32
51
#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32
52
#define tcg_gen_muls2_tl tcg_gen_muls2_i32
53
#define tcg_gen_mulsu2_tl tcg_gen_mulsu2_i32
54
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/tcg/tcg-op.c
57
+++ b/tcg/tcg-op.c
58
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
59
}
60
}
61
62
+void tcg_gen_addcio_i32(TCGv_i32 r, TCGv_i32 co,
63
+ TCGv_i32 a, TCGv_i32 b, TCGv_i32 ci)
64
+{
65
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) {
66
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
67
+ TCGv_i32 zero = tcg_constant_i32(0);
68
+ TCGv_i32 mone = tcg_constant_i32(-1);
69
+
70
+ tcg_gen_op3_i32(INDEX_op_addco, t0, ci, mone);
71
+ tcg_gen_op3_i32(INDEX_op_addcio, r, a, b);
72
+ tcg_gen_op3_i32(INDEX_op_addci, co, zero, zero);
73
+ tcg_temp_free_i32(t0);
74
+ } else {
75
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
76
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
77
+
78
+ tcg_gen_add_i32(t0, a, b);
79
+ tcg_gen_setcond_i32(TCG_COND_LTU, t1, t0, a);
80
+ tcg_gen_add_i32(r, t0, ci);
81
+ tcg_gen_setcond_i32(TCG_COND_LTU, t0, r, t0);
82
+ tcg_gen_or_i32(co, t0, t1);
83
+
84
+ tcg_temp_free_i32(t0);
85
+ tcg_temp_free_i32(t1);
86
+ }
87
+}
88
+
89
void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
90
TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh)
91
{
92
@@ -XXX,XX +XXX,XX @@ void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
93
}
94
}
95
96
+void tcg_gen_addcio_i64(TCGv_i64 r, TCGv_i64 co,
97
+ TCGv_i64 a, TCGv_i64 b, TCGv_i64 ci)
98
+{
99
+ if (TCG_TARGET_REG_BITS == 64) {
100
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I64, 0)) {
101
+ TCGv_i64 discard = tcg_temp_ebb_new_i64();
102
+ TCGv_i64 zero = tcg_constant_i64(0);
103
+ TCGv_i64 mone = tcg_constant_i64(-1);
104
+
105
+ tcg_gen_op3_i64(INDEX_op_addco, discard, ci, mone);
106
+ tcg_gen_op3_i64(INDEX_op_addcio, r, a, b);
107
+ tcg_gen_op3_i64(INDEX_op_addci, co, zero, zero);
108
+ tcg_temp_free_i64(discard);
109
+ } else {
110
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
111
+ TCGv_i64 t1 = tcg_temp_ebb_new_i64();
112
+
113
+ tcg_gen_add_i64(t0, a, b);
114
+ tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, a);
115
+ tcg_gen_add_i64(r, t0, ci);
116
+ tcg_gen_setcond_i64(TCG_COND_LTU, t0, r, t0);
117
+ tcg_gen_or_i64(co, t0, t1);
118
+
119
+ tcg_temp_free_i64(t0);
120
+ tcg_temp_free_i64(t1);
121
+ }
122
+ } else {
123
+ if (tcg_op_supported(INDEX_op_addci, TCG_TYPE_I32, 0)) {
124
+ TCGv_i32 discard = tcg_temp_ebb_new_i32();
125
+ TCGv_i32 zero = tcg_constant_i32(0);
126
+ TCGv_i32 mone = tcg_constant_i32(-1);
127
+
128
+ tcg_gen_op3_i32(INDEX_op_addco, discard, TCGV_LOW(ci), mone);
129
+ tcg_gen_op3_i32(INDEX_op_addcio, discard, TCGV_HIGH(ci), mone);
130
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_LOW(r),
131
+ TCGV_LOW(a), TCGV_LOW(b));
132
+ tcg_gen_op3_i32(INDEX_op_addcio, TCGV_HIGH(r),
133
+ TCGV_HIGH(a), TCGV_HIGH(b));
134
+ tcg_gen_op3_i32(INDEX_op_addci, TCGV_LOW(co), zero, zero);
135
+ tcg_temp_free_i32(discard);
136
+ } else {
137
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
138
+ TCGv_i32 c0 = tcg_temp_ebb_new_i32();
139
+ TCGv_i32 c1 = tcg_temp_ebb_new_i32();
140
+
141
+ tcg_gen_or_i32(c1, TCGV_LOW(ci), TCGV_HIGH(ci));
142
+ tcg_gen_setcondi_i32(TCG_COND_NE, c1, c1, 0);
143
+
144
+ tcg_gen_add_i32(t0, TCGV_LOW(a), TCGV_LOW(b));
145
+ tcg_gen_setcond_i32(TCG_COND_LTU, c0, t0, TCGV_LOW(a));
146
+ tcg_gen_add_i32(TCGV_LOW(r), t0, c1);
147
+ tcg_gen_setcond_i32(TCG_COND_LTU, c1, TCGV_LOW(r), c1);
148
+ tcg_gen_or_i32(c1, c1, c0);
149
+
150
+ tcg_gen_add_i32(t0, TCGV_HIGH(a), TCGV_HIGH(b));
151
+ tcg_gen_setcond_i32(TCG_COND_LTU, c0, t0, TCGV_HIGH(a));
152
+ tcg_gen_add_i32(TCGV_HIGH(r), t0, c1);
153
+ tcg_gen_setcond_i32(TCG_COND_LTU, c1, TCGV_HIGH(r), c1);
154
+ tcg_gen_or_i32(TCGV_LOW(co), c0, c1);
155
+
156
+ tcg_temp_free_i32(t0);
157
+ tcg_temp_free_i32(c0);
158
+ tcg_temp_free_i32(c1);
159
+ }
160
+ tcg_gen_movi_i32(TCGV_HIGH(co), 0);
161
+ }
162
+}
163
+
164
void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
165
TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
166
{
167
--
168
2.43.0
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/arm/tcg/translate-a64.c | 8 ++------
6
target/arm/tcg/translate.c | 17 +++--------------
7
2 files changed, 5 insertions(+), 20 deletions(-)
8
1
9
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/arm/tcg/translate-a64.c
12
+++ b/target/arm/tcg/translate-a64.c
13
@@ -XXX,XX +XXX,XX @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
14
TCGv_i64 cf_64 = tcg_temp_new_i64();
15
TCGv_i64 vf_64 = tcg_temp_new_i64();
16
TCGv_i64 tmp = tcg_temp_new_i64();
17
- TCGv_i64 zero = tcg_constant_i64(0);
18
19
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
20
- tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
21
- tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
22
+ tcg_gen_addcio_i64(result, cf_64, t0, t1, cf_64);
23
tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
24
gen_set_NZ64(result);
25
26
@@ -XXX,XX +XXX,XX @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
27
TCGv_i32 t0_32 = tcg_temp_new_i32();
28
TCGv_i32 t1_32 = tcg_temp_new_i32();
29
TCGv_i32 tmp = tcg_temp_new_i32();
30
- TCGv_i32 zero = tcg_constant_i32(0);
31
32
tcg_gen_extrl_i64_i32(t0_32, t0);
33
tcg_gen_extrl_i64_i32(t1_32, t1);
34
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
35
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
36
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0_32, t1_32, cpu_CF);
37
38
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
39
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
40
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/arm/tcg/translate.c
43
+++ b/target/arm/tcg/translate.c
44
@@ -XXX,XX +XXX,XX @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
45
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
46
{
47
TCGv_i32 tmp = tcg_temp_new_i32();
48
- if (tcg_op_supported(INDEX_op_add2_i32, TCG_TYPE_I32, 0)) {
49
- tcg_gen_movi_i32(tmp, 0);
50
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
51
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
52
- } else {
53
- TCGv_i64 q0 = tcg_temp_new_i64();
54
- TCGv_i64 q1 = tcg_temp_new_i64();
55
- tcg_gen_extu_i32_i64(q0, t0);
56
- tcg_gen_extu_i32_i64(q1, t1);
57
- tcg_gen_add_i64(q0, q0, q1);
58
- tcg_gen_extu_i32_i64(q1, cpu_CF);
59
- tcg_gen_add_i64(q0, q0, q1);
60
- tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
61
- }
62
+
63
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0, t1, cpu_CF);
64
+
65
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
66
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
67
tcg_gen_xor_i32(tmp, t0, t1);
68
--
69
2.43.0
70
71
Deleted patch
1
Use this in do_add, do_sub, and do_ds, all of which need
2
add with carry-in and carry-out.
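
A quick standalone check (C with GCC's __int128, not QEMU code) that
the single carry-in/carry-out add matches the old two-step add2 chain
being replaced:

    #include <stdint.h>
    #include <assert.h>

    static void check(uint64_t in1, uint64_t in2, uint64_t ci)
    {
        /* Old: (dest, cb) = in1 + ci, then (dest, cb) += in2. */
        unsigned __int128 two_step =
            ((unsigned __int128)in1 + ci) + in2;
        /* New: one add with carry-in; carry out lands in bit 64. */
        unsigned __int128 one_step =
            (unsigned __int128)in1 + in2 + ci;
        assert(two_step == one_step);
    }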
3
1
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hppa/translate.c | 17 ++++++-----------
9
1 file changed, 6 insertions(+), 11 deletions(-)
10
11
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hppa/translate.c
14
+++ b/target/hppa/translate.c
15
@@ -XXX,XX +XXX,XX @@ static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
16
cb_msb = tcg_temp_new_i64();
17
cb = tcg_temp_new_i64();
18
19
- tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
20
if (is_c) {
21
- tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
22
- get_psw_carry(ctx, d), ctx->zero);
23
+ tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d));
24
+ } else {
25
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
26
}
27
tcg_gen_xor_i64(cb, in1, in2);
28
tcg_gen_xor_i64(cb, cb, dest);
29
@@ -XXX,XX +XXX,XX @@ static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
30
if (is_b) {
31
/* DEST,C = IN1 + ~IN2 + C. */
32
tcg_gen_not_i64(cb, in2);
33
- tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
34
- get_psw_carry(ctx, d), ctx->zero);
35
- tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
36
+ tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d));
37
tcg_gen_xor_i64(cb, cb, in1);
38
tcg_gen_xor_i64(cb, cb, dest);
39
} else {
40
@@ -XXX,XX +XXX,XX @@ static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
41
tcg_gen_xor_i64(add2, in2, addc);
42
tcg_gen_andi_i64(addc, addc, 1);
43
44
- tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
45
- tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
46
- addc, ctx->zero);
47
+ tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc);
48
49
/* Write back the result register. */
50
save_gpr(ctx, a->t, dest);
51
@@ -XXX,XX +XXX,XX @@ static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
52
TCGv_i64 cb = tcg_temp_new_i64();
53
TCGv_i64 cb_msb = tcg_temp_new_i64();
54
55
- tcg_gen_movi_i64(cb_msb, 0);
56
- tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
57
+ tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
58
tcg_gen_xor_i64(cb, in1, in2);
59
tcg_gen_xor_i64(cb, cb, dest);
60
cb_cond = get_carry(ctx, d, cb, cb_msb);
61
--
62
2.43.0
63
64
Deleted patch
1
Use this in gen_addc and gen_rsubc, both of which need
2
add with carry-in and carry-out.
3
1
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/microblaze/translate.c | 10 ++--------
9
1 file changed, 2 insertions(+), 8 deletions(-)
10
11
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/microblaze/translate.c
14
+++ b/target/microblaze/translate.c
15
@@ -XXX,XX +XXX,XX @@ static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
16
/* Input and output carry. */
17
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
18
{
19
- TCGv_i32 zero = tcg_constant_i32(0);
20
- TCGv_i32 tmp = tcg_temp_new_i32();
21
-
22
- tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
23
- tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
24
+ tcg_gen_addcio_i32(out, cpu_msr_c, ina, inb, cpu_msr_c);
25
}
26
27
/* Input carry, but no output carry. */
28
@@ -XXX,XX +XXX,XX @@ static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
29
/* Input and output carry. */
30
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
31
{
32
- TCGv_i32 zero = tcg_constant_i32(0);
33
TCGv_i32 tmp = tcg_temp_new_i32();
34
35
tcg_gen_not_i32(tmp, ina);
36
- tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
37
- tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
38
+ tcg_gen_addcio_i32(out, cpu_msr_c, tmp, inb, cpu_msr_c);
39
}
40
41
/* No input or output carry. */
42
--
43
2.43.0
44
45
Deleted patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/openrisc/translate.c | 3 +--
6
1 file changed, 1 insertion(+), 2 deletions(-)
7
1
8
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/target/openrisc/translate.c
11
+++ b/target/openrisc/translate.c
12
@@ -XXX,XX +XXX,XX @@ static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
13
TCGv t0 = tcg_temp_new();
14
TCGv res = tcg_temp_new();
15
16
- tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, cpu_sr_cy, dc->zero);
17
- tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, dc->zero);
18
+ tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy);
19
tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
20
tcg_gen_xor_tl(t0, res, srcb);
21
tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
22
--
23
2.43.0
24
25
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/s390x/tcg/translate.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
compute_carry(s);
-
- TCGv_i64 zero = tcg_constant_i64(0);
- tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
- tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
-
+ tcg_gen_addcio_i64(o->out, cc_src, o->in1, o->in2, cc_src);
return DISAS_NEXT;
}

--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sh4/translate.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
return;
case 0x300e: /* addc Rm,Rn */
- {
- TCGv t0, t1;
- t0 = tcg_constant_tl(0);
- t1 = tcg_temp_new();
- tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
- tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
- REG(B11_8), t0, t1, cpu_sr_t);
- }
+ tcg_gen_addcio_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), REG(B7_4), cpu_sr_t);
return;
case 0x300f: /* addv Rm,Rn */
{
--
2.43.0
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/translate.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
TCGv z = tcg_constant_tl(0);

if (cin) {
- tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
- tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
+ tcg_gen_addcio_tl(cpu_cc_N, cpu_cc_C, src1, src2, cin);
} else {
tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
}
--
2.43.0
Deleted patch
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/tricore/translate.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)

static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
{
- TCGv carry = tcg_temp_new_i32();
- TCGv t0 = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
TCGv result = tcg_temp_new_i32();

- tcg_gen_movi_tl(t0, 0);
- tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
/* Addition, carry and set C/V/SV bits */
- tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
- tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
+ tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C);
/* calc V bit */
tcg_gen_xor_tl(cpu_PSW_V, result, r1);
tcg_gen_xor_tl(t0, r1, r2);
--
2.43.0
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target-con-set.h | 3 +-
tcg/aarch64/tcg-target-has.h | 8 +-
tcg/aarch64/tcg-target.c.inc | 227 ++++++++++++++++++++-----------
3 files changed, 150 insertions(+), 88 deletions(-)

diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rAL)
C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
+C_O1_I2(r, rZ, rA)
+C_O1_I2(r, rz, rMZ)
C_O1_I2(r, rz, rz)
C_O1_I2(r, rZ, rZ)
C_O1_I2(w, 0, w)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
C_O1_I4(r, r, rC, rz, rz)
C_O2_I1(r, r, r)
-C_O2_I4(r, r, rz, rz, rA, rMZ)
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target-has.h
+++ b/tcg/aarch64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
#define have_lse2 (cpuinfo & CPUINFO_LSE2)

/* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0

/*
* Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {

/* Add/subtract with carry instructions. */
I3503_ADC = 0x1a000000,
+ I3503_ADCS = 0x3a000000,
I3503_SBC = 0x5a000000,
+ I3503_SBCS = 0x7a000000,

/* Conditional select instructions. */
I3506_CSEL = 0x1a800000,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}

-static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
- TCGReg rh, TCGReg al, TCGReg ah,
- tcg_target_long bl, tcg_target_long bh,
- bool const_bl, bool const_bh, bool sub)
-{
- TCGReg orig_rl = rl;
- AArch64Insn insn;
-
- if (rl == ah || (!const_bh && rl == bh)) {
- rl = TCG_REG_TMP0;
- }
-
- if (const_bl) {
- if (bl < 0) {
- bl = -bl;
- insn = sub ? I3401_ADDSI : I3401_SUBSI;
- } else {
- insn = sub ? I3401_SUBSI : I3401_ADDSI;
- }
-
- if (unlikely(al == TCG_REG_XZR)) {
- /* ??? We want to allow al to be zero for the benefit of
- negation via subtraction. However, that leaves open the
- possibility of adding 0+const in the low part, and the
- immediate add instructions encode XSP not XZR. Don't try
- anything more elaborate here than loading another zero. */
- al = TCG_REG_TMP0;
- tcg_out_movi(s, ext, al, 0);
- }
- tcg_out_insn_3401(s, insn, ext, rl, al, bl);
- } else {
- tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
- }
-
- insn = I3503_ADC;
- if (const_bh) {
- /* Note that the only two constants we support are 0 and -1, and
- that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */
- if ((bh != 0) ^ sub) {
- insn = I3503_SBC;
- }
- bh = TCG_REG_XZR;
- } else if (sub) {
- insn = I3503_SBC;
- }
- tcg_out_insn_3503(s, insn, ext, rh, ah, bh);
-
- tcg_out_mov(s, ext, orig_rl, rl);
-}
-
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
static const uint32_t sync[] = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
.out_rri = tgen_addi,
};

+static void tgen_addco(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, ADDS, type, a0, a1, a2);
+}
+
+static void tgen_addco_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, ADDSI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, SUBSI, type, a0, a1, -a2);
+ }
+}
+
static const TCGOutOpBinary outop_addco = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rA),
+ .out_rrr = tgen_addco,
+ .out_rri = tgen_addco_imm,
};

+static void tgen_addci_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, ADC, type, a0, a1, a2);
+}
+
+static void tgen_addci_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /*
+ * Note that the only two constants we support are 0 and -1, and
+ * that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa.
+ */
+ if (a2) {
+ tcg_out_insn(s, 3503, SBC, type, a0, a1, TCG_REG_XZR);
+ } else {
+ tcg_out_insn(s, 3503, ADC, type, a0, a1, TCG_REG_XZR);
+ }
+}
+
static const TCGOutOpAddSubCarry outop_addci = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_addci_rrr,
+ .out_rri = tgen_addci_rri,
};

+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, ADCS, type, a0, a1, a2);
+}
+
+static void tgen_addcio_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ /* Use SBCS w/0 for ADCS w/-1 -- see above. */
+ if (a2) {
+ tcg_out_insn(s, 3503, SBCS, type, a0, a1, TCG_REG_XZR);
+ } else {
+ tcg_out_insn(s, 3503, ADCS, type, a0, a1, TCG_REG_XZR);
+ }
+}
+
static const TCGOutOpBinary outop_addcio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_addcio,
+ .out_rri = tgen_addcio_imm,
};

static void tcg_out_set_carry(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out_insn(s, 3502, SUBS, TCG_TYPE_I32,
+ TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR);
}

static void tgen_and(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
.out_rrr = tgen_sub,
};

+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3502, SUBS, type, a0, a1, a2);
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 >= 0) {
+ tcg_out_insn(s, 3401, SUBSI, type, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3401, ADDSI, type, a0, a1, -a2);
+ }
+}
+
+static void tgen_subbo_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, a2);
+}
+
+static void tgen_subbo_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ if (a2 == 0) {
+ tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, TCG_REG_XZR);
+ return;
+ }
+
+ /*
+ * We want to allow a1 to be zero for the benefit of negation via
+ * subtraction. However, that leaves open the possibility of
+ * adding 0 +/- const, and the immediate add/sub instructions
+ * encode XSP not XZR. Since we have 0 - non-zero, borrow is
+ * always set.
+ */
+ tcg_out_movi(s, type, a0, -a2);
+ tcg_out_set_borrow(s);
+}
+
static const TCGOutOpAddSubCarry outop_subbo = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rZ, rA),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+ .out_rir = tgen_subbo_rir,
+ .out_rii = tgen_subbo_rii,
};

+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, SBC, type, a0, a1, a2);
+}
+
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_addci_rri(s, type, a0, a1, ~a2);
+}
+
static const TCGOutOpAddSubCarry outop_subbi = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_subbi_rrr,
+ .out_rri = tgen_subbi_rri,
};

+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_insn(s, 3503, SBCS, type, a0, a1, a2);
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_addcio_imm(s, type, a0, a1, ~a2);
+}
+
static const TCGOutOpAddSubCarry outop_subbio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rz, rMZ),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
};

static void tcg_out_set_borrow(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out_insn(s, 3502, ADDS, TCG_TYPE_I32,
+ TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR);
}

static void tgen_xor(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
break;

- case INDEX_op_add2_i32:
- tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
- (int32_t)args[4], args[5], const_args[4],
- const_args[5], false);
- break;
- case INDEX_op_add2_i64:
- tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, a2, args[3], args[4],
- args[5], const_args[4], const_args[5], false);
- break;
- case INDEX_op_sub2_i32:
- tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, a2, args[3],
- (int32_t)args[4], args[5], const_args[4],
- const_args[5], true);
- break;
- case INDEX_op_sub2_i64:
- tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, a2, args[3], args[4],
- args[5], const_args[4], const_args[5], true);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(rz, rz, r);

- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, rz, rz, rA, rMZ);
-
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
case INDEX_op_mul_vec:
--
2.43.0
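The constant handling in tgen_addci_rri() and tgen_addcio_imm() above
rests on one identity; a short C sketch of it, as an illustration only:

#include <stdint.h>

/* AArch64 SBC computes rn + ~rm + carry.  With rm = 0 this is
   rn + ~0 + carry, i.e. exactly ADC of -1; so the only two immediates
   supported, 0 and -1, both reduce to the XZR register operand. */
static uint64_t adc(uint64_t rn, uint64_t rm, unsigned carry)
{
    return rn + rm + carry;
}

static uint64_t sbc(uint64_t rn, uint64_t rm, unsigned carry)
{
    return rn + ~rm + carry;
}

/* For all rn and carry: adc(rn, -1, carry) == sbc(rn, 0, carry). */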
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/arm/tcg-target-con-set.h | 4 +-
tcg/arm/tcg-target-has.h | 4 +-
tcg/arm/tcg-target.c.inc | 212 ++++++++++++++++++++++++++---------
3 files changed, 161 insertions(+), 59 deletions(-)

diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-con-set.h
+++ b/tcg/arm/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rIN)
C_O1_I2(r, r, ri)
C_O1_I2(r, rI, r)
+C_O1_I2(r, rI, rIK)
+C_O1_I2(r, rI, rIN)
C_O1_I2(r, rZ, rZ)
C_O1_I2(w, 0, w)
C_O1_I2(w, w, w)
@@ -XXX,XX +XXX,XX @@ C_O1_I4(r, r, rIN, rIK, 0)
C_O2_I1(e, p, q)
C_O2_I2(e, p, q, q)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, r, r, rIN, rIK)
-C_O2_I4(r, r, rI, rI, rIN, rIK)
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-has.h
+++ b/tcg/arm/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_qemu_ldst_i128 0
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
INSN_DMB_ISH = 0xf57ff05b,
INSN_DMB_MCR = 0xee070fba,

+ INSN_MSRI_CPSR = 0x0360f000,
+
/* Architected nop introduced in v6k. */
/* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this
also Just So Happened to do nothing on pre-v6k so that we
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
.out_rri = tgen_addi,
};

+static void tgen_addco(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addco_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
+ a0, a1, a2);
+}
+
static const TCGOutOpBinary outop_addco = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rIN),
+ .out_rrr = tgen_addco,
+ .out_rri = tgen_addco_imm,
};

+static void tgen_addci(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADC, a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addci_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_ADC, ARITH_SBC, a0, a1, a2);
+}
+
static const TCGOutOpAddSubCarry outop_addci = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_addci,
+ .out_rri = tgen_addci_imm,
};

+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADC | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_addcio_imm(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_ADC | TO_CPSR, ARITH_SBC | TO_CPSR,
+ a0, a1, a2);
+}
+
static const TCGOutOpBinary outop_addcio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rIK),
+ .out_rrr = tgen_addcio,
+ .out_rri = tgen_addcio_imm,
};

+/* Set C to @c; NZVQ all set to 0. */
+static void tcg_out_movi_apsr_c(TCGContext *s, bool c)
+{
+ int imm12 = encode_imm_nofail(c << 29);
+ tcg_out32(s, (COND_AL << 28) | INSN_MSRI_CPSR | 0x80000 | imm12);
+}
+
static void tcg_out_set_carry(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out_movi_apsr_c(s, 1);
}

static void tgen_and(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
.out_rir = tgen_subfi,
};

+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SUB | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IN(s, COND_AL, ARITH_SUB | TO_CPSR, ARITH_ADD | TO_CPSR,
+ a0, a1, a2);
+}
+
+static void tgen_subbo_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSB | TO_CPSR,
+ a0, a2, encode_imm_nofail(a1));
+}
+
+static void tgen_subbo_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
+ tgen_subbo_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
+}
+
static const TCGOutOpAddSubCarry outop_subbo = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rI, rIN),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+ .out_rir = tgen_subbo_rir,
+ .out_rii = tgen_subbo_rii,
};

+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SBC,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_SBC, ARITH_ADC, a0, a1, a2);
+}
+
+static void tgen_subbi_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSC, a0, a2, encode_imm_nofail(a1));
+}
+
+static void tgen_subbi_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
+ tgen_subbi_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
+}
+
static const TCGOutOpAddSubCarry outop_subbi = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rI, rIK),
+ .out_rrr = tgen_subbi_rrr,
+ .out_rri = tgen_subbi_rri,
+ .out_rir = tgen_subbi_rir,
+ .out_rii = tgen_subbi_rii,
};

+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_dat_reg(s, COND_AL, ARITH_SBC | TO_CPSR,
+ a0, a1, a2, SHIFT_IMM_LSL(0));
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_dat_IK(s, COND_AL, ARITH_SBC | TO_CPSR, ARITH_ADC | TO_CPSR,
+ a0, a1, a2);
+}
+
+static void tgen_subbio_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_out_dat_imm(s, COND_AL, ARITH_RSC | TO_CPSR,
+ a0, a2, encode_imm_nofail(a1));
+}
+
+static void tgen_subbio_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, a2);
+ tgen_subbio_rir(s, TCG_TYPE_I32, a0, a1, TCG_REG_TMP);
+}
+
static const TCGOutOpAddSubCarry outop_subbio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, rI, rIK),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+ .out_rir = tgen_subbio_rir,
+ .out_rii = tgen_subbio_rii,
};

static void tcg_out_set_borrow(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out_movi_apsr_c(s, 0); /* borrow = !carry */
}

static void tgen_xor(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0, a1, a2, a3, a4, a5;
-
switch (opc) {
case INDEX_op_goto_ptr:
tcg_out_b_reg(s, COND_AL, args[0]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
break;

- case INDEX_op_add2_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- a3 = args[3], a4 = args[4], a5 = args[5];
- if (a0 == a3 || (a0 == a5 && !const_args[5])) {
- a0 = TCG_REG_TMP;
- }
- tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
- a0, a2, a4, const_args[4]);
- tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
- a1, a3, a5, const_args[5]);
- tcg_out_mov_reg(s, COND_AL, args[0], a0);
- break;
- case INDEX_op_sub2_i32:
- a0 = args[0], a1 = args[1], a2 = args[2];
- a3 = args[3], a4 = args[4], a5 = args[5];
- if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
- a0 = TCG_REG_TMP;
- }
- if (const_args[2]) {
- if (const_args[4]) {
- tcg_out_movi32(s, COND_AL, a0, a4);
- a4 = a0;
- }
- tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
- } else {
- tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
- ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
- }
- if (const_args[3]) {
- if (const_args[5]) {
- tcg_out_movi32(s, COND_AL, a1, a5);
- a5 = a1;
- }
- tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
- } else {
- tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
- a1, a3, a5, const_args[5]);
- }
- tcg_out_mov_reg(s, COND_AL, args[0], a0);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i32:
return C_O0_I2(r, r);

- case INDEX_op_add2_i32:
- return C_O2_I4(r, r, r, r, rIN, rIK);
- case INDEX_op_sub2_i32:
- return C_O2_I4(r, r, rI, rI, rIN, rIK);
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, q);
case INDEX_op_qemu_ld_i64:
--
2.43.0
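For reference, the flag layout that tcg_out_movi_apsr_c() depends on, as
a sketch; the macro and helper names here are invented for illustration:

#include <stdint.h>

/* The ARM APSR holds N,Z,C,V,Q in bits 31..27, so carry is bit 29.
   "MSR APSR_nzcvq, #(c << 29)" therefore sets C = c and clears
   N, Z, V and Q, which is all that set_carry/set_borrow need. */
#define APSR_C_BIT (1u << 29)

static uint32_t apsr_flags_for_carry(int c)
{
    return c ? APSR_C_BIT : 0;
}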
Deleted patch
Tested-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target-con-set.h | 5 +-
tcg/ppc/tcg-target-con-str.h | 1 +
tcg/ppc/tcg-target-has.h | 11 +-
tcg/ppc/tcg-target.c.inc | 227 ++++++++++++++++++++++-------------
4 files changed, 154 insertions(+), 90 deletions(-)

diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rT)
C_O1_I2(r, r, rU)
+C_O1_I2(r, r, rZM)
C_O1_I2(r, r, rZW)
+C_O1_I2(r, rI, rN)
+C_O1_I2(r, rZM, rZM)
C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
C_O1_I4(v, v, v, vZM, v)
@@ -XXX,XX +XXX,XX @@ C_O1_I4(r, r, r, rU, rC)
C_O2_I1(r, r, r)
C_N1O1_I1(o, m, r)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, rI, rZM, r, r)
-C_O2_I4(r, r, r, r, rI, rZM)
diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-con-str.h
+++ b/tcg/ppc/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('v', ALL_VECTOR_REGS)
CONST('C', TCG_CT_CONST_CMP)
CONST('I', TCG_CT_CONST_S16)
CONST('M', TCG_CT_CONST_MONE)
+CONST('N', TCG_CT_CONST_N16)
CONST('T', TCG_CT_CONST_S32)
CONST('U', TCG_CT_CONST_U32)
CONST('W', TCG_CT_CONST_WSZ)
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target-has.h
+++ b/tcg/ppc/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@

/* optional instructions */
#define TCG_TARGET_HAS_qemu_st8_i32 0
-
-#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_sub2_i32 0
+
+#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#else
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
#endif

#define TCG_TARGET_HAS_qemu_ldst_i128 \
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
/* Shorthand for size of a register. */
#define SZR (TCG_TARGET_REG_BITS / 8)

-#define TCG_CT_CONST_S16 0x100
-#define TCG_CT_CONST_U16 0x200
-#define TCG_CT_CONST_S32 0x400
-#define TCG_CT_CONST_U32 0x800
-#define TCG_CT_CONST_ZERO 0x1000
-#define TCG_CT_CONST_MONE 0x2000
-#define TCG_CT_CONST_WSZ 0x4000
-#define TCG_CT_CONST_CMP 0x8000
+#define TCG_CT_CONST_S16 0x00100
+#define TCG_CT_CONST_U16 0x00200
+#define TCG_CT_CONST_N16 0x00400
+#define TCG_CT_CONST_S32 0x00800
+#define TCG_CT_CONST_U32 0x01000
+#define TCG_CT_CONST_ZERO 0x02000
+#define TCG_CT_CONST_MONE 0x04000
+#define TCG_CT_CONST_WSZ 0x08000
+#define TCG_CT_CONST_CMP 0x10000

#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t sval, int ct,
if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) {
return 1;
}
+ if ((ct & TCG_CT_CONST_N16) && -sval == (int16_t)-sval) {
+ return 1;
+ }
if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
return 1;
}
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
.out_rri = tgen_addi,
};

+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ADDC | TAB(a0, a1, a2));
+}
+
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out32(s, ADDIC | TAI(a0, a1, a2));
+}
+
+static TCGConstraintSetIndex cset_addco(TCGType type, unsigned flags)
+{
+ /*
+ * Note that the CA bit is defined based on the word size of the
+ * environment. So in 64-bit mode it's always carry-out of bit 63.
+ * The fallback code using deposit works just as well for TCG_TYPE_I32.
+ */
+ return type == TCG_TYPE_REG ? C_O1_I2(r, r, rI) : C_NotImplemented;
+}
+
static const TCGOutOpBinary outop_addco = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addco,
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
+};
+
+static void tgen_addcio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, ADDE | TAB(a0, a1, a2));
+}
+
+static void tgen_addcio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out32(s, (a2 ? ADDME : ADDZE) | RT(a0) | RA(a1));
+}
+
+static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I2(r, r, rZM) : C_NotImplemented;
+}
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addcio,
+ .out_rrr = tgen_addcio_rrr,
+ .out_rri = tgen_addcio_rri,
};

static const TCGOutOpAddSubCarry outop_addci = {
- .base.static_constraint = C_NotImplemented,
-};
-
-static const TCGOutOpBinary outop_addcio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addcio,
+ .out_rrr = tgen_addcio_rrr,
+ .out_rri = tgen_addcio_rri,
};

static void tcg_out_set_carry(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out32(s, SUBFC | TAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_R0));
}

static void tgen_and(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
.out_rir = tgen_subfi,
};

+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBFC | TAB(a0, a2, a1));
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (a2 == 0) {
+ tcg_out_movi(s, type, TCG_REG_R0, 0);
+ tgen_subbo_rrr(s, type, a0, a1, TCG_REG_R0);
+ } else {
+ tgen_addco_rri(s, type, a0, a1, -a2);
+ }
+}
+
+/* The underlying insn for subfi is subfic. */
+#define tgen_subbo_rir tgen_subfi
+
+static void tgen_subbo_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, type, TCG_REG_R0, a2);
+ tgen_subbo_rir(s, type, a0, a1, TCG_REG_R0);
+}
+
+static TCGConstraintSetIndex cset_subbo(TCGType type, unsigned flags)
+{
+ /* Recall that the CA bit is defined based on the host word size. */
+ return type == TCG_TYPE_REG ? C_O1_I2(r, rI, rN) : C_NotImplemented;
+}
+
static const TCGOutOpAddSubCarry outop_subbo = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_subbo,
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+ .out_rir = tgen_subbo_rir,
+ .out_rii = tgen_subbo_rii,
};

-static const TCGOutOpAddSubCarry outop_subbi = {
- .base.static_constraint = C_NotImplemented,
-};
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out32(s, SUBFE | TAB(a0, a2, a1));
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_addcio_rri(s, type, a0, a1, ~a2);
+}
+
+static void tgen_subbio_rir(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, TCGReg a2)
+{
+ tcg_debug_assert(a1 == 0 || a1 == -1);
+ tcg_out32(s, (a1 ? SUBFME : SUBFZE) | RT(a0) | RA(a2));
+}
+
+static void tgen_subbio_rii(TCGContext *s, TCGType type,
+ TCGReg a0, tcg_target_long a1, tcg_target_long a2)
+{
+ tcg_out_movi(s, type, TCG_REG_R0, a2);
+ tgen_subbio_rir(s, type, a0, a1, TCG_REG_R0);
+}
+
+static TCGConstraintSetIndex cset_subbio(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_REG ? C_O1_I2(r, rZM, rZM) : C_NotImplemented;
+}

static const TCGOutOpAddSubCarry outop_subbio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_subbio,
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+ .out_rir = tgen_subbio_rir,
+ .out_rii = tgen_subbio_rii,
};

+#define outop_subbi outop_subbio
+
static void tcg_out_set_borrow(TCGContext *s)
{
- g_assert_not_reached();
+ /* borrow = !carry */
+ tcg_out32(s, ADDIC | TAI(TCG_REG_R0, TCG_REG_R0, 0));
}

static void tgen_xor(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0, a1;
-
switch (opc) {
case INDEX_op_goto_ptr:
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_add2_i64:
-#else
- case INDEX_op_add2_i32:
-#endif
- /* Note that the CA bit is defined based on the word size of the
- environment. So in 64-bit mode it's always carry-out of bit 63.
- The fallback code using deposit works just as well for 32-bit. */
- a0 = args[0], a1 = args[1];
- if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
- a0 = TCG_REG_R0;
- }
- if (const_args[4]) {
- tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
- } else {
- tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
- }
- if (const_args[5]) {
- tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
- } else {
- tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
- }
- if (a0 != args[0]) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
-
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_sub2_i64:
-#else
- case INDEX_op_sub2_i32:
-#endif
- a0 = args[0], a1 = args[1];
- if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
- a0 = TCG_REG_R0;
- }
- if (const_args[2]) {
- tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
- } else {
- tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
- }
- if (const_args[3]) {
- tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
- } else {
- tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
- }
- if (a0 != args[0]) {
- tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
- }
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, args[0]);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st_i64:
return C_O0_I2(r, r);

- case INDEX_op_add2_i64:
- case INDEX_op_add2_i32:
- return C_O2_I4(r, r, r, r, rI, rZM);
- case INDEX_op_sub2_i64:
- case INDEX_op_sub2_i32:
- return C_O2_I4(r, r, rI, rZM, r, r);
-
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, r);
case INDEX_op_qemu_ld_i64:
--
2.43.0
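The one-insn set_carry/set_borrow sequences chosen above follow from how
the subtract-from insns compute CA; a C sketch of the arithmetic, as an
illustration only:

#include <stdint.h>

/* SUBFC rt,ra,rb computes rt = ~ra + rb + 1 and sets CA to the carry
   out, which is 1 iff rb >= ra (unsigned).  With ra == rb the result
   is 0 and CA is always 1: a one-insn "set carry".  ADDIC rt,ra,0
   also writes CA, and adding zero can never carry, so it is a one-insn
   "clear carry", i.e. "set borrow" given borrow = !carry. */
static uint64_t subfc(uint64_t ra, uint64_t rb, unsigned *ca)
{
    *ca = rb >= ra;
    return ~ra + rb + 1;    /* == rb - ra mod 2^64 */
}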
Deleted patch
Do not clobber flags if they're live. Required in order
to perform register allocation on add/sub carry opcodes.
LA and AGHI are the same size, so use LA unconditionally.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target.c.inc | 35 +++++++++++++++++++++--------------
1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
if (pc_off == (int32_t)pc_off) {
tcg_out_insn(s, RIL, LARL, ret, pc_off);
if (sval & 1) {
- tcg_out_insn(s, RI, AGHI, ret, 1);
+ tcg_out_insn(s, RX, LA, ret, ret, TCG_REG_NONE, 1);
}
return;
}

- /* Otherwise, load it by parts. */
- i = is_const_p16((uint32_t)uval);
- if (i >= 0) {
- tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
- } else {
- tcg_out_insn(s, RIL, LLILF, ret, uval);
- }
- uval >>= 32;
- i = is_const_p16(uval);
- if (i >= 0) {
- tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
- } else {
- tcg_out_insn(s, RIL, OIHF, ret, uval);
+ if (!s->carry_live) {
+ /* Load by parts, at most 2 instructions. */
+ i = is_const_p16((uint32_t)uval);
+ if (i >= 0) {
+ tcg_out_insn_RI(s, li_insns[i], ret, uval >> (i * 16));
+ } else {
+ tcg_out_insn(s, RIL, LLILF, ret, uval);
+ }
+ uval >>= 32;
+ i = is_const_p16(uval);
+ if (i >= 0) {
+ tcg_out_insn_RI(s, oi_insns[i + 2], ret, uval >> (i * 16));
+ } else {
+ tcg_out_insn(s, RIL, OIHF, ret, uval);
+ }
+ return;
}
+
+ /* Otherwise, stuff it in the constant pool. */
+ tcg_out_insn(s, RIL, LGRL, ret, 0);
+ new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
}

/* Emit a load/store type instruction. Inputs are:
--
2.43.0
Deleted patch
We were using S32 | U32 for add2/sub2. But the ALGFI and SLGFI
insns that implement this both have uint32_t immediates.
This makes the composite range balanced and
enables use of -0xffffffff ... -0x80000001.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target-con-set.h | 2 +-
tcg/s390x/tcg-target-con-str.h | 1 +
tcg/s390x/tcg-target.c.inc | 8 ++++++--
3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O2_I2(o, m, 0, r)
C_O2_I2(o, m, r, r)
C_O2_I3(o, m, 0, 1, r)
C_N1_O1_I4(r, r, 0, 1, ri, r)
-C_N1_O1_I4(r, r, 0, 1, rJU, r)
+C_N1_O1_I4(r, r, 0, 1, rUV, r)
diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-con-str.h
+++ b/tcg/s390x/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('M', TCG_CT_CONST_M1)
CONST('N', TCG_CT_CONST_INV)
CONST('R', TCG_CT_CONST_INVRISBG)
CONST('U', TCG_CT_CONST_U32)
+CONST('V', TCG_CT_CONST_N32)
CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
#define TCG_CT_CONST_INVRISBG (1 << 14)
#define TCG_CT_CONST_CMP (1 << 15)
#define TCG_CT_CONST_M1 (1 << 16)
+#define TCG_CT_CONST_N32 (1 << 17)

#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
return true;
}
- if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
+ if ((ct & TCG_CT_CONST_U32) && uval <= UINT32_MAX) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_N32) && -uval <= UINT32_MAX) {
return true;
}
if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)

case INDEX_op_add2_i64:
case INDEX_op_sub2_i64:
- return C_N1_O1_I4(r, r, 0, 1, rJU, r);
+ return C_N1_O1_I4(r, r, 0, 1, rUV, r);

case INDEX_op_st_vec:
return C_O0_I2(v, r);
--
2.43.0
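To make the range change concrete, here is a worked comparison (not from
the patch itself):

#include <stdint.h>
#include <stdbool.h>

/* S32: [-0x80000000, 0x7fffffff]   U32: [0, 0xffffffff]
   N32: [-0xffffffff, 0]            (negated U32, for SLGFI)

   Old composite S32|U32: [-0x80000000, 0xffffffff]
   New composite U32|N32: [-0xffffffff, 0xffffffff]

   The gain is exactly -0xffffffff ... -0x80000001, each such value
   handled by emitting the opposite insn with the negated constant. */
static bool match_u32(uint64_t uval) { return uval <= UINT32_MAX; }
static bool match_n32(uint64_t uval) { return -uval <= UINT32_MAX; }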
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target-con-set.h | 4 +-
tcg/s390x/tcg-target-has.h | 8 +-
tcg/s390x/tcg-target.c.inc | 153 +++++++++++++++++++--------------
3 files changed, 96 insertions(+), 69 deletions(-)

diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
C_O1_I1(v, r)
C_O1_I1(v, v)
C_O1_I1(v, vr)
+C_O1_I2(r, 0, r)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, 0, rJ)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rK)
C_O1_I2(r, r, rNKR)
+C_O1_I2(r, r, rUV)
C_O1_I2(r, rZ, r)
C_O1_I2(v, v, r)
C_O1_I2(v, v, v)
@@ -XXX,XX +XXX,XX @@ C_O2_I1(o, m, r)
C_O2_I2(o, m, 0, r)
C_O2_I2(o, m, r, r)
C_O2_I3(o, m, 0, 1, r)
-C_N1_O1_I4(r, r, 0, 1, ri, r)
-C_N1_O1_I4(r, r, 0, 1, rUV, r)
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target-has.h
+++ b/tcg/s390x/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

/* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0

#define TCG_TARGET_HAS_qemu_ldst_i128 1

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
RRE_SLBGR = 0xb989,
RRE_XGR = 0xb982,

+ RRFa_ALRK = 0xb9fa,
+ RRFa_ALGRK = 0xb9ea,
RRFa_MGRK = 0xb9ec,
RRFa_MSRKC = 0xb9fd,
RRFa_MSGRKC = 0xb9ed,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
.out_rri = tgen_addi,
};

+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, ALGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, ALR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, ALRK, a0, a1, a2);
+ }
+}
+
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, ALFI, a0, a2);
+ } else if (a2 >= 0) {
+ tcg_out_insn(s, RIL, ALGFI, a0, a2);
+ } else {
+ tcg_out_insn(s, RIL, SLGFI, a0, -a2);
+ }
+}
+
static const TCGOutOpBinary outop_addco = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rUV),
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
+};
+
+static void tgen_addcio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRE, ALCR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRE, ALCGR, a0, a2);
+ }
+}
+
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_addcio,
};

static const TCGOutOpAddSubCarry outop_addci = {
- .base.static_constraint = C_NotImplemented,
-};
-
-static const TCGOutOpBinary outop_addcio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_addcio,
};

static void tcg_out_set_carry(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out_insn(s, RR, SLR, TCG_REG_R0, TCG_REG_R0); /* cc = 2 */
}

static void tgen_and(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
.out_rrr = tgen_sub,
};

+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, SLGRK, a0, a1, a2);
+ } else if (a0 == a1) {
+ tcg_out_insn(s, RR, SLR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRFa, SLRK, a0, a1, a2);
+ }
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_mov(s, type, a0, a1);
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, SLFI, a0, a2);
+ } else if (a2 >= 0) {
+ tcg_out_insn(s, RIL, SLGFI, a0, a2);
+ } else {
+ tcg_out_insn(s, RIL, ALGFI, a0, -a2);
+ }
+}
+
static const TCGOutOpAddSubCarry outop_subbo = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rUV),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
};

-static const TCGOutOpAddSubCarry outop_subbi = {
- .base.static_constraint = C_NotImplemented,
-};
+static void tgen_subbio(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRE, SLBR, a0, a2);
+ } else {
+ tcg_out_insn(s, RRE, SLBGR, a0, a2);
+ }
+}

static const TCGOutOpAddSubCarry outop_subbio = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, 0, r),
+ .out_rrr = tgen_subbio,
};

+#define outop_subbi outop_subbio
+
static void tcg_out_set_borrow(TCGContext *s)
{
- g_assert_not_reached();
+ tcg_out_insn(s, RR, CLR, TCG_REG_R0, TCG_REG_R0); /* cc = 0 */
}

static void tgen_xor(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;

- case INDEX_op_add2_i32:
- if (const_args[4]) {
- tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RR, ALR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
- break;
- case INDEX_op_sub2_i32:
- if (const_args[4]) {
- tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RR, SLR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
- break;
-
case INDEX_op_br:
tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
break;

- case INDEX_op_add2_i64:
- if (const_args[4]) {
- if ((int64_t)args[4] >= 0) {
- tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
- }
- } else {
- tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
- break;
- case INDEX_op_sub2_i64:
- if (const_args[4]) {
- if ((int64_t)args[4] >= 0) {
- tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
- } else {
- tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
- }
- } else {
- tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
- }
- tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
- break;
-
case INDEX_op_mb:
/* The host memory model is quite strong, we simply need to
serialize the instruction stream. */
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(o, m, r);

- case INDEX_op_add2_i32:
- case INDEX_op_sub2_i32:
- return C_N1_O1_I4(r, r, 0, 1, ri, r);
-
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i64:
- return C_N1_O1_I4(r, r, 0, 1, rUV, r);
-
case INDEX_op_st_vec:
return C_O0_I2(v, r);
case INDEX_op_ld_vec:
--
2.43.0
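A short model of the add-with-carry insn used above (illustrative C, not
QEMU code; the helper name is invented):

#include <stdint.h>

/* s390x ALCR r1,r2 computes r1 = r1 + r2 + carry, taking the carry
   from and returning it in the condition code; the destination
   aliases the first input, hence the C_O1_I2(r, 0, r) constraint. */
static uint32_t alcr(uint32_t r1, uint32_t r2, unsigned *carry)
{
    uint64_t sum = (uint64_t)r1 + r2 + (*carry & 1);

    *carry = (unsigned)(sum >> 32);
    return (uint32_t)sum;
}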
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target.c.inc | 22 +++++++++++++++++++++-
1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
RIEc_CLGIJ = 0xec7d,
RIEc_CLIJ = 0xec7f,

+ RIEd_ALHSIK = 0xecda,
+ RIEd_ALGHSIK = 0xecdb,
+
RIEf_RISBG = 0xec55,

RIEg_LOCGHI = 0xec46,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

+static void tcg_out_insn_RIEd(TCGContext *s, S390Opcode op,
+ TCGReg r1, TCGReg r3, int i2)
+{
+ tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
+ tcg_out16(s, i2);
+ tcg_out16(s, op & 0xff);
+}
+
static void tcg_out_insn_RIEg(TCGContext *s, S390Opcode op, TCGReg r1,
- int i2, int m3)
+ int i2, int m3)
{
tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
tcg_out32(s, (i2 << 16) | (op & 0xff));
@@ -XXX,XX +XXX,XX @@ static void tgen_addco_rrr(TCGContext *s, TCGType type,
static void tgen_addco_rri(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, tcg_target_long a2)
{
+ if (a2 == (int16_t)a2) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIEd, ALHSIK, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, RIEd, ALGHSIK, a0, a1, a2);
+ }
+ return;
+ }
+
tcg_out_mov(s, type, a0, a1);
if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RIL, ALFI, a0, a2);
--
2.43.0
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/sparc64/tcg-target-con-set.h | 3 +-
tcg/sparc64/tcg-target-has.h | 8 +-
tcg/sparc64/tcg-target.c.inc | 300 ++++++++++++++++++++-----------
3 files changed, 201 insertions(+), 110 deletions(-)

diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I2(r, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rJ)
+C_O1_I2(r, rz, rJ)
+C_O1_I2(r, rz, rz)
C_O1_I4(r, r, rJ, rI, 0)
C_O2_I2(r, r, r, r)
-C_O2_I4(r, r, rz, rz, rJ, rJ)
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#endif

/* optional instructions */
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0

#define TCG_TARGET_HAS_qemu_ldst_i128 0

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
+#define ARITH_ADDCCC (INSN_OP(2) | INSN_OP3(0x18))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
+#define ARITH_SUBCCC (INSN_OP(2) | INSN_OP3(0x1c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
+#define ARITH_ADDXCCC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x13))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)

#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
+#define WRCCR (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(2))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
@@ -XXX,XX +XXX,XX @@ static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
- int32_t val2, int val2const, int op)
+ int32_t val2, int val2const, int op)
{
tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
| (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
}
c1 = TCG_REG_G0, c2const = 0;
cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
- break;
+ break;

case TCG_COND_TSTEQ:
case TCG_COND_TSTNE:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
c1 = TCG_REG_G0;
c2 = TCG_REG_T1, c2const = 0;
cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
- break;
+ break;

case TCG_COND_GTU:
case TCG_COND_LEU:
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
.out = tgen_movcond,
};

-static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah, int32_t bl, int blconst,
- int32_t bh, int bhconst, int opl, int oph)
-{
- TCGReg tmp = TCG_REG_T1;
-
- /* Note that the low parts are fully consumed before tmp is set. */
- if (rl != ah && (bhconst || rl != bh)) {
- tmp = rl;
- }
-
- tcg_out_arithc(s, tmp, al, bl, blconst, opl);
- tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
- tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
-}
-
-static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah, int32_t bl, int blconst,
- int32_t bh, int bhconst, bool is_sub)
-{
- TCGReg tmp = TCG_REG_T1;
-
- /* Note that the low parts are fully consumed before tmp is set. */
- if (rl != ah && (bhconst || rl != bh)) {
- tmp = rl;
- }
-
- tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
-
- if (use_vis3_instructions && !is_sub) {
- /* Note that ADDXC doesn't accept immediates. */
- if (bhconst && bh != 0) {
- tcg_out_movi_s13(s, TCG_REG_T2, bh);
- bh = TCG_REG_T2;
- }
- tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
- } else if (bh == TCG_REG_G0) {
- /* If we have a zero, we can perform the operation in two insns,
- with the arithmetic first, and a conditional move into place. */
- if (rh == ah) {
- tcg_out_arithi(s, TCG_REG_T2, ah, 1,
- is_sub ? ARITH_SUB : ARITH_ADD);
- tcg_out_movcc(s, COND_CS, MOVCC_XCC, rh, TCG_REG_T2, 0);
- } else {
- tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
- tcg_out_movcc(s, COND_CC, MOVCC_XCC, rh, ah, 0);
- }
- } else {
- /*
- * Otherwise adjust BH as if there is carry into T2.
- * Note that constant BH is constrained to 11 bits for the MOVCC,
- * so the adjustment fits 12 bits.
- */
- if (bhconst) {
- tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
- } else {
- tcg_out_arithi(s, TCG_REG_T2, bh, 1,
- is_sub ? ARITH_SUB : ARITH_ADD);
- }
- /* ... smoosh T2 back to original BH if carry is clear ... */
- tcg_out_movcc(s, COND_CC, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
- /* ... and finally perform the arithmetic with the new operand. */
- tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
- }
-
- tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
-}
-
static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
bool in_prologue, bool tail_call)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
.out_rri = tgen_addi,
};

+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDCC);
+}
+
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCC);
+}
+
static const TCGOutOpBinary outop_addco = {
- .base.static_constraint = C_NotImplemented,
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
};

+static void tgen_addci_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDC);
+ } else if (use_vis3_instructions) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDXC);
+ } else {
+ tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */
+ /* Select the correct result based on actual carry value. */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ }
+}
+
+static void tgen_addci_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDC);
219
+ return;
220
+ }
221
+ /* !use_vis3_instructions */
222
+ if (a2 != 0) {
223
+ tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
224
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */
225
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
226
+ } else if (a0 == a1) {
227
+ tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_ADD);
228
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
229
+ } else {
230
+ tcg_out_arithi(s, a0, a1, 1, ARITH_ADD);
231
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
232
+ }
233
+}
234
+
235
+static TCGConstraintSetIndex cset_addci(TCGType type, unsigned flags)
236
+{
237
+ if (use_vis3_instructions && type == TCG_TYPE_I64) {
238
+ /* Note that ADDXC doesn't accept immediates. */
239
+ return C_O1_I2(r, rz, rz);
240
+ }
241
+ return C_O1_I2(r, rz, rJ);
242
+}
243
+
244
static const TCGOutOpAddSubCarry outop_addci = {
245
- .base.static_constraint = C_NotImplemented,
246
+ .base.static_constraint = C_Dynamic,
247
+ .base.dynamic_constraint = cset_addci,
248
+ .out_rrr = tgen_addci_rrr,
249
+ .out_rri = tgen_addci_rri,
250
};
251
252
+/* Copy %xcc.c to %icc.c */
253
+static void tcg_out_dup_xcc_c(TCGContext *s)
254
+{
255
+ if (use_vis3_instructions) {
256
+ tcg_out_arith(s, TCG_REG_T1, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
257
+ } else {
258
+ tcg_out_movi_s13(s, TCG_REG_T1, 0);
259
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, TCG_REG_T1, 1, true);
260
+ }
261
+ /* Write carry-in into %icc via {0,1} + -1. */
262
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, -1, ARITH_ADDCC);
263
+}
264
+
265
+static void tgen_addcio_rrr(TCGContext *s, TCGType type,
266
+ TCGReg a0, TCGReg a1, TCGReg a2)
267
+{
268
+ if (type != TCG_TYPE_I32) {
269
+ if (use_vis3_instructions) {
270
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDXCCC);
271
+ return;
272
+ }
273
+ tcg_out_dup_xcc_c(s);
274
+ }
275
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDCCC);
276
+}
277
+
278
+static void tgen_addcio_rri(TCGContext *s, TCGType type,
279
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
280
+{
281
+ if (type != TCG_TYPE_I32) {
282
+ /* !use_vis3_instructions */
283
+ tcg_out_dup_xcc_c(s);
284
+ }
285
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCCC);
286
+}
287
+
288
+static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags)
289
+{
290
+ if (use_vis3_instructions && type == TCG_TYPE_I64) {
291
+ /* Note that ADDXCCC doesn't accept immediates. */
292
+ return C_O1_I2(r, rz, rz);
293
+ }
294
+ return C_O1_I2(r, rz, rJ);
295
+}
296
+
297
static const TCGOutOpBinary outop_addcio = {
298
- .base.static_constraint = C_NotImplemented,
299
+ .base.static_constraint = C_Dynamic,
300
+ .base.dynamic_constraint = cset_addcio,
301
+ .out_rrr = tgen_addcio_rrr,
302
+ .out_rri = tgen_addcio_rri,
303
};
304
305
static void tcg_out_set_carry(TCGContext *s)
306
{
307
- g_assert_not_reached();
308
+ /* 0x11 -> xcc = nzvC, icc = nzvC */
309
+ tcg_out_arithi(s, 0, TCG_REG_G0, 0x11, WRCCR);
310
}
311
312
static void tgen_and(TCGContext *s, TCGType type,
313
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
314
.out_rrr = tgen_sub,
315
};
316
317
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
318
+ TCGReg a0, TCGReg a1, TCGReg a2)
319
+{
320
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBCC);
321
+}
322
+
323
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
324
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
325
+{
326
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCC);
327
+}
328
+
329
static const TCGOutOpAddSubCarry outop_subbo = {
330
- .base.static_constraint = C_NotImplemented,
331
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
332
+ .out_rrr = tgen_subbo_rrr,
333
+ .out_rri = tgen_subbo_rri,
334
};
335
336
+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
337
+ TCGReg a0, TCGReg a1, TCGReg a2)
338
+{
339
+ /* TODO: OSA 2015 added SUBXC */
340
+ if (type == TCG_TYPE_I32) {
341
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBC);
342
+ } else {
343
+ tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */
344
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */
345
+ /* Select the correct result based on actual borrow value. */
346
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
347
+ }
348
+}
349
+
350
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
351
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
352
+{
353
+ if (type == TCG_TYPE_I32) {
354
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBC);
355
+ } else if (a2 != 0) {
356
+ tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */
357
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */
358
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
359
+ } else if (a0 == a1) {
360
+ tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_SUB);
361
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
362
+ } else {
363
+ tcg_out_arithi(s, a0, a1, 1, ARITH_SUB);
364
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
365
+ }
366
+}
367
+
368
static const TCGOutOpAddSubCarry outop_subbi = {
369
- .base.static_constraint = C_NotImplemented,
370
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
371
+ .out_rrr = tgen_subbi_rrr,
372
+ .out_rri = tgen_subbi_rri,
373
};
374
375
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
376
+ TCGReg a0, TCGReg a1, TCGReg a2)
377
+{
378
+ if (type != TCG_TYPE_I32) {
379
+ /* TODO: OSA 2015 added SUBXCCC */
380
+ tcg_out_dup_xcc_c(s);
381
+ }
382
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBCCC);
383
+}
384
+
385
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
386
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
387
+{
388
+ if (type != TCG_TYPE_I32) {
389
+ tcg_out_dup_xcc_c(s);
390
+ }
391
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCCC);
392
+}
393
+
394
static const TCGOutOpAddSubCarry outop_subbio = {
395
- .base.static_constraint = C_NotImplemented,
396
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
397
+ .out_rrr = tgen_subbio_rrr,
398
+ .out_rri = tgen_subbio_rri,
399
};
400
401
static void tcg_out_set_borrow(TCGContext *s)
402
{
403
- g_assert_not_reached();
404
+ tcg_out_set_carry(s); /* borrow == carry */
405
}
406
407
static void tgen_xor(TCGContext *s, TCGType type,
408
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
409
tcg_out_ldst(s, a0, a1, a2, STW);
410
break;
411
412
- case INDEX_op_add2_i32:
413
- tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
414
- args[4], const_args[4], args[5], const_args[5],
415
- ARITH_ADDCC, ARITH_ADDC);
416
- break;
417
- case INDEX_op_sub2_i32:
418
- tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
419
- args[4], const_args[4], args[5], const_args[5],
420
- ARITH_SUBCC, ARITH_SUBC);
421
- break;
422
-
423
case INDEX_op_qemu_ld_i32:
424
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
425
break;
426
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
427
tcg_out_ldst(s, a0, a1, a2, STX);
428
break;
429
430
- case INDEX_op_add2_i64:
431
- tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
432
- const_args[4], args[5], const_args[5], false);
433
- break;
434
- case INDEX_op_sub2_i64:
435
- tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
436
- const_args[4], args[5], const_args[5], true);
437
- break;
438
-
439
case INDEX_op_mb:
440
tcg_out_mb(s, a0);
441
break;
442
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
443
case INDEX_op_qemu_st_i64:
444
return C_O0_I2(rz, r);
445
446
- case INDEX_op_add2_i32:
447
- case INDEX_op_add2_i64:
448
- case INDEX_op_sub2_i32:
449
- case INDEX_op_sub2_i64:
450
- return C_O2_I4(r, r, rz, rz, rJ, rJ);
451
-
452
default:
453
return C_NotImplemented;
454
}
455
--
456
2.43.0
diff view generated by jsdifflib
Deleted patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci/tcg-target-has.h | 8 +--
 tcg/tci.c | 120 +++++++++++++++++------------------
 tcg/tci/tcg-target-opc.h.inc | 1 +
 tcg/tci/tcg-target.c.inc | 97 +++++++++++++++++-----------
 4 files changed, 125 insertions(+), 101 deletions(-)

diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-has.h
+++ b/tcg/tci/tcg-target-has.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_H

 #define TCG_TARGET_HAS_qemu_st8_i32 0
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0

 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
 #endif /* TCG_TARGET_REG_BITS == 64 */

 #define TCG_TARGET_HAS_qemu_ldst_i128 0
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
     *c5 = extract32(insn, 28, 4);
 }

-static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
-                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
-{
-    *r0 = extract32(insn, 8, 4);
-    *r1 = extract32(insn, 12, 4);
-    *r2 = extract32(insn, 16, 4);
-    *r3 = extract32(insn, 20, 4);
-    *r4 = extract32(insn, 24, 4);
-    *r5 = extract32(insn, 28, 4);
-}
-
 static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
 {
     bool result = false;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
     tcg_target_ulong regs[TCG_TARGET_NB_REGS];
     uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                    / sizeof(uint64_t)];
+    bool carry = false;

     regs[TCG_AREG0] = (tcg_target_ulong)env;
     regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
     for (;;) {
         uint32_t insn;
         TCGOpcode opc;
-        TCGReg r0, r1, r2, r3, r4, r5;
+        TCGReg r0, r1, r2, r3, r4;
         tcg_target_ulong t1;
         TCGCond condition;
         uint8_t pos, len;
         uint32_t tmp32;
         uint64_t tmp64, taddr;
-        uint64_t T1, T2;
         MemOpIdx oi;
         int32_t ofs;
         void *ptr;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 #if TCG_TARGET_REG_BITS == 32
         case INDEX_op_setcond2_i32:
             tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
-            T1 = tci_uint64(regs[r2], regs[r1]);
-            T2 = tci_uint64(regs[r4], regs[r3]);
-            regs[r0] = tci_compare64(T1, T2, condition);
+            regs[r0] = tci_compare64(tci_uint64(regs[r2], regs[r1]),
+                                     tci_uint64(regs[r4], regs[r3]),
+                                     condition);
             break;
 #elif TCG_TARGET_REG_BITS == 64
         case INDEX_op_setcond:
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rl(insn, tb_ptr, &r0, &ptr);
             regs[r0] = *(tcg_target_ulong *)ptr;
             break;
+        case INDEX_op_tci_setcarry:
+            carry = true;
+            break;

         /* Load/store operations (32 bit). */

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rr(insn, &r0, &r1);
             regs[r0] = ctpop_tr(regs[r1]);
             break;
+        case INDEX_op_addco:
+            tci_args_rrr(insn, &r0, &r1, &r2);
+            t1 = regs[r1] + regs[r2];
+            carry = t1 < regs[r1];
+            regs[r0] = t1;
+            break;
+        case INDEX_op_addci:
+            tci_args_rrr(insn, &r0, &r1, &r2);
+            regs[r0] = regs[r1] + regs[r2] + carry;
+            break;
+        case INDEX_op_addcio:
+            tci_args_rrr(insn, &r0, &r1, &r2);
+            if (carry) {
+                t1 = regs[r1] + regs[r2] + 1;
+                carry = t1 <= regs[r1];
+            } else {
+                t1 = regs[r1] + regs[r2];
+                carry = t1 < regs[r1];
+            }
+            regs[r0] = t1;
+            break;
+        case INDEX_op_subbo:
+            tci_args_rrr(insn, &r0, &r1, &r2);
+            carry = regs[r1] < regs[r2];
+            regs[r0] = regs[r1] - regs[r2];
+            break;
+        case INDEX_op_subbi:
+            tci_args_rrr(insn, &r0, &r1, &r2);
+            regs[r0] = regs[r1] - regs[r2] - carry;
+            break;
+        case INDEX_op_subbio:
+            tci_args_rrr(insn, &r0, &r1, &r2);
+            if (carry) {
+                carry = regs[r1] <= regs[r2];
+                regs[r0] = regs[r1] - regs[r2] - 1;
+            } else {
+                carry = regs[r1] < regs[r2];
+                regs[r0] = regs[r1] - regs[r2];
+            }
+            break;
         case INDEX_op_muls2:
             tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
 #if TCG_TARGET_REG_BITS == 32
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                 tb_ptr = ptr;
             }
             break;
-#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
-        case INDEX_op_add2_i32:
-            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
-            T1 = tci_uint64(regs[r3], regs[r2]);
-            T2 = tci_uint64(regs[r5], regs[r4]);
-            tci_write_reg64(regs, r1, r0, T1 + T2);
-            break;
-#endif
-#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
-        case INDEX_op_sub2_i32:
-            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
-            T1 = tci_uint64(regs[r3], regs[r2]);
-            T2 = tci_uint64(regs[r5], regs[r4]);
-            tci_write_reg64(regs, r1, r0, T1 - T2);
-            break;
-#endif
         case INDEX_op_bswap16:
             tci_args_rr(insn, &r0, &r1);
             regs[r0] = bswap16(regs[r1]);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
             break;
-#if TCG_TARGET_HAS_add2_i64
-        case INDEX_op_add2_i64:
-            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
-            T1 = regs[r2] + regs[r4];
-            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
-            regs[r0] = T1;
-            regs[r1] = T2;
-            break;
-#endif
-#if TCG_TARGET_HAS_add2_i64
-        case INDEX_op_sub2_i64:
-            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
-            T1 = regs[r2] - regs[r4];
-            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
-            regs[r0] = T1;
-            regs[r1] = T2;
-            break;
-#endif

         /* Shift/rotate operations (64 bit). */

@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     const char *op_name;
     uint32_t insn;
     TCGOpcode op;
-    TCGReg r0, r1, r2, r3, r4, r5;
+    TCGReg r0, r1, r2, r3, r4;
     tcg_target_ulong i1;
     int32_t s2;
     TCGCond c;
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                            op_name, str_r(r0), ptr);
         break;

+    case INDEX_op_tci_setcarry:
+        info->fprintf_func(info->stream, "%-12s", op_name);
+        break;
+
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
     case INDEX_op_ld8s_i32:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
         break;

     case INDEX_op_add:
+    case INDEX_op_addci:
+    case INDEX_op_addcio:
+    case INDEX_op_addco:
     case INDEX_op_and:
     case INDEX_op_andc:
     case INDEX_op_clz:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_shl:
     case INDEX_op_shr:
     case INDEX_op_sub:
+    case INDEX_op_subbi:
+    case INDEX_op_subbio:
+    case INDEX_op_subbo:
     case INDEX_op_xor:
     case INDEX_op_tci_ctz32:
     case INDEX_op_tci_clz32:
@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
                            str_r(r2), str_r(r3));
         break;

-    case INDEX_op_add2_i32:
-    case INDEX_op_add2_i64:
-    case INDEX_op_sub2_i32:
-    case INDEX_op_sub2_i64:
-        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
-        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
-                           op_name, str_r(r0), str_r(r1), str_r(r2),
-                           str_r(r3), str_r(r4), str_r(r5));
-        break;
-
     case INDEX_op_qemu_ld_i64:
     case INDEX_op_qemu_st_i64:
         if (TCG_TARGET_REG_BITS == 32) {
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-opc.h.inc
+++ b/tcg/tci/tcg-target-opc.h.inc
@@ -XXX,XX +XXX,XX @@
 /* These opcodes for use between the tci generator and interpreter. */
 DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
 DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
+DEF(tci_setcarry, 0, 0, 0, TCG_OPF_NOT_PRESENT)
 DEF(tci_clz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
 DEF(tci_ctz32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
 DEF(tci_divs32, 1, 2, 0, TCG_OPF_NOT_PRESENT)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
     case INDEX_op_st_i64:
         return C_O0_I2(r, r);

-    case INDEX_op_add2_i32:
-    case INDEX_op_add2_i64:
-    case INDEX_op_sub2_i32:
-    case INDEX_op_sub2_i64:
-        return C_O2_I4(r, r, r, r, r, r);
-
     case INDEX_op_qemu_ld_i32:
         return C_O1_I1(r, r);
     case INDEX_op_qemu_ld_i64:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
     tcg_out32(s, insn);
 }

-static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
-                              TCGReg r0, TCGReg r1, TCGReg r2,
-                              TCGReg r3, TCGReg r4, TCGReg r5)
-{
-    tcg_insn_unit insn = 0;
-
-    insn = deposit32(insn, 0, 8, op);
-    insn = deposit32(insn, 8, 4, r0);
-    insn = deposit32(insn, 12, 4, r1);
-    insn = deposit32(insn, 16, 4, r2);
-    insn = deposit32(insn, 20, 4, r3);
-    insn = deposit32(insn, 24, 4, r4);
-    insn = deposit32(insn, 28, 4, r5);
-    tcg_out32(s, insn);
-}
-
 static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                          TCGReg base, intptr_t offset)
 {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBinary outop_add = {
     .out_rrr = tgen_add,
 };

+static TCGConstraintSetIndex cset_addsubcarry(TCGType type, unsigned flags)
+{
+    return type == TCG_TYPE_REG ? C_O1_I2(r, r, r) : C_NotImplemented;
+}
+
+static void tgen_addco(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, INDEX_op_addco, a0, a1, a2);
+}
+
 static const TCGOutOpBinary outop_addco = {
-    .base.static_constraint = C_NotImplemented,
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_addsubcarry,
+    .out_rrr = tgen_addco,
 };

+static void tgen_addci(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, INDEX_op_addci, a0, a1, a2);
+}
+
 static const TCGOutOpAddSubCarry outop_addci = {
-    .base.static_constraint = C_NotImplemented,
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_addsubcarry,
+    .out_rrr = tgen_addci,
 };

+static void tgen_addcio(TCGContext *s, TCGType type,
+                        TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, INDEX_op_addcio, a0, a1, a2);
+}
+
 static const TCGOutOpBinary outop_addcio = {
-    .base.static_constraint = C_NotImplemented,
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_addsubcarry,
+    .out_rrr = tgen_addcio,
 };

 static void tcg_out_set_carry(TCGContext *s)
 {
-    g_assert_not_reached();
+    tcg_out_op_v(s, INDEX_op_tci_setcarry);
 }

 static void tgen_and(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSubtract outop_sub = {
     .out_rrr = tgen_sub,
 };

+static void tgen_subbo(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, INDEX_op_subbo, a0, a1, a2);
+}
+
 static const TCGOutOpAddSubCarry outop_subbo = {
-    .base.static_constraint = C_NotImplemented,
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_addsubcarry,
+    .out_rrr = tgen_subbo,
 };

+static void tgen_subbi(TCGContext *s, TCGType type,
+                       TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, INDEX_op_subbi, a0, a1, a2);
+}
+
 static const TCGOutOpAddSubCarry outop_subbi = {
-    .base.static_constraint = C_NotImplemented,
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_addsubcarry,
+    .out_rrr = tgen_subbi,
 };

+static void tgen_subbio(TCGContext *s, TCGType type,
+                        TCGReg a0, TCGReg a1, TCGReg a2)
+{
+    tcg_out_op_rrr(s, INDEX_op_subbio, a0, a1, a2);
+}
+
 static const TCGOutOpAddSubCarry outop_subbio = {
-    .base.static_constraint = C_NotImplemented,
+    .base.static_constraint = C_Dynamic,
+    .base.dynamic_constraint = cset_addsubcarry,
+    .out_rrr = tgen_subbio,
 };

 static void tcg_out_set_borrow(TCGContext *s)
 {
-    g_assert_not_reached();
+    tcg_out_op_v(s, INDEX_op_tci_setcarry);  /* borrow == carry */
 }

 static void tgen_xor(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_ldst(s, opc, args[0], args[1], args[2]);
         break;

-    CASE_32_64(add2)
-    CASE_32_64(sub2)
-        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
-                          args[3], args[4], args[5]);
-        break;
-
     case INDEX_op_qemu_ld_i64:
     case INDEX_op_qemu_st_i64:
         if (TCG_TARGET_REG_BITS == 32) {
--
2.43.0
Deleted patch

Split these functions out from tcg_out_op.
Call them directly from tcg_gen_code.
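[Aside, not part of the patch: a minimal sketch of the resulting shape, summarized from the hunks below. Each backend gains a tcg_out_br() with a common signature, and tcg_gen_code() dispatches INDEX_op_br to it directly instead of routing branches through tcg_out_op().]

    /* Forward declaration added to tcg/tcg.c (first hunk below). */
    static void tcg_out_br(TCGContext *s, TCGLabel *l);

    /* New case in tcg_gen_code()'s opcode loop: the label argument is
       decoded once here, rather than in every backend's tcg_out_op(). */
    case INDEX_op_br:
        tcg_out_br(s, arg_label(op->args[0]));
        break;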
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 4 ++++
 tcg/aarch64/tcg-target.c.inc | 6 +-----
 tcg/arm/tcg-target.c.inc | 8 +++++---
 tcg/i386/tcg-target.c.inc | 8 +++++---
 tcg/loongarch64/tcg-target.c.inc | 12 ++++++------
 tcg/mips/tcg-target.c.inc | 10 +++++-----
 tcg/ppc/tcg-target.c.inc | 26 ++++++++++++--------------
 tcg/riscv/tcg-target.c.inc | 11 ++++++-----
 tcg/s390x/tcg-target.c.inc | 9 +++++----
 tcg/sparc64/tcg-target.c.inc | 10 ++++++----
 tcg/tci/tcg-target.c.inc | 9 +++++----
 11 files changed, 60 insertions(+), 53 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
 static void tcg_out_mb(TCGContext *s, unsigned bar);
+static void tcg_out_br(TCGContext *s, TCGLabel *l);
 static void tcg_out_set_carry(TCGContext *s);
 static void tcg_out_set_borrow(TCGContext *s);
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
         case INDEX_op_goto_tb:
             tcg_out_goto_tb(s, op->args[0]);
             break;
+        case INDEX_op_br:
+            tcg_out_br(s, arg_label(op->args[0]));
+            break;
         case INDEX_op_mb:
             tcg_out_mb(s, op->args[0]);
             break;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
     tcg_out_call_int(s, target);
 }

-static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
 {
     if (!l->has_value) {
         tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
         tcg_out_insn(s, 3207, BR, a0);
         break;

-    case INDEX_op_br:
-        tcg_out_goto_label(s, arg_label(a0));
-        break;
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
     }
 }

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tcg_out_goto_label(s, COND_AL, l);
+}
+
 static void tcg_out_mb(TCGContext *s, unsigned a0)
 {
     if (use_armv7_instructions) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
     case INDEX_op_goto_ptr:
         tcg_out_b_reg(s, COND_AL, args[0]);
         break;
-    case INDEX_op_br:
-        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
-        break;

     case INDEX_op_ld8u_i32:
         tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
     }
 }

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tcg_out_jxx(s, JCC_JMP, l, 0);
+}
+
 static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
                        TCGArg arg2, int const_arg2, int rexw)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         /* jmp to the given host address (could be epilogue) */
         tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
         break;
-    case INDEX_op_br:
-        tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
-        break;
     OP_32_64(ld8u):
         /* Note that we can ignore REXW for the zero-extend to 64-bit. */
         tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpMovcond outop_movcond = {
  * Branch helpers
  */

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, l, 0);
+    tcg_out_opc_b(s, 0);
+}
+
 static const struct {
     LoongArchInsn op;
     bool swap;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
         break;

-    case INDEX_op_br:
-        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
-                      0);
-        tcg_out_opc_b(s, 0);
-        break;
-
     case INDEX_op_ld8s_i32:
     case INDEX_op_ld8s_i64:
         tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpBrcond outop_brcond = {
     .out_rr = tgen_brcond,
 };

+void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tgen_brcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, l);
+}
+
 static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret,
                                 TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
             tcg_out_nop(s);
         }
         break;
-    case INDEX_op_br:
-        tgen_brcond(s, TCG_TYPE_I32, TCG_COND_EQ,
-                    TCG_REG_ZERO, TCG_REG_ZERO, arg_label(a0));
-        break;
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         i1 = OPC_LBU;
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpSetcond outop_negsetcond = {
     .out_rri = tgen_negsetcondi,
 };

+void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    uint32_t insn = B;
+
+    if (l->has_value) {
+        insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
+    } else {
+        tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
+    }
+    tcg_out32(s, insn);
+}
+
 static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd)
 {
     tcg_out32(s, tcg_to_bc[cond] | bd);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
         tcg_out32(s, BCCTR | BO_ALWAYS);
         break;
-    case INDEX_op_br:
-        {
-            TCGLabel *l = arg_label(args[0]);
-            uint32_t insn = B;
-
-            if (l->has_value) {
-                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
-                                       l->u.value_ptr);
-            } else {
-                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
-            }
-            tcg_out32(s, insn);
-        }
-        break;
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
     tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
 }

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, l, 0);
+    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
+}
+
 static const struct {
     RISCVInsn op;
     bool swap;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
         break;

-    case INDEX_op_br:
-        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
-        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
-        break;
-
     case INDEX_op_ld8u_i32:
     case INDEX_op_ld8u_i64:
         tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
     }
 }

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tgen_branch(s, S390_CC_ALWAYS, l);
+}
+
 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                 TCGReg r1, TCGReg r2, TCGLabel *l)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
         break;

-    case INDEX_op_br:
-        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
-        break;
-
     case INDEX_op_qemu_ld_i32:
         tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
         break;
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
     tcg_out_bpcc0(s, scond, flags, off19);
 }

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tcg_out_bpcc(s, COND_A, BPCC_PT, l);
+    tcg_out_nop(s);
+}
+
 static void tcg_out_cmp(TCGContext *s, TCGCond cond,
                         TCGReg c1, int32_t c2, int c2const)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
         tcg_out_mov_delay(s, TCG_REG_TB, a0);
         break;
-    case INDEX_op_br:
-        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
-        tcg_out_nop(s);
-        break;

 #define OP_32_64(x) \
         glue(glue(case INDEX_op_, x), _i32): \
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, unsigned a0)
     tcg_out_op_v(s, INDEX_op_mb);
 }

+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+    tcg_out_op_l(s, INDEX_op_br, l);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
         tcg_out_op_r(s, opc, args[0]);
         break;

-    case INDEX_op_br:
-        tcg_out_op_l(s, opc, arg_label(args[0]));
-        break;
-
     CASE_32_64(ld8u)
     CASE_32_64(ld8s)
     CASE_32_64(ld16u)
--
2.43.0
From: "Emilio G. Cota" <cota@braap.org>

Updates can come from other threads, so readers that do not
take tlb_lock must use atomic_read to avoid undefined
behaviour (UB).
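[Aside, not part of the commit message: the accessor this patch introduces — see the include/exec/cpu_ldst.h hunk below — has the following shape. TCG_OVERSIZED_GUEST covers 64-bit guests on 32-bit hosts, where no lock-free 64-bit read exists, so the plain load is kept there.]

    static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
    {
    #if TCG_OVERSIZED_GUEST
        /* No atomic 64-bit read on a 32-bit host; plain load. */
        return entry->addr_write;
    #else
        return atomic_read(&entry->addr_write);
    #endif
    }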
This completes the conversion to tlb_lock. This conversion results
on average in no performance loss, as the following experiments
(run on an Intel i7-6700K CPU @ 4.00GHz) show.

1. aarch64 bootup+shutdown test:

- Before:
 Performance counter stats for 'taskset -c 0 ../img/aarch64/die.sh' (10 runs):

       7487.087786 task-clock (msec)    # 0.998 CPUs utilized    ( +- 0.12% )
     31,574,905,303 cycles              # 4.217 GHz              ( +- 0.12% )
     57,097,908,812 instructions        # 1.81 insns per cycle   ( +- 0.08% )
     10,255,415,367 branches            # 1369.747 M/sec         ( +- 0.08% )
        173,278,962 branch-misses       # 1.69% of all branches  ( +- 0.18% )

        7.504481349 seconds time elapsed                         ( +- 0.14% )

- After:
 Performance counter stats for 'taskset -c 0 ../img/aarch64/die.sh' (10 runs):

       7462.441328 task-clock (msec)    # 0.998 CPUs utilized    ( +- 0.07% )
     31,478,476,520 cycles              # 4.218 GHz              ( +- 0.07% )
     57,017,330,084 instructions        # 1.81 insns per cycle   ( +- 0.05% )
     10,251,929,667 branches            # 1373.804 M/sec         ( +- 0.05% )
        173,023,787 branch-misses       # 1.69% of all branches  ( +- 0.11% )

        7.474970463 seconds time elapsed                         ( +- 0.07% )

2. SPEC06int:
  SPEC06int (test set)
  [Y axis: Speedup over master]
1.15 +-+----+------+------+------+------+------+-------+------+------+------+------+------+------+----+-+
| |
1.1 +-+.................................+++.............................+ tlb-lock-v2 (m+++x) +-+
| +++ | +++ tlb-lock-v3 (spinl|ck) |
| +++ | | +++ +++ | | |
1.05 +-+....+++...........####.........|####.+++.|......|.....###....+++...........+++....###.........+-+
| ### ++#| # |# |# ***### +++### +++#+# | +++ | #|# ### |
1 +-+++***+#++++####+++#++#++++++++++#++#+*+*++#++++#+#+****+#++++###++++###++++###++++#+#++++#+#+++-+
| *+* # #++# *** # #### *** # * *++# ****+# *| * # ****|# |# # #|# #+# # # |
0.95 +-+..*.*.#....#..#.*|*..#...#..#.*|*..#.*.*..#.*|.*.#.*++*.#.*++*+#.****.#....#+#....#.#..++#.#..+-+
| * * # # # *|* # # # *|* # * * # *++* # * * # * * # * |* # ++# # # # *** # |
| * * # ++# # *+* # # # *|* # * * # * * # * * # * * # *++* # **** # ++# # * * # |
0.9 +-+..*.*.#...|#..#.*.*..#.++#..#.*|*..#.*.*..#.*..*.#.*..*.#.*..*.#.*..*.#.*.|*.#...|#.#..*.*.#..+-+
| * * # *** # * * # |# # *+* # * * # * * # * * # * * # * * # *++* # |# # * * # |
0.85 +-+..*.*.#..*|*..#.*.*..#.***..#.*.*..#.*.*..#.*..*.#.*..*.#.*..*.#.*..*.#.*..*.#.****.#..*.*.#..+-+
| * * # *+* # * * # *|* # * * # * * # * * # * * # * * # * * # * * # * |* # * * # |
| * * # * * # * * # *+* # * * # * * # * * # * * # * * # * * # * * # * |* # * * # |
0.8 +-+..*.*.#..*.*..#.*.*..#.*.*..#.*.*..#.*.*..#.*..*.#.*..*.#.*..*.#.*..*.#.*..*.#.*++*.#..*.*.#..+-+
| * * # * * # * * # * * # * * # * * # * * # * * # * * # * * # * * # * * # * * # |
0.75 +-+--***##--***###-***###-***###-***###-***###-****##-****##-****##-****##-****##-****##--***##--+-+
400.perlben401.bzip2403.gcc429.m445.gob456.hmme45462.libqua464.h26471.omnet473483.xalancbmkgeomean

png: https://imgur.com/a/BHzpPTW

Notes:
- tlb-lock-v2 corresponds to an implementation with a mutex.
- tlb-lock-v3 corresponds to the current implementation, i.e.
  a spinlock and a single lock acquisition in tlb_set_page_with_attrs.

Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <20181016153840.25877-1-cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
4
tcg/tcg.c | 46 +++++++++++
71
accel/tcg/softmmu_template.h | 12 ++++++------
5
tcg/aarch64/tcg-target.c.inc | 113 ++++++++++++++++-----------
72
include/exec/cpu_ldst.h | 11 ++++++++++-
6
tcg/arm/tcg-target.c.inc | 126 ++++++++++++++++---------------
73
include/exec/cpu_ldst_template.h | 2 +-
7
tcg/i386/tcg-target.c.inc | 112 ++++++++++++++++-----------
74
accel/tcg/cputlb.c | 19 +++++++++++++------
8
tcg/loongarch64/tcg-target.c.inc | 104 +++++++++++++++----------
75
4 files changed, 30 insertions(+), 14 deletions(-)
9
tcg/mips/tcg-target.c.inc | 108 ++++++++++++++++----------
76
10
tcg/ppc/tcg-target.c.inc | 110 +++++++++++++++++----------
77
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
11
tcg/riscv/tcg-target.c.inc | 107 ++++++++++++++++----------
78
index XXXXXXX..XXXXXXX 100644
12
tcg/s390x/tcg-target.c.inc | 122 +++++++++++++++++-------------
79
--- a/accel/tcg/softmmu_template.h
13
tcg/sparc64/tcg-target.c.inc | 101 ++++++++++++++++---------
80
+++ b/accel/tcg/softmmu_template.h
14
tcg/tci/tcg-target.c.inc | 91 ++++++++++++++++------
81
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
15
11 files changed, 721 insertions(+), 419 deletions(-)
82
uintptr_t mmu_idx = get_mmuidx(oi);
16
83
uintptr_t index = tlb_index(env, mmu_idx, addr);
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
84
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
18
index XXXXXXX..XXXXXXX 100644
85
- target_ulong tlb_addr = entry->addr_write;
19
--- a/tcg/tcg.c
86
+ target_ulong tlb_addr = tlb_addr_write(entry);
20
+++ b/tcg/tcg.c
87
unsigned a_bits = get_alignment_bits(get_memop(oi));
21
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpExtract2 {
88
uintptr_t haddr;
22
TCGReg a2, unsigned shr);
89
23
} TCGOutOpExtract2;
90
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
24
91
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
25
+typedef struct TCGOutOpLoad {
92
mmu_idx, retaddr);
26
+ TCGOutOp base;
27
+ void (*out)(TCGContext *s, TCGType type, TCGReg dest,
28
+ TCGReg base, intptr_t offset);
29
+} TCGOutOpLoad;
30
+
31
typedef struct TCGOutOpMovcond {
32
TCGOutOp base;
33
void (*out)(TCGContext *s, TCGType type, TCGCond cond,
34
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp outop_goto_ptr = {
35
.static_constraint = C_O0_I1(r),
36
};
37
38
+static const TCGOutOpLoad outop_ld = {
39
+ .base.static_constraint = C_O1_I1(r, r),
40
+ .out = tcg_out_ld,
41
+};
42
+
43
/*
44
* Register V as the TCGOutOp for O.
45
* This verifies that V is of type T, otherwise give a nice compiler error.
46
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
47
OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
48
OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
49
OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
50
+ OUTOP(INDEX_op_ld8u_i32, TCGOutOpLoad, outop_ld8u),
51
+ OUTOP(INDEX_op_ld8u_i64, TCGOutOpLoad, outop_ld8u),
52
+ OUTOP(INDEX_op_ld8s_i32, TCGOutOpLoad, outop_ld8s),
53
+ OUTOP(INDEX_op_ld8s_i64, TCGOutOpLoad, outop_ld8s),
54
+ OUTOP(INDEX_op_ld16u_i32, TCGOutOpLoad, outop_ld16u),
55
+ OUTOP(INDEX_op_ld16u_i64, TCGOutOpLoad, outop_ld16u),
56
+ OUTOP(INDEX_op_ld16s_i32, TCGOutOpLoad, outop_ld16s),
57
+ OUTOP(INDEX_op_ld16s_i64, TCGOutOpLoad, outop_ld16s),
58
+ OUTOP(INDEX_op_ld_i32, TCGOutOpLoad, outop_ld),
59
+ OUTOP(INDEX_op_ld_i64, TCGOutOpLoad, outop_ld),
60
OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
61
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
62
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
63
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
64
OUTOP(INDEX_op_extu_i32_i64, TCGOutOpUnary, outop_extu_i32_i64),
65
OUTOP(INDEX_op_extrl_i64_i32, TCGOutOpUnary, outop_extrl_i64_i32),
66
OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
67
+ OUTOP(INDEX_op_ld32u_i64, TCGOutOpLoad, outop_ld32u),
68
+ OUTOP(INDEX_op_ld32s_i64, TCGOutOpLoad, outop_ld32s),
69
#endif
70
};
71
72
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
73
}
93
}
74
break;
94
- tlb_addr = entry->addr_write & ~TLB_INVALID_MASK;
75
95
+ tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
76
+ case INDEX_op_ld32u_i64:
96
}
77
+ case INDEX_op_ld32s_i64:
97
78
+ tcg_debug_assert(type == TCG_TYPE_I64);
98
/* Handle an IO access. */
79
+ /* fall through */
99
@@ -XXX,XX +XXX,XX @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
80
+ case INDEX_op_ld8u_i32:
100
cannot evict the first. */
81
+ case INDEX_op_ld8u_i64:
101
page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
82
+ case INDEX_op_ld8s_i32:
102
entry2 = tlb_entry(env, mmu_idx, page2);
83
+ case INDEX_op_ld8s_i64:
103
- if (!tlb_hit_page(entry2->addr_write, page2)
84
+ case INDEX_op_ld16u_i32:
104
+ if (!tlb_hit_page(tlb_addr_write(entry2), page2)
85
+ case INDEX_op_ld16u_i64:
105
&& !VICTIM_TLB_HIT(addr_write, page2)) {
86
+ case INDEX_op_ld16s_i32:
106
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
87
+ case INDEX_op_ld16s_i64:
107
mmu_idx, retaddr);
88
+ case INDEX_op_ld_i32:
108
@@ -XXX,XX +XXX,XX @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
89
+ case INDEX_op_ld_i64:
109
uintptr_t mmu_idx = get_mmuidx(oi);
90
+ {
110
uintptr_t index = tlb_index(env, mmu_idx, addr);
91
+ const TCGOutOpLoad *out =
111
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
92
+ container_of(all_outop[op->opc], TCGOutOpLoad, base);
112
- target_ulong tlb_addr = entry->addr_write;
93
+
113
+ target_ulong tlb_addr = tlb_addr_write(entry);
94
+ tcg_debug_assert(!const_args[1]);
114
unsigned a_bits = get_alignment_bits(get_memop(oi));
95
+ out->out(s, type, new_args[0], new_args[1], new_args[2]);
115
uintptr_t haddr;
96
+ }
116
97
+ break;
117
@@ -XXX,XX +XXX,XX @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
98
+
118
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
99
case INDEX_op_muls2:
119
mmu_idx, retaddr);
100
case INDEX_op_mulu2:
120
}
101
{
121
- tlb_addr = entry->addr_write & ~TLB_INVALID_MASK;
102
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
122
+ tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
103
index XXXXXXX..XXXXXXX 100644
123
}
104
--- a/tcg/aarch64/tcg-target.c.inc
124
105
+++ b/tcg/aarch64/tcg-target.c.inc
125
/* Handle an IO access. */
106
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
126
@@ -XXX,XX +XXX,XX @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
107
.out_rrr = tgen_extract2,
127
cannot evict the first. */
108
};
128
page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
109
129
entry2 = tlb_entry(env, mmu_idx, page2);
110
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
130
- if (!tlb_hit_page(entry2->addr_write, page2)
111
+ TCGReg base, ptrdiff_t offset)
131
+ if (!tlb_hit_page(tlb_addr_write(entry2), page2)
132
&& !VICTIM_TLB_HIT(addr_write, page2)) {
133
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
134
mmu_idx, retaddr);
135
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
136
index XXXXXXX..XXXXXXX 100644
137
--- a/include/exec/cpu_ldst.h
138
+++ b/include/exec/cpu_ldst.h
139
@@ -XXX,XX +XXX,XX @@ extern __thread uintptr_t helper_retaddr;
140
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
141
#include "tcg.h"
142
143
+static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
112
+{
144
+{
113
+ tcg_out_ldst(s, I3312_LDRB, dest, base, offset, 0);
145
+#if TCG_OVERSIZED_GUEST
146
+ return entry->addr_write;
147
+#else
148
+ return atomic_read(&entry->addr_write);
149
+#endif
114
+}
150
+}
115
+
151
+
116
+static const TCGOutOpLoad outop_ld8u = {
152
/* Find the TLB index corresponding to the mmu_idx + address pair. */
117
+ .base.static_constraint = C_O1_I1(r, r),
153
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
118
+ .out = tgen_ld8u,
154
target_ulong addr)
119
+};
155
@@ -XXX,XX +XXX,XX @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
156
tlb_addr = tlbentry->addr_read;
157
break;
158
case 1:
159
- tlb_addr = tlbentry->addr_write;
160
+ tlb_addr = tlb_addr_write(tlbentry);
161
break;
162
case 2:
163
tlb_addr = tlbentry->addr_code;
164
diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h
165
index XXXXXXX..XXXXXXX 100644
166
--- a/include/exec/cpu_ldst_template.h
167
+++ b/include/exec/cpu_ldst_template.h
168
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
169
addr = ptr;
170
mmu_idx = CPU_MMU_INDEX;
171
entry = tlb_entry(env, mmu_idx, addr);
172
- if (unlikely(entry->addr_write !=
173
+ if (unlikely(tlb_addr_write(entry) !=
174
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
175
oi = make_memop_idx(SHIFT, mmu_idx);
176
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
177
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
178
index XXXXXXX..XXXXXXX 100644
179
--- a/accel/tcg/cputlb.c
180
+++ b/accel/tcg/cputlb.c
181
@@ -XXX,XX +XXX,XX @@ static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
182
target_ulong page)
183
{
184
return tlb_hit_page(tlb_entry->addr_read, page) ||
185
- tlb_hit_page(tlb_entry->addr_write, page) ||
186
+ tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
187
tlb_hit_page(tlb_entry->addr_code, page);
188
}
189
190
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
191
tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
192
193
entry = tlb_entry(env, mmu_idx, addr);
194
- tlb_addr = entry->addr_write;
195
+ tlb_addr = tlb_addr_write(entry);
196
if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
197
/* RAM access */
198
uintptr_t haddr = addr + entry->addend;
199
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
200
assert_cpu_is_self(ENV_GET_CPU(env));
201
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
202
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
203
- target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
204
+ target_ulong cmp;
120
+
205
+
121
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
206
+ /* elt_ofs might correspond to .addr_write, so use atomic_read */
122
+ TCGReg base, ptrdiff_t offset)
207
+#if TCG_OVERSIZED_GUEST
123
+{
208
+ cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
124
+ AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSBW : I3312_LDRSBX;
209
+#else
125
+ tcg_out_ldst(s, insn, dest, base, offset, 0);
210
+ cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
126
+}
127
+
128
+static const TCGOutOpLoad outop_ld8s = {
129
+ .base.static_constraint = C_O1_I1(r, r),
130
+ .out = tgen_ld8s,
131
+};
132
+
133
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
134
+ TCGReg base, ptrdiff_t offset)
135
+{
136
+ tcg_out_ldst(s, I3312_LDRH, dest, base, offset, 1);
137
+}
138
+
139
+static const TCGOutOpLoad outop_ld16u = {
140
+ .base.static_constraint = C_O1_I1(r, r),
141
+ .out = tgen_ld16u,
142
+};
143
+
144
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
145
+ TCGReg base, ptrdiff_t offset)
146
+{
147
+ AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSHW : I3312_LDRSHX;
148
+ tcg_out_ldst(s, insn, dest, base, offset, 1);
149
+}
150
+
151
+static const TCGOutOpLoad outop_ld16s = {
152
+ .base.static_constraint = C_O1_I1(r, r),
153
+ .out = tgen_ld16s,
154
+};
155
+
156
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
157
+ TCGReg base, ptrdiff_t offset)
158
+{
159
+ tcg_out_ldst(s, I3312_LDRW, dest, base, offset, 2);
160
+}
161
+
162
+static const TCGOutOpLoad outop_ld32u = {
163
+ .base.static_constraint = C_O1_I1(r, r),
164
+ .out = tgen_ld32u,
165
+};
166
+
167
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
168
+ TCGReg base, ptrdiff_t offset)
169
+{
170
+ tcg_out_ldst(s, I3312_LDRSWX, dest, base, offset, 2);
171
+}
172
+
173
+static const TCGOutOpLoad outop_ld32s = {
174
+ .base.static_constraint = C_O1_I1(r, r),
175
+ .out = tgen_ld32s,
176
+};
177
+
178
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
179
const TCGArg args[TCG_MAX_OP_ARGS],
180
const int const_args[TCG_MAX_OP_ARGS])
181
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
182
TCGArg a2 = args[2];
183
184
switch (opc) {
185
- case INDEX_op_ld8u_i32:
186
- case INDEX_op_ld8u_i64:
187
- tcg_out_ldst(s, I3312_LDRB, a0, a1, a2, 0);
188
- break;
189
- case INDEX_op_ld8s_i32:
190
- tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2, 0);
191
- break;
192
- case INDEX_op_ld8s_i64:
193
- tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2, 0);
194
- break;
195
- case INDEX_op_ld16u_i32:
196
- case INDEX_op_ld16u_i64:
197
- tcg_out_ldst(s, I3312_LDRH, a0, a1, a2, 1);
198
- break;
199
- case INDEX_op_ld16s_i32:
200
- tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2, 1);
201
- break;
202
- case INDEX_op_ld16s_i64:
203
- tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2, 1);
204
- break;
205
- case INDEX_op_ld_i32:
206
- case INDEX_op_ld32u_i64:
207
- tcg_out_ldst(s, I3312_LDRW, a0, a1, a2, 2);
208
- break;
209
- case INDEX_op_ld32s_i64:
210
- tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2, 2);
211
- break;
212
- case INDEX_op_ld_i64:
213
- tcg_out_ldst(s, I3312_LDRX, a0, a1, a2, 3);
214
- break;
215
-
216
case INDEX_op_st8_i32:
217
case INDEX_op_st8_i64:
218
tcg_out_ldst(s, I3312_STRB, a0, a1, a2, 0);
219
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
220
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
221
{
222
switch (op) {
223
- case INDEX_op_ld8u_i32:
224
- case INDEX_op_ld8s_i32:
225
- case INDEX_op_ld16u_i32:
226
- case INDEX_op_ld16s_i32:
227
- case INDEX_op_ld_i32:
228
- case INDEX_op_ld8u_i64:
229
- case INDEX_op_ld8s_i64:
230
- case INDEX_op_ld16u_i64:
231
- case INDEX_op_ld16s_i64:
232
- case INDEX_op_ld32u_i64:
233
- case INDEX_op_ld32s_i64:
234
- case INDEX_op_ld_i64:
235
- return C_O1_I1(r, r);
236
-
237
case INDEX_op_st8_i32:
238
case INDEX_op_st16_i32:
239
case INDEX_op_st_i32:
240
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st32(TCGContext *s, ARMCond cond,
tcg_out_st32_12(s, cond, rd, rn, offset);
}

-static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld16u_8(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld16s_8(s, cond, rd, rn, offset);
-}
-
static void tcg_out_st16(TCGContext *s, ARMCond cond,
TCGReg rd, TCGReg rn, int32_t offset)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st16(TCGContext *s, ARMCond cond,
tcg_out_st16_8(s, cond, rd, rn, offset);
}

-static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld8_12(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_ld8s_8(s, cond, rd, rn, offset);
-}
-
static void tcg_out_st8(TCGContext *s, ARMCond cond,
TCGReg rd, TCGReg rn, int32_t offset)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
.out_rrr = tgen_extract2,
};

+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld8_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld8_12(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld8s_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld8s_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld16u_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld16u_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_ld16s_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_ld16s_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_ld8u_i32:
- tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld8s_i32:
- tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16u_i32:
- tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld16s_i32:
- tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_ld_i32:
- tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
- break;
case INDEX_op_st8_i32:
tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- return C_O1_I1(r, r);
-
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
404
index XXXXXXX..XXXXXXX 100644
405
--- a/tcg/i386/tcg-target.c.inc
406
+++ b/tcg/i386/tcg-target.c.inc
407
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
408
.out_rrr = tgen_extract2,
409
};
410
411
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
412
+ TCGReg base, ptrdiff_t offset)
413
+{
414
+ tcg_out_modrm_offset(s, OPC_MOVZBL, dest, base, offset);
415
+}
416
+
417
+static const TCGOutOpLoad outop_ld8u = {
418
+ .base.static_constraint = C_O1_I1(r, r),
419
+ .out = tgen_ld8u,
420
+};
421
+
422
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
423
+ TCGReg base, ptrdiff_t offset)
424
+{
425
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
426
+ tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, dest, base, offset);
427
+}
428
+
429
+static const TCGOutOpLoad outop_ld8s = {
430
+ .base.static_constraint = C_O1_I1(r, r),
431
+ .out = tgen_ld8s,
432
+};
433
+
434
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
435
+ TCGReg base, ptrdiff_t offset)
436
+{
437
+ tcg_out_modrm_offset(s, OPC_MOVZWL, dest, base, offset);
438
+}
439
+
440
+static const TCGOutOpLoad outop_ld16u = {
441
+ .base.static_constraint = C_O1_I1(r, r),
442
+ .out = tgen_ld16u,
443
+};
444
+
445
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
446
+ TCGReg base, ptrdiff_t offset)
447
+{
448
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
449
+ tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, dest, base, offset);
450
+}
451
+
452
+static const TCGOutOpLoad outop_ld16s = {
453
+ .base.static_constraint = C_O1_I1(r, r),
454
+ .out = tgen_ld16s,
455
+};
456
+
457
+#if TCG_TARGET_REG_BITS == 64
458
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
459
+ TCGReg base, ptrdiff_t offset)
460
+{
461
+ tcg_out_modrm_offset(s, OPC_MOVL_GvEv, dest, base, offset);
462
+}
463
+
464
+static const TCGOutOpLoad outop_ld32u = {
465
+ .base.static_constraint = C_O1_I1(r, r),
466
+ .out = tgen_ld32u,
467
+};
468
+
469
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
470
+ TCGReg base, ptrdiff_t offset)
471
+{
472
+ tcg_out_modrm_offset(s, OPC_MOVSLQ, dest, base, offset);
473
+}
474
+
475
+static const TCGOutOpLoad outop_ld32s = {
476
+ .base.static_constraint = C_O1_I1(r, r),
477
+ .out = tgen_ld32s,
478
+};
479
+#endif
211
+#endif
480
212
481
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
213
if (cmp == page) {
482
const TCGArg args[TCG_MAX_OP_ARGS],
214
/* Found entry in victim tlb, swap tlb and iotlb. */
483
const int const_args[TCG_MAX_OP_ARGS])
215
@@ -XXX,XX +XXX,XX @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
484
{
216
uintptr_t index = tlb_index(env, mmu_idx, addr);
485
TCGArg a0, a1, a2;
217
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
486
- int rexw;
218
487
219
- if (!tlb_hit(entry->addr_write, addr)) {
488
#if TCG_TARGET_REG_BITS == 64
220
+ if (!tlb_hit(tlb_addr_write(entry), addr)) {
489
# define OP_32_64(x) \
221
/* TLB entry is for a different page */
490
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
222
if (!VICTIM_TLB_HIT(addr_write, addr)) {
491
a0 = args[0];
223
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
492
a1 = args[1];
224
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
493
a2 = args[2];
225
size_t mmu_idx = get_mmuidx(oi);
494
- rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
226
uintptr_t index = tlb_index(env, mmu_idx, addr);
495
227
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
496
switch (opc) {
228
- target_ulong tlb_addr = tlbe->addr_write;
497
- OP_32_64(ld8u):
229
+ target_ulong tlb_addr = tlb_addr_write(tlbe);
498
- /* Note that we can ignore REXW for the zero-extend to 64-bit. */
230
TCGMemOp mop = get_memop(oi);
499
- tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
231
int a_bits = get_alignment_bits(mop);
500
- break;
232
int s_bits = mop & MO_SIZE;
501
- OP_32_64(ld8s):
233
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
502
- tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
234
tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
503
- break;
235
mmu_idx, retaddr);
504
- OP_32_64(ld16u):
236
}
505
- /* Note that we can ignore REXW for the zero-extend to 64-bit. */
237
- tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
506
- tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
238
+ tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
507
- break;
508
- OP_32_64(ld16s):
509
- tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
510
- break;
511
-#if TCG_TARGET_REG_BITS == 64
512
- case INDEX_op_ld32u_i64:
513
-#endif
514
- case INDEX_op_ld_i32:
515
- tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
516
- break;
517
-
518
OP_32_64(st8):
519
if (const_args[0]) {
520
tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
521
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
522
break;
523
524
#if TCG_TARGET_REG_BITS == 64
525
- case INDEX_op_ld32s_i64:
526
- tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
527
- break;
528
- case INDEX_op_ld_i64:
529
- tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
530
- break;
531
case INDEX_op_st_i64:
532
if (const_args[0]) {
533
tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
534
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
535
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
536
{
537
switch (op) {
538
- case INDEX_op_ld8u_i32:
539
- case INDEX_op_ld8u_i64:
540
- case INDEX_op_ld8s_i32:
541
- case INDEX_op_ld8s_i64:
542
- case INDEX_op_ld16u_i32:
543
- case INDEX_op_ld16u_i64:
544
- case INDEX_op_ld16s_i32:
545
- case INDEX_op_ld16s_i64:
546
- case INDEX_op_ld_i32:
547
- case INDEX_op_ld32u_i64:
548
- case INDEX_op_ld32s_i64:
549
- case INDEX_op_ld_i64:
550
- return C_O1_I1(r, r);
551
-
552
case INDEX_op_st8_i32:
553
case INDEX_op_st8_i64:
554
return C_O0_I2(qi, r);
555
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
.base.static_constraint = C_NotImplemented,
};

+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_BU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_B, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_HU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_H, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_WU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a3 = args[3];

switch (opc) {
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
- break;
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
- break;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
- break;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32s_i64:
- tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
- break;
- case INDEX_op_ld32u_i64:
- tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
- break;
-
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_i128:
return C_O0_I3(r, r, r);

- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld_i64:
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
.base.static_constraint = C_NotImplemented,
};

+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LBU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LB, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LHU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LH, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LWU, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_LW, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+#endif
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- i1 = OPC_LBU;
- goto do_ldst;
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- i1 = OPC_LB;
- goto do_ldst;
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- i1 = OPC_LHU;
- goto do_ldst;
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- i1 = OPC_LH;
- goto do_ldst;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32s_i64:
- i1 = OPC_LW;
- goto do_ldst;
- case INDEX_op_ld32u_i64:
- i1 = OPC_LWU;
- goto do_ldst;
- case INDEX_op_ld_i64:
- i1 = OPC_LD;
- goto do_ldst;
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
i1 = OPC_SB;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld_i64:
- return C_O1_I1(r, r);
-
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
818
index XXXXXXX..XXXXXXX 100644
819
--- a/tcg/ppc/tcg-target.c.inc
820
+++ b/tcg/ppc/tcg-target.c.inc
821
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
822
.base.static_constraint = C_NotImplemented,
823
};
824
825
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
826
+ TCGReg base, ptrdiff_t offset)
827
+{
828
+ tcg_out_mem_long(s, LBZ, LBZX, dest, base, offset);
829
+}
830
+
831
+static const TCGOutOpLoad outop_ld8u = {
832
+ .base.static_constraint = C_O1_I1(r, r),
833
+ .out = tgen_ld8u,
834
+};
835
+
836
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
837
+ TCGReg base, ptrdiff_t offset)
838
+{
839
+ tgen_ld8u(s, type, dest, base, offset);
840
+ tcg_out_ext8s(s, type, dest, dest);
841
+}
842
+
843
+static const TCGOutOpLoad outop_ld8s = {
844
+ .base.static_constraint = C_O1_I1(r, r),
845
+ .out = tgen_ld8s,
846
+};
847
+
848
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
849
+ TCGReg base, ptrdiff_t offset)
850
+{
851
+ tcg_out_mem_long(s, LHZ, LHZX, dest, base, offset);
852
+}
853
+
854
+static const TCGOutOpLoad outop_ld16u = {
855
+ .base.static_constraint = C_O1_I1(r, r),
856
+ .out = tgen_ld16u,
857
+};
858
+
859
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
860
+ TCGReg base, ptrdiff_t offset)
861
+{
862
+ tcg_out_mem_long(s, LHA, LHAX, dest, base, offset);
863
+}
864
+
865
+static const TCGOutOpLoad outop_ld16s = {
866
+ .base.static_constraint = C_O1_I1(r, r),
867
+ .out = tgen_ld16s,
868
+};
869
+
870
+#if TCG_TARGET_REG_BITS == 64
871
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
872
+ TCGReg base, ptrdiff_t offset)
873
+{
874
+ tcg_out_mem_long(s, LWZ, LWZX, dest, base, offset);
875
+}
876
+
877
+static const TCGOutOpLoad outop_ld32u = {
878
+ .base.static_constraint = C_O1_I1(r, r),
879
+ .out = tgen_ld32u,
880
+};
881
+
882
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
883
+ TCGReg base, ptrdiff_t offset)
884
+{
885
+ tcg_out_mem_long(s, LWA, LWAX, dest, base, offset);
886
+}
887
+
888
+static const TCGOutOpLoad outop_ld32s = {
889
+ .base.static_constraint = C_O1_I1(r, r),
890
+ .out = tgen_ld32s,
891
+};
892
+#endif
893
+
894
895
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
896
const TCGArg args[TCG_MAX_OP_ARGS],
897
const int const_args[TCG_MAX_OP_ARGS])
898
{
899
switch (opc) {
900
- case INDEX_op_ld8u_i32:
901
- case INDEX_op_ld8u_i64:
902
- tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
903
- break;
904
- case INDEX_op_ld8s_i32:
905
- case INDEX_op_ld8s_i64:
906
- tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
907
- tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
908
- break;
909
- case INDEX_op_ld16u_i32:
910
- case INDEX_op_ld16u_i64:
911
- tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
912
- break;
913
- case INDEX_op_ld16s_i32:
914
- case INDEX_op_ld16s_i64:
915
- tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
916
- break;
917
- case INDEX_op_ld_i32:
918
- case INDEX_op_ld32u_i64:
919
- tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
920
- break;
921
- case INDEX_op_ld32s_i64:
922
- tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
923
- break;
924
- case INDEX_op_ld_i64:
925
- tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
926
- break;
927
case INDEX_op_st8_i32:
928
case INDEX_op_st8_i64:
929
tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
930
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
931
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
932
{
933
switch (op) {
934
- case INDEX_op_ld8u_i32:
935
- case INDEX_op_ld8s_i32:
936
- case INDEX_op_ld16u_i32:
937
- case INDEX_op_ld16s_i32:
938
- case INDEX_op_ld_i32:
939
- case INDEX_op_ld8u_i64:
940
- case INDEX_op_ld8s_i64:
941
- case INDEX_op_ld16u_i64:
942
- case INDEX_op_ld16s_i64:
943
- case INDEX_op_ld32u_i64:
944
- case INDEX_op_ld32s_i64:
945
- case INDEX_op_ld_i64:
946
- return C_O1_I1(r, r);
947
-
948
case INDEX_op_st8_i32:
949
case INDEX_op_st16_i32:
950
case INDEX_op_st_i32:
951
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
952
index XXXXXXX..XXXXXXX 100644
953
--- a/tcg/riscv/tcg-target.c.inc
954
+++ b/tcg/riscv/tcg-target.c.inc
955
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
956
.base.static_constraint = C_NotImplemented,
957
};
958
959
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
960
+ TCGReg base, ptrdiff_t offset)
961
+{
962
+ tcg_out_ldst(s, OPC_LBU, dest, base, offset);
963
+}
964
+
965
+static const TCGOutOpLoad outop_ld8u = {
966
+ .base.static_constraint = C_O1_I1(r, r),
967
+ .out = tgen_ld8u,
968
+};
969
+
970
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
971
+ TCGReg base, ptrdiff_t offset)
972
+{
973
+ tcg_out_ldst(s, OPC_LB, dest, base, offset);
974
+}
975
+
976
+static const TCGOutOpLoad outop_ld8s = {
977
+ .base.static_constraint = C_O1_I1(r, r),
978
+ .out = tgen_ld8s,
979
+};
980
+
981
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
982
+ TCGReg base, ptrdiff_t offset)
983
+{
984
+ tcg_out_ldst(s, OPC_LHU, dest, base, offset);
985
+}
986
+
987
+static const TCGOutOpLoad outop_ld16u = {
988
+ .base.static_constraint = C_O1_I1(r, r),
989
+ .out = tgen_ld16u,
990
+};
991
+
992
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
993
+ TCGReg base, ptrdiff_t offset)
994
+{
995
+ tcg_out_ldst(s, OPC_LH, dest, base, offset);
996
+}
997
+
998
+static const TCGOutOpLoad outop_ld16s = {
999
+ .base.static_constraint = C_O1_I1(r, r),
1000
+ .out = tgen_ld16s,
1001
+};
1002
+
1003
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
1004
+ TCGReg base, ptrdiff_t offset)
1005
+{
1006
+ tcg_out_ldst(s, OPC_LWU, dest, base, offset);
1007
+}
1008
+
1009
+static const TCGOutOpLoad outop_ld32u = {
1010
+ .base.static_constraint = C_O1_I1(r, r),
1011
+ .out = tgen_ld32u,
1012
+};
1013
+
1014
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
1015
+ TCGReg base, ptrdiff_t offset)
1016
+{
1017
+ tcg_out_ldst(s, OPC_LW, dest, base, offset);
1018
+}
1019
+
1020
+static const TCGOutOpLoad outop_ld32s = {
1021
+ .base.static_constraint = C_O1_I1(r, r),
1022
+ .out = tgen_ld32s,
1023
+};
1024
+
1025
1026
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1027
const TCGArg args[TCG_MAX_OP_ARGS],
1028
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1029
TCGArg a2 = args[2];
1030
1031
switch (opc) {
1032
- case INDEX_op_ld8u_i32:
1033
- case INDEX_op_ld8u_i64:
1034
- tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
1035
- break;
1036
- case INDEX_op_ld8s_i32:
1037
- case INDEX_op_ld8s_i64:
1038
- tcg_out_ldst(s, OPC_LB, a0, a1, a2);
1039
- break;
1040
- case INDEX_op_ld16u_i32:
1041
- case INDEX_op_ld16u_i64:
1042
- tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
1043
- break;
1044
- case INDEX_op_ld16s_i32:
1045
- case INDEX_op_ld16s_i64:
1046
- tcg_out_ldst(s, OPC_LH, a0, a1, a2);
1047
- break;
1048
- case INDEX_op_ld32u_i64:
1049
- tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
1050
- break;
1051
- case INDEX_op_ld_i32:
1052
- case INDEX_op_ld32s_i64:
1053
- tcg_out_ldst(s, OPC_LW, a0, a1, a2);
1054
- break;
1055
- case INDEX_op_ld_i64:
1056
- tcg_out_ldst(s, OPC_LD, a0, a1, a2);
1057
- break;
1058
-
1059
case INDEX_op_st8_i32:
1060
case INDEX_op_st8_i64:
1061
tcg_out_ldst(s, OPC_SB, a0, a1, a2);
1062
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
1063
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1064
{
1065
switch (op) {
1066
- case INDEX_op_ld8u_i32:
1067
- case INDEX_op_ld8s_i32:
1068
- case INDEX_op_ld16u_i32:
1069
- case INDEX_op_ld16s_i32:
1070
- case INDEX_op_ld_i32:
1071
- case INDEX_op_ld8u_i64:
1072
- case INDEX_op_ld8s_i64:
1073
- case INDEX_op_ld16u_i64:
1074
- case INDEX_op_ld16s_i64:
1075
- case INDEX_op_ld32s_i64:
1076
- case INDEX_op_ld32u_i64:
1077
- case INDEX_op_ld_i64:
1078
- return C_O1_I1(r, r);
1079
-
1080
case INDEX_op_st8_i32:
1081
case INDEX_op_st16_i32:
1082
case INDEX_op_st_i32:
1083
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, unsigned a0)
}
}

+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LLGC, dest, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LGB, dest, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LLGH, dest, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_mem(s, RX_LH, RXY_LHY, dest, base, TCG_REG_NONE, offset);
+ } else {
+ tcg_out_mem(s, 0, RXY_LGH, dest, base, TCG_REG_NONE, offset);
+ }
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LLGF, dest, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem(s, 0, RXY_LGF, dest, base, TCG_REG_NONE, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- OP_32_64(ld8u):
- /* ??? LLC (RXY format) is only present with the extended-immediate
- facility, whereas LLGC is always present. */
- tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
-
- OP_32_64(ld8s):
- /* ??? LB is no smaller than LGB, so no point to using it. */
- tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
-
- OP_32_64(ld16u):
- /* ??? LLH (RXY format) is only present with the extended-immediate
- facility, whereas LLGH is always present. */
- tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
-
- case INDEX_op_ld16s_i32:
- tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
-
- case INDEX_op_ld_i32:
- tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
- break;
-
OP_32_64(st8):
tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
TCG_REG_NONE, args[2]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

- case INDEX_op_ld16s_i64:
- tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
- case INDEX_op_ld32u_i64:
- tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
- case INDEX_op_ld32s_i64:
- tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
- break;
-
case INDEX_op_st32_i64:
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- return C_O1_I1(r, r);
-
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
case INDEX_op_st16_i32:
1238
index XXXXXXX..XXXXXXX 100644
1239
--- a/tcg/sparc64/tcg-target.c.inc
1240
+++ b/tcg/sparc64/tcg-target.c.inc
1241
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpExtract2 outop_extract2 = {
1242
.base.static_constraint = C_NotImplemented,
1243
};
1244
1245
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
1246
+ TCGReg base, ptrdiff_t offset)
1247
+{
1248
+ tcg_out_ldst(s, dest, base, offset, LDUB);
1249
+}
1250
+
1251
+static const TCGOutOpLoad outop_ld8u = {
1252
+ .base.static_constraint = C_O1_I1(r, r),
1253
+ .out = tgen_ld8u,
1254
+};
1255
+
1256
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
1257
+ TCGReg base, ptrdiff_t offset)
1258
+{
1259
+ tcg_out_ldst(s, dest, base, offset, LDSB);
1260
+}
1261
+
1262
+static const TCGOutOpLoad outop_ld8s = {
1263
+ .base.static_constraint = C_O1_I1(r, r),
1264
+ .out = tgen_ld8s,
1265
+};
1266
+
1267
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
1268
+ TCGReg base, ptrdiff_t offset)
1269
+{
1270
+ tcg_out_ldst(s, dest, base, offset, LDUH);
1271
+}
1272
+
1273
+static const TCGOutOpLoad outop_ld16u = {
1274
+ .base.static_constraint = C_O1_I1(r, r),
1275
+ .out = tgen_ld16u,
1276
+};
1277
+
1278
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
1279
+ TCGReg base, ptrdiff_t offset)
1280
+{
1281
+ tcg_out_ldst(s, dest, base, offset, LDSH);
1282
+}
1283
+
1284
+static const TCGOutOpLoad outop_ld16s = {
1285
+ .base.static_constraint = C_O1_I1(r, r),
1286
+ .out = tgen_ld16s,
1287
+};
1288
+
1289
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
1290
+ TCGReg base, ptrdiff_t offset)
1291
+{
1292
+ tcg_out_ldst(s, dest, base, offset, LDUW);
1293
+}
1294
+
1295
+static const TCGOutOpLoad outop_ld32u = {
1296
+ .base.static_constraint = C_O1_I1(r, r),
1297
+ .out = tgen_ld32u,
1298
+};
1299
+
1300
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
1301
+ TCGReg base, ptrdiff_t offset)
1302
+{
1303
+ tcg_out_ldst(s, dest, base, offset, LDSW);
1304
+}
1305
+
1306
+static const TCGOutOpLoad outop_ld32s = {
1307
+ .base.static_constraint = C_O1_I1(r, r),
1308
+ .out = tgen_ld32s,
1309
+};
1310
+
1311
+
1312
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1313
const TCGArg args[TCG_MAX_OP_ARGS],
1314
const int const_args[TCG_MAX_OP_ARGS])
1315
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1316
glue(glue(case INDEX_op_, x), _i32): \
1317
glue(glue(case INDEX_op_, x), _i64)
1318
1319
- OP_32_64(ld8u):
1320
- tcg_out_ldst(s, a0, a1, a2, LDUB);
1321
- break;
1322
- OP_32_64(ld8s):
1323
- tcg_out_ldst(s, a0, a1, a2, LDSB);
1324
- break;
1325
- OP_32_64(ld16u):
1326
- tcg_out_ldst(s, a0, a1, a2, LDUH);
1327
- break;
1328
- OP_32_64(ld16s):
1329
- tcg_out_ldst(s, a0, a1, a2, LDSH);
1330
- break;
1331
- case INDEX_op_ld_i32:
1332
- case INDEX_op_ld32u_i64:
1333
- tcg_out_ldst(s, a0, a1, a2, LDUW);
1334
- break;
1335
OP_32_64(st8):
1336
tcg_out_ldst(s, a0, a1, a2, STB);
1337
break;
1338
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1339
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1340
break;
1341
1342
- case INDEX_op_ld32s_i64:
1343
- tcg_out_ldst(s, a0, a1, a2, LDSW);
1344
- break;
1345
- case INDEX_op_ld_i64:
1346
- tcg_out_ldst(s, a0, a1, a2, LDX);
1347
- break;
1348
case INDEX_op_st_i64:
1349
tcg_out_ldst(s, a0, a1, a2, STX);
1350
break;
1351
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
1352
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
1353
{
1354
switch (op) {
1355
- case INDEX_op_ld8u_i32:
1356
- case INDEX_op_ld8u_i64:
1357
- case INDEX_op_ld8s_i32:
1358
- case INDEX_op_ld8s_i64:
1359
- case INDEX_op_ld16u_i32:
1360
- case INDEX_op_ld16u_i64:
1361
- case INDEX_op_ld16s_i32:
1362
- case INDEX_op_ld16s_i64:
1363
- case INDEX_op_ld_i32:
1364
- case INDEX_op_ld32u_i64:
1365
- case INDEX_op_ld32s_i64:
1366
- case INDEX_op_ld_i64:
1367
case INDEX_op_qemu_ld_i32:
1368
case INDEX_op_qemu_ld_i64:
1369
return C_O1_I1(r, r);
1370
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- return C_O1_I1(r, r);
-
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_br(TCGContext *s, TCGLabel *l)
tcg_out_op_l(s, INDEX_op_br, l);
}

+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld8u_i32, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld8s_i32, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld16u_i32, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld16s_i32, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+#if TCG_TARGET_REG_BITS == 64
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld32u_i64, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, INDEX_op_ld32s_i64, dest, base, offset);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+#endif
+
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- CASE_32_64(ld8u)
- CASE_32_64(ld8s)
- CASE_32_64(ld16u)
- CASE_32_64(ld16s)
- case INDEX_op_ld_i32:
- CASE_64(ld32u)
- CASE_64(ld32s)
- CASE_64(ld)
CASE_32_64(st8)
CASE_32_64(st16)
case INDEX_op_st_i32:
--
2.43.0
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/tcg.c | 37 ++++++++++
5
tcg/aarch64/tcg-target.c.inc | 52 +++++++-------
6
tcg/arm/tcg-target.c.inc | 72 +++++++++----------
7
tcg/i386/tcg-target.c.inc | 114 ++++++++++++++-----------------
8
tcg/loongarch64/tcg-target.c.inc | 50 +++++++-------
9
tcg/mips/tcg-target.c.inc | 55 ++++++++-------
10
tcg/ppc/tcg-target.c.inc | 52 +++++++-------
11
tcg/riscv/tcg-target.c.inc | 52 +++++++-------
12
tcg/s390x/tcg-target.c.inc | 60 ++++++++--------
13
tcg/sparc64/tcg-target.c.inc | 53 +++++++-------
14
tcg/tci/tcg-target.c.inc | 56 ++++++++-------
15
11 files changed, 341 insertions(+), 312 deletions(-)
16
1
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
20
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpSetcond2 {
22
TCGArg bl, bool const_bl, TCGArg bh, bool const_bh);
23
} TCGOutOpSetcond2;
24
25
+typedef struct TCGOutOpStore {
26
+ TCGOutOp base;
27
+ void (*out_r)(TCGContext *s, TCGType type, TCGReg data,
28
+ TCGReg base, intptr_t offset);
29
+ void (*out_i)(TCGContext *s, TCGType type, tcg_target_long data,
30
+ TCGReg base, intptr_t offset);
31
+} TCGOutOpStore;
32
+
33
typedef struct TCGOutOpSubtract {
34
TCGOutOp base;
35
void (*out_rrr)(TCGContext *s, TCGType type,
36
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
37
OUTOP(INDEX_op_sextract, TCGOutOpExtract, outop_sextract),
38
OUTOP(INDEX_op_shl, TCGOutOpBinary, outop_shl),
39
OUTOP(INDEX_op_shr, TCGOutOpBinary, outop_shr),
40
+ OUTOP(INDEX_op_st_i32, TCGOutOpStore, outop_st),
41
+ OUTOP(INDEX_op_st_i64, TCGOutOpStore, outop_st),
42
+ OUTOP(INDEX_op_st8_i32, TCGOutOpStore, outop_st8),
43
+ OUTOP(INDEX_op_st8_i64, TCGOutOpStore, outop_st8),
44
+ OUTOP(INDEX_op_st16_i32, TCGOutOpStore, outop_st16),
45
+ OUTOP(INDEX_op_st16_i64, TCGOutOpStore, outop_st16),
46
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
47
OUTOP(INDEX_op_subbi, TCGOutOpAddSubCarry, outop_subbi),
48
OUTOP(INDEX_op_subbio, TCGOutOpAddSubCarry, outop_subbio),
49
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
50
OUTOP(INDEX_op_extrh_i64_i32, TCGOutOpUnary, outop_extrh_i64_i32),
51
OUTOP(INDEX_op_ld32u, TCGOutOpLoad, outop_ld32u),
52
OUTOP(INDEX_op_ld32s, TCGOutOpLoad, outop_ld32s),
53
+ OUTOP(INDEX_op_st32_i64, TCGOutOpStore, outop_st),
54
#endif
55
};
56
57
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
58
}
59
break;
60
61
+ case INDEX_op_st32_i64:
62
+ /* Use tcg_op_st w/ I32. */
63
+ type = TCG_TYPE_I32;
64
+ /* fall through */
65
+ case INDEX_op_st_i32:
66
+ case INDEX_op_st_i64:
67
+ case INDEX_op_st8_i32:
68
+ case INDEX_op_st8_i64:
69
+ case INDEX_op_st16_i32:
70
+ case INDEX_op_st16_i64:
71
+ {
72
+ const TCGOutOpStore *out =
73
+ container_of(all_outop[op->opc], TCGOutOpStore, base);
74
+
75
+ if (const_args[0]) {
76
+ out->out_i(s, type, new_args[0], new_args[1], new_args[2]);
77
+ } else {
78
+ out->out_r(s, type, new_args[0], new_args[1], new_args[2]);
79
+ }
80
+ }
81
+ break;
82
+
83
case INDEX_op_brcond:
84
{
85
const TCGOutOpBrcond *out = &outop_brcond;
86
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
.out = tgen_ld32s,
};

+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_STRB, data, base, offset, 0);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, I3312_STRH, data, base, offset, 0);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
TCGArg a2 = args[2];

switch (opc) {
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_ldst(s, I3312_STRB, a0, a1, a2, 0);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_ldst(s, I3312_STRH, a0, a1, a2, 1);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, I3312_STRW, a0, a1, a2, 2);
- break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, I3312_STRX, a0, a1, a2, 3);
- break;
-
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, a0, a1, a2, ext);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(rz, r);
-
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
return C_O1_I1(r, r);
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st32(TCGContext *s, ARMCond cond,
tcg_out_st32_12(s, cond, rd, rn, offset);
}

-static void tcg_out_st16(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xff || offset < -0xff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_st16_8(s, cond, rd, rn, offset);
-}
-
-static void tcg_out_st8(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn, int32_t offset)
-{
- if (offset > 0xfff || offset < -0xfff) {
- tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
- tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
- } else
- tcg_out_st8_12(s, cond, rd, rn, offset);
-}
-
/*
* The _goto case is normally between TBs within the same code buffer, and
* with the code buffer limited to 16MB we wouldn't need the long case.
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld16s = {
.out = tgen_ld16s,
};

+static void tgen_st8(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xfff || offset < -0xfff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_st8_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_st8_12(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st8,
+};
+
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg rd,
+ TCGReg rn, ptrdiff_t offset)
+{
+ if (offset > 0xff || offset < -0xff) {
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, offset);
+ tcg_out_st16_r(s, COND_AL, rd, rn, TCG_REG_TMP);
+ } else {
+ tcg_out_st16_8(s, COND_AL, rd, rn, offset);
+ }
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st16,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tcg_out_st,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_st8_i32:
- tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_st16_i32:
- tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i32:
- tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- return C_O0_I2(r, r);
-
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, q);
case INDEX_op_qemu_ld_i64:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
};
#endif

+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, data, base, offset);
+}
+
+static void tgen_st8_i(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, base, offset);
+ tcg_out8(s, data);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(qi, r),
+ .out_r = tgen_st8_r,
+ .out_i = tgen_st8_i,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, data, base, offset);
+}
+
+static void tgen_st16_i(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, base, offset);
+ tcg_out16(s, data);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(ri, r),
+ .out_r = tgen_st16_r,
+ .out_i = tgen_st16_i,
+};
+
+static void tgen_st_i(TCGContext *s, TCGType type, tcg_target_long data,
+ TCGReg base, ptrdiff_t offset)
+{
+ bool ok = tcg_out_sti(s, type, data, base, offset);
+ tcg_debug_assert(ok);
+}
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(re, r),
+ .out_r = tcg_out_st,
+ .out_i = tgen_st_i,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
TCGArg a0, a1, a2;

-#if TCG_TARGET_REG_BITS == 64
-# define OP_32_64(x) \
- case glue(glue(INDEX_op_, x), _i64): \
- case glue(glue(INDEX_op_, x), _i32)
-#else
-# define OP_32_64(x) \
- case glue(glue(INDEX_op_, x), _i32)
-#endif
-
/* Hoist the loads of the most common arguments. */
a0 = args[0];
a1 = args[1];
a2 = args[2];

switch (opc) {
- OP_32_64(st8):
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
- tcg_out8(s, a0);
- } else {
- tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
- }
- break;
- OP_32_64(st16):
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
- tcg_out16(s, a0);
- } else {
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
- }
- break;
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_st32_i64:
-#endif
- case INDEX_op_st_i32:
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
- tcg_out32(s, a0);
- } else {
- tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
- }
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
break;

-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_st_i64:
- if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
- tcg_out32(s, a0);
- } else {
- tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
- }
- break;
-#endif
-
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
-
-#undef OP_32_64
}

static int const umin_insn[4] = {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- return C_O0_I2(qi, r);
-
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- return C_O0_I2(ri, r);
-
- case INDEX_op_st_i64:
- return C_O0_I2(re, r);
-
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, L);

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
.out = tgen_ld32s,
};

+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_ST_B, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_ST_H, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a3 = args[3];

switch (opc) {
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
- break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st_i64:
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st_i64:
return C_O0_I2(rz, r)
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
};
#endif

+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_SB, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, OPC_SH, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- MIPSInsn i1;
TCGArg a0, a1, a2;

a0 = args[0];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- i1 = OPC_SB;
- goto do_ldst;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- i1 = OPC_SH;
- goto do_ldst;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- i1 = OPC_SW;
- goto do_ldst;
- case INDEX_op_st_i64:
- i1 = OPC_SD;
- do_ldst:
- tcg_out_ldst(s, i1, a0, a1, a2);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(rz, r);
-
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, r);
case INDEX_op_qemu_st_i32:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
};
#endif

+static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, STB, STBX, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st8,
+};
+
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_mem_long(s, STH, STHX, data, base, offset);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tgen_st16,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out_r = tcg_out_st,
+};
+

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
- break;
- case INDEX_op_st_i64:
- tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
- break;
-
case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return C_O0_I2(r, r);
-
case INDEX_op_qemu_ld_i32:
return C_O1_I1(r, r);
case INDEX_op_qemu_ld_i64:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
659
index XXXXXXX..XXXXXXX 100644
660
--- a/tcg/riscv/tcg-target.c.inc
661
+++ b/tcg/riscv/tcg-target.c.inc
662
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
663
.out = tgen_ld32s,
664
};
665
666
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
667
+ TCGReg base, ptrdiff_t offset)
668
+{
669
+ tcg_out_ldst(s, OPC_SB, data, base, offset);
670
+}
671
+
672
+static const TCGOutOpStore outop_st8 = {
673
+ .base.static_constraint = C_O0_I2(rz, r),
674
+ .out_r = tgen_st8_r,
675
+};
676
+
677
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
678
+ TCGReg base, ptrdiff_t offset)
679
+{
680
+ tcg_out_ldst(s, OPC_SH, data, base, offset);
681
+}
682
+
683
+static const TCGOutOpStore outop_st16 = {
684
+ .base.static_constraint = C_O0_I2(rz, r),
685
+ .out_r = tgen_st16_r,
686
+};
687
+
688
+static const TCGOutOpStore outop_st = {
689
+ .base.static_constraint = C_O0_I2(rz, r),
690
+ .out_r = tcg_out_st,
691
+};
692
+
693
694
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
695
const TCGArg args[TCG_MAX_OP_ARGS],
696
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
697
TCGArg a2 = args[2];
698
699
switch (opc) {
700
- case INDEX_op_st8_i32:
701
- case INDEX_op_st8_i64:
702
- tcg_out_ldst(s, OPC_SB, a0, a1, a2);
703
- break;
704
- case INDEX_op_st16_i32:
705
- case INDEX_op_st16_i64:
706
- tcg_out_ldst(s, OPC_SH, a0, a1, a2);
707
- break;
708
- case INDEX_op_st_i32:
709
- case INDEX_op_st32_i64:
710
- tcg_out_ldst(s, OPC_SW, a0, a1, a2);
711
- break;
712
- case INDEX_op_st_i64:
713
- tcg_out_ldst(s, OPC_SD, a0, a1, a2);
714
- break;
715
-
716
case INDEX_op_qemu_ld_i32:
717
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
718
break;
719
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
720
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
721
{
722
switch (op) {
723
- case INDEX_op_st8_i32:
724
- case INDEX_op_st16_i32:
725
- case INDEX_op_st_i32:
726
- case INDEX_op_st8_i64:
727
- case INDEX_op_st16_i64:
728
- case INDEX_op_st32_i64:
729
- case INDEX_op_st_i64:
730
- return C_O0_I2(rz, r);
731
-
732
case INDEX_op_qemu_ld_i32:
733
case INDEX_op_qemu_ld_i64:
734
return C_O1_I1(r, r);
735
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
736
index XXXXXXX..XXXXXXX 100644
737
--- a/tcg/s390x/tcg-target.c.inc
738
+++ b/tcg/s390x/tcg-target.c.inc
739
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
740
.out = tgen_ld32s,
741
};
742
743
-# define OP_32_64(x) \
744
- case glue(glue(INDEX_op_,x),_i32): \
745
- case glue(glue(INDEX_op_,x),_i64)
746
+static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
747
+ TCGReg base, ptrdiff_t offset)
748
+{
749
+ tcg_out_mem(s, RX_STC, RXY_STCY, data, base, TCG_REG_NONE, offset);
750
+}
751
+
752
+static const TCGOutOpStore outop_st8 = {
753
+ .base.static_constraint = C_O0_I2(r, r),
754
+ .out_r = tgen_st8,
755
+};
756
+
757
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
758
+ TCGReg base, ptrdiff_t offset)
759
+{
760
+ tcg_out_mem(s, RX_STH, RXY_STHY, data, base, TCG_REG_NONE, offset);
761
+}
762
+
763
+static const TCGOutOpStore outop_st16 = {
764
+ .base.static_constraint = C_O0_I2(r, r),
765
+ .out_r = tgen_st16,
766
+};
767
+
768
+static const TCGOutOpStore outop_st = {
769
+ .base.static_constraint = C_O0_I2(r, r),
770
+ .out_r = tcg_out_st,
771
+};
772
+
773
774
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
775
const TCGArg args[TCG_MAX_OP_ARGS],
776
const int const_args[TCG_MAX_OP_ARGS])
777
{
778
switch (opc) {
779
- OP_32_64(st8):
780
- tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
781
- TCG_REG_NONE, args[2]);
782
- break;
783
-
784
- OP_32_64(st16):
785
- tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
786
- TCG_REG_NONE, args[2]);
787
- break;
788
-
789
- case INDEX_op_st_i32:
790
- tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
791
- break;
792
-
793
case INDEX_op_qemu_ld_i32:
794
tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
795
break;
796
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
797
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
798
break;
799
800
- case INDEX_op_st32_i64:
801
- tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
802
- break;
803
- case INDEX_op_st_i64:
804
- tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
805
- break;
806
-
807
case INDEX_op_call: /* Always emitted via tcg_out_call. */
808
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
809
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
810
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
811
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
812
{
813
switch (op) {
814
- case INDEX_op_st8_i32:
815
- case INDEX_op_st8_i64:
816
- case INDEX_op_st16_i32:
817
- case INDEX_op_st16_i64:
818
- case INDEX_op_st_i32:
819
- case INDEX_op_st32_i64:
820
- case INDEX_op_st_i64:
821
- return C_O0_I2(r, r);
822
-
823
case INDEX_op_qemu_ld_i32:
824
case INDEX_op_qemu_ld_i64:
825
return C_O1_I1(r, r);
826
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
827
index XXXXXXX..XXXXXXX 100644
828
--- a/tcg/sparc64/tcg-target.c.inc
829
+++ b/tcg/sparc64/tcg-target.c.inc
830
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
831
.out = tgen_ld32s,
832
};
833
834
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
835
+ TCGReg base, ptrdiff_t offset)
836
+{
837
+ tcg_out_ldst(s, data, base, offset, STB);
838
+}
839
+
840
+static const TCGOutOpStore outop_st8 = {
841
+ .base.static_constraint = C_O0_I2(rz, r),
842
+ .out_r = tgen_st8_r,
843
+};
844
+
845
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
846
+ TCGReg base, ptrdiff_t offset)
847
+{
848
+ tcg_out_ldst(s, data, base, offset, STH);
849
+}
850
+
851
+static const TCGOutOpStore outop_st16 = {
852
+ .base.static_constraint = C_O0_I2(rz, r),
853
+ .out_r = tgen_st16_r,
854
+};
855
+
856
+static const TCGOutOpStore outop_st = {
857
+ .base.static_constraint = C_O0_I2(rz, r),
858
+ .out_r = tcg_out_st,
859
+};
860
+
861
862
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
863
const TCGArg args[TCG_MAX_OP_ARGS],
864
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
865
a2 = args[2];
866
867
switch (opc) {
868
-#define OP_32_64(x) \
869
- glue(glue(case INDEX_op_, x), _i32): \
870
- glue(glue(case INDEX_op_, x), _i64)
871
-
872
- OP_32_64(st8):
873
- tcg_out_ldst(s, a0, a1, a2, STB);
874
- break;
875
- OP_32_64(st16):
876
- tcg_out_ldst(s, a0, a1, a2, STH);
877
- break;
878
- case INDEX_op_st_i32:
879
- case INDEX_op_st32_i64:
880
- tcg_out_ldst(s, a0, a1, a2, STW);
881
- break;
882
-
883
case INDEX_op_qemu_ld_i32:
884
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
885
break;
886
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
887
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
888
break;
889
890
- case INDEX_op_st_i64:
891
- tcg_out_ldst(s, a0, a1, a2, STX);
892
- break;
893
-
894
case INDEX_op_call: /* Always emitted via tcg_out_call. */
895
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
896
case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
897
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
898
case INDEX_op_qemu_ld_i64:
899
return C_O1_I1(r, r);
900
901
- case INDEX_op_st8_i32:
902
- case INDEX_op_st8_i64:
903
- case INDEX_op_st16_i32:
904
- case INDEX_op_st16_i64:
905
- case INDEX_op_st_i32:
906
- case INDEX_op_st32_i64:
907
- case INDEX_op_st_i64:
908
case INDEX_op_qemu_st_i32:
909
case INDEX_op_qemu_st_i64:
910
return C_O0_I2(rz, r);
911
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
912
index XXXXXXX..XXXXXXX 100644
913
--- a/tcg/tci/tcg-target.c.inc
914
+++ b/tcg/tci/tcg-target.c.inc
915
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
916
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
917
{
918
switch (op) {
919
- case INDEX_op_st8_i32:
920
- case INDEX_op_st16_i32:
921
- case INDEX_op_st_i32:
922
- case INDEX_op_st8_i64:
923
- case INDEX_op_st16_i64:
924
- case INDEX_op_st32_i64:
925
- case INDEX_op_st_i64:
926
- return C_O0_I2(r, r);
927
-
928
case INDEX_op_qemu_ld_i32:
929
return C_O1_I1(r, r);
930
case INDEX_op_qemu_ld_i64:
931
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
932
tcg_out32(s, insn);
933
}
934
935
-#if TCG_TARGET_REG_BITS == 64
936
-# define CASE_32_64(x) \
937
- case glue(glue(INDEX_op_, x), _i64): \
938
- case glue(glue(INDEX_op_, x), _i32):
939
-# define CASE_64(x) \
940
- case glue(glue(INDEX_op_, x), _i64):
941
-#else
942
-# define CASE_32_64(x) \
943
- case glue(glue(INDEX_op_, x), _i32):
944
-# define CASE_64(x)
945
-#endif
946
-
947
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
948
{
949
tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
950
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpLoad outop_ld32s = {
951
};
952
#endif
953
954
+static void tgen_st8(TCGContext *s, TCGType type, TCGReg data,
955
+ TCGReg base, ptrdiff_t offset)
956
+{
957
+ tcg_out_ldst(s, INDEX_op_st8_i32, data, base, offset);
958
+}
959
+
960
+static const TCGOutOpStore outop_st8 = {
961
+ .base.static_constraint = C_O0_I2(r, r),
962
+ .out_r = tgen_st8,
963
+};
964
+
965
+static void tgen_st16(TCGContext *s, TCGType type, TCGReg data,
966
+ TCGReg base, ptrdiff_t offset)
967
+{
968
+ tcg_out_ldst(s, INDEX_op_st16_i32, data, base, offset);
969
+}
970
+
971
+static const TCGOutOpStore outop_st16 = {
972
+ .base.static_constraint = C_O0_I2(r, r),
973
+ .out_r = tgen_st16,
974
+};
975
+
976
+static const TCGOutOpStore outop_st = {
977
+ .base.static_constraint = C_O0_I2(r, r),
978
+ .out_r = tcg_out_st,
979
+};
980
+
981
982
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
983
const TCGArg args[TCG_MAX_OP_ARGS],
984
const int const_args[TCG_MAX_OP_ARGS])
985
{
986
switch (opc) {
987
- CASE_32_64(st8)
988
- CASE_32_64(st16)
989
- case INDEX_op_st_i32:
990
- CASE_64(st32)
991
- CASE_64(st)
992
- tcg_out_ldst(s, opc, args[0], args[1], args[2]);
993
- break;
994
-
995
case INDEX_op_qemu_ld_i64:
996
case INDEX_op_qemu_st_i64:
997
if (TCG_TARGET_REG_BITS == 32) {
998
--
999
2.43.0
Deleted patch
Merge into INDEX_op_{ld,st,ld2,st2}, where "2" indicates that two
inputs or outputs are required. This simplifies the processing of
i64/i128 depending on host word size.
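
For illustration only (this paragraph and sketch are not part of the
patch text): the effect of the merge is easiest to see in the
gen_ld_i64 helper from the tcg/tcg-op-ldst.c hunk below, where the
choice between the single-register and two-register opcode reduces to
a host-word-size test.

    /* Sketch, restating gen_ld_i64 from the hunk below. */
    static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
    {
        if (TCG_TARGET_REG_BITS == 32) {
            /* The value occupies two host registers: emit qemu_ld2. */
            gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64,
                      tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
                      addr, oi);
        } else {
            /* One host register suffices: emit plain qemu_ld. */
            gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
        }
    }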

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-opc.h | 16 ++-----
tcg/optimize.c | 15 ++-----
tcg/tcg-op-ldst.c | 75 +++++++++++++++++---------------
tcg/tcg.c | 28 +++++++-----
tcg/tci.c | 69 ++++++++++++-----------------
tcg/aarch64/tcg-target.c.inc | 20 ++++-----
tcg/arm/tcg-target.c.inc | 16 +++----
tcg/i386/tcg-target.c.inc | 50 ++++++---------------
tcg/loongarch64/tcg-target.c.inc | 28 +++++-------
tcg/mips/tcg-target.c.inc | 38 +++++++---------
tcg/ppc/tcg-target.c.inc | 47 ++++++------------
tcg/riscv/tcg-target.c.inc | 20 +++------
tcg/s390x/tcg-target.c.inc | 28 +++++-------
tcg/sparc64/tcg-target.c.inc | 20 +++------
tcg/tci/tcg-target.c.inc | 36 ++++++---------
15 files changed, 200 insertions(+), 306 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -XXX,XX +XXX,XX @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT)

-DEF(qemu_ld_i32, 1, 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i32, 0, 1 + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_i64, DATA64_ARGS, 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i64, 0, DATA64_ARGS + 1, 1,
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-
-/* Only for 64-bit hosts at the moment. */
-DEF(qemu_ld_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
+DEF(qemu_ld, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_st, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_ld2, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)
+DEF(qemu_st2, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_INT)

/* Host vector support. */

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
case INDEX_op_orc_vec:
done = fold_orc(&ctx, op);
break;
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
done = fold_qemu_ld_1reg(&ctx, op);
break;
- case INDEX_op_qemu_ld_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- done = fold_qemu_ld_1reg(&ctx, op);
- break;
- }
- QEMU_FALLTHROUGH;
- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
done = fold_qemu_ld_2reg(&ctx, op);
break;
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st:
+ case INDEX_op_qemu_st2:
done = fold_qemu_st(&ctx, op);
break;
case INDEX_op_rems:
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
return op;
}

-static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
- TCGTemp *addr, MemOpIdx oi)
+static void gen_ldst1(TCGOpcode opc, TCGType type, TCGTemp *v,
+ TCGTemp *addr, MemOpIdx oi)
{
- TCGOp *op;
-
- if (vh) {
- op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
- temp_arg(addr), oi);
- } else {
- op = tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
- }
+ TCGOp *op = tcg_gen_op3(opc, type, temp_arg(v), temp_arg(addr), oi);
TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
}

-static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+static void gen_ldst2(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
+ TCGTemp *addr, MemOpIdx oi)
+{
+ TCGOp *op = tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
+ temp_arg(addr), oi);
+ TCGOP_FLAGS(op) = get_memop(oi) & MO_SIZE;
+}
+
+static void gen_ld_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
{
if (TCG_TARGET_REG_BITS == 32) {
- TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
- TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
- gen_ldst(opc, TCG_TYPE_I64, vl, vh, addr, oi);
+ gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I64,
+ tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
+ addr, oi);
} else {
- gen_ldst(opc, TCG_TYPE_I64, tcgv_i64_temp(v), NULL, addr, oi);
+ gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
+ }
+}
+
+static void gen_st_i64(TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I64,
+ tcgv_i32_temp(TCGV_LOW(v)), tcgv_i32_temp(TCGV_HIGH(v)),
+ addr, oi);
+ } else {
+ gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I64, tcgv_i64_temp(v), addr, oi);
}
}

@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
}

copy_addr = plugin_maybe_preserve_addr(addr);
- gen_ldst(INDEX_op_qemu_ld_i32, TCG_TYPE_I32,
- tcgv_i32_temp(val), NULL, addr, oi);
+ gen_ldst1(INDEX_op_qemu_ld, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
QEMU_PLUGIN_MEM_R);

@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}

- gen_ldst(INDEX_op_qemu_st_i32, TCG_TYPE_I32,
- tcgv_i32_temp(val), NULL, addr, oi);
+ gen_ldst1(INDEX_op_qemu_st, TCG_TYPE_I32, tcgv_i32_temp(val), addr, oi);
plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

if (swap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
}

copy_addr = plugin_maybe_preserve_addr(addr);
- gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
+ gen_ld_i64(val, addr, oi);
plugin_gen_mem_callbacks_i64(val, copy_addr, addr, orig_oi,
QEMU_PLUGIN_MEM_R);

@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
oi = make_memop_idx(memop, idx);
}

- gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
+ gen_st_i64(val, addr, oi);
plugin_gen_mem_callbacks_i64(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);

if (swap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}

- gen_ldst(INDEX_op_qemu_ld_i128, TCG_TYPE_I128, tcgv_i64_temp(lo),
- tcgv_i64_temp(hi), addr, oi);
+ gen_ldst2(INDEX_op_qemu_ld2, TCG_TYPE_I128, tcgv_i64_temp(lo),
+ tcgv_i64_temp(hi), addr, oi);

if (need_bswap) {
tcg_gen_bswap64_i64(lo, lo);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
y = TCGV128_LOW(val);
}

- gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
- make_memop_idx(mop[0], idx));
+ gen_ld_i64(x, addr, make_memop_idx(mop[0], idx));

if (need_bswap) {
tcg_gen_bswap64_i64(x, x);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
addr_p8 = tcgv_i64_temp(t);
}

- gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
- make_memop_idx(mop[1], idx));
+ gen_ld_i64(y, addr_p8, make_memop_idx(mop[1], idx));
tcg_temp_free_internal(addr_p8);

if (need_bswap) {
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
hi = TCGV128_HIGH(val);
}

- gen_ldst(INDEX_op_qemu_st_i128, TCG_TYPE_I128,
- tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
+ gen_ldst2(INDEX_op_qemu_st2, TCG_TYPE_I128,
+ tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);

if (need_bswap) {
tcg_temp_free_i64(lo);
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
x = b;
}

- gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
- make_memop_idx(mop[0], idx));
+ gen_st_i64(x, addr, make_memop_idx(mop[0], idx));

if (tcg_ctx->addr_type == TCG_TYPE_I32) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,

if (b) {
tcg_gen_bswap64_i64(b, y);
- gen_ldst_i64(INDEX_op_qemu_st_i64, b, addr_p8,
- make_memop_idx(mop[1], idx));
+ gen_st_i64(b, addr_p8, make_memop_idx(mop[1], idx));
tcg_temp_free_i64(b);
} else {
- gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
- make_memop_idx(mop[1], idx));
+ gen_st_i64(y, addr_p8, make_memop_idx(mop[1], idx));
}
tcg_temp_free_internal(addr_p8);
} else {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_exit_tb:
case INDEX_op_goto_tb:
case INDEX_op_goto_ptr:
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i64:
return true;

- case INDEX_op_qemu_ld_i128:
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ tcg_debug_assert(type <= TCG_TYPE_REG);
+ return true;
+
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_debug_assert(type == TCG_TYPE_I64);
+ return true;
+ }
+ tcg_debug_assert(type == TCG_TYPE_I128);
return TCG_TARGET_HAS_qemu_ldst_i128;

case INDEX_op_add:
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
}
i = 1;
break;
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i64:
- case INDEX_op_qemu_ld_i128:
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
{
const char *s_al, *s_op, *s_at;
MemOpIdx oi = op->args[k++];
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tb_ptr = ptr;
break;

- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
tci_args_rrm(insn, &r0, &r1, &oi);
taddr = regs[r1];
regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
break;

- case INDEX_op_qemu_ld_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- taddr = regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- taddr = regs[r2];
- oi = regs[r3];
- }
- tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
- if (TCG_TARGET_REG_BITS == 32) {
- tci_write_reg64(regs, r1, r0, tmp64);
- } else {
- regs[r0] = tmp64;
- }
- break;
-
- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
tci_args_rrm(insn, &r0, &r1, &oi);
taddr = regs[r1];
tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
break;

- case INDEX_op_qemu_st_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tci_args_rrm(insn, &r0, &r1, &oi);
- tmp64 = regs[r0];
- taddr = regs[r1];
- } else {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- tmp64 = tci_uint64(regs[r1], regs[r0]);
- taddr = regs[r2];
- oi = regs[r3];
- }
+ case INDEX_op_qemu_ld2:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ taddr = regs[r2];
+ oi = regs[r3];
+ tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
+ tci_write_reg64(regs, r1, r0, tmp64);
+ break;
+
+ case INDEX_op_qemu_st2:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ tmp64 = tci_uint64(regs[r1], regs[r0]);
+ taddr = regs[r2];
+ oi = regs[r3];
tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
break;

@@ -XXX,XX +XXX,XX @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
str_r(r2), str_r(r3));
break;

- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i64:
- if (TCG_TARGET_REG_BITS == 32) {
- tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
- info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
- op_name, str_r(r0), str_r(r1),
- str_r(r2), str_r(r3));
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
tci_args_rrm(insn, &r0, &r1, &oi);
info->fprintf_func(info->stream, "%-12s %s, %s, %x",
op_name, str_r(r0), str_r(r1), oi);
break;

+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
+ tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
+ info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
+ op_name, str_r(r0), str_r(r1),
+ str_r(r2), str_r(r3));
+ break;
+
case 0:
/* tcg_out_nop_fill uses zeros */
if (insn == 0) {
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
TCGArg a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld:
tcg_out_qemu_ld(s, a0, a1, a2, ext);
break;
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st:
tcg_out_qemu_st(s, a0, a1, a2, ext);
break;
- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
break;
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st2:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
break;

@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
return C_O2_I1(r, r, r);
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st:
return C_O0_I2(rz, r);
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st2:
return C_O0_I3(rz, rz, r);

case INDEX_op_add_vec:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld2:
tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;

- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
break;
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st2:
tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
break;

@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, q);
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld2:
return C_O2_I1(e, p, q);
- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
return C_O0_I2(q, q);
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st2:
return C_O0_I3(Q, p, q);

case INDEX_op_st_vec:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,

switch (memop & MO_SIZE) {
case MO_8:
- /* This is handled with constraints on INDEX_op_qemu_st_i32. */
+ /* This is handled with constraints on INDEX_op_qemu_st. */
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
datalo, h.base, h.index, 0, h.ofs);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, type);
break;
- case INDEX_op_qemu_ld_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, -1, a1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_ld_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I128);
+ case INDEX_op_qemu_ld2:
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
break;

- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, a0, -1, a1, a2, type);
break;
- case INDEX_op_qemu_st_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, -1, a1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
- }
- break;
- case INDEX_op_qemu_st_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
- tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I128);
+ case INDEX_op_qemu_st2:
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], type);
break;

case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, L);

- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
return (TCG_TARGET_REG_BITS == 32 && flags == MO_8
? C_O0_I2(s, L)
: C_O0_I2(L, L));

- case INDEX_op_qemu_ld_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L) : C_O2_I1(r, r, L);
-
- case INDEX_op_qemu_st_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I3(L, L, L);
-
- case INDEX_op_qemu_ld_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ case INDEX_op_qemu_ld2:
return C_O2_I1(r, r, L);
- case INDEX_op_qemu_st_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ case INDEX_op_qemu_st2:
return C_O0_I3(L, L, L);

case INDEX_op_ld_vec:
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a3 = args[3];

switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, a0, a1, a2, type);
break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, a0, a1, a2, type);
break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st2:
tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
break;

@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st:
return C_O0_I2(rz, r);

- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
return C_N2_I1(r, r, r);

- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st2:
return C_O0_I3(r, r, r);

- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);

case INDEX_op_ld_vec:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, type);
break;
- case INDEX_op_qemu_ld_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, a0, 0, a1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], TCG_TYPE_I64);
- }
+ case INDEX_op_qemu_ld2:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
break;

- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, a0, 0, a1, a2, type);
break;
- case INDEX_op_qemu_st_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, a0, 0, a1, a2, TCG_TYPE_I64);
- } else {
- tcg_out_qemu_st(s, a0, a1, a2, args[3], TCG_TYPE_I64);
- }
+ case INDEX_op_qemu_st2:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], type);
break;

case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);
- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
return C_O0_I2(rz, r);
- case INDEX_op_qemu_ld_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_st_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(rz, r) : C_O0_I3(rz, rz, r);
+ case INDEX_op_qemu_ld2:
+ return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
+ case INDEX_op_qemu_st2:
+ return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(rz, rz, r);

default:
return C_NotImplemented;
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], type);
break;
- case INDEX_op_qemu_ld_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
- } else {
+ case INDEX_op_qemu_ld2:
+ if (TCG_TARGET_REG_BITS == 32) {
tcg_out_qemu_ld(s, args[0], args[1], args[2],
args[3], TCG_TYPE_I64);
+ break;
}
- break;
- case INDEX_op_qemu_ld_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
break;

- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2], type);
break;
- case INDEX_op_qemu_st_i64:
- if (TCG_TARGET_REG_BITS == 64) {
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I64);
- } else {
+ case INDEX_op_qemu_st2:
+ if (TCG_TARGET_REG_BITS == 32) {
tcg_out_qemu_st(s, args[0], args[1], args[2],
args[3], TCG_TYPE_I64);
+ break;
}
- break;
- case INDEX_op_qemu_st_i128:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
+ case INDEX_op_qemu_ld2:
+ return TCG_TARGET_REG_BITS == 64
+ ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r);

- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
return C_O0_I2(r, r);
- case INDEX_op_qemu_st_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
-
- case INDEX_op_qemu_ld_i128:
- return C_N1O1_I1(o, m, r);
- case INDEX_op_qemu_st_i128:
- return C_O0_I3(o, m, r);
+ case INDEX_op_qemu_st2:
+ return TCG_TARGET_REG_BITS == 64
+ ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r);

case INDEX_op_add_vec:
case INDEX_op_sub_vec:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, a0, a1, a2, type);
break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, a0, a1, a2, type);
break;

case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st:
return C_O0_I2(rz, r);

case INDEX_op_st_vec:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], type);
break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, args[0], args[1], args[2], type);
break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
break;
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st2:
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;

@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);
- case INDEX_op_qemu_st_i64:
- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
return C_O0_I2(r, r);
- case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_ld2:
return C_O2_I1(o, m, r);
- case INDEX_op_qemu_st_i128:
+ case INDEX_op_qemu_st2:
return C_O0_I3(o, m, r);

case INDEX_op_st_vec:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
+ case INDEX_op_qemu_ld:
+ tcg_out_qemu_ld(s, a0, a1, a2, type);
break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
+ case INDEX_op_qemu_st:
+ tcg_out_qemu_st(s, a0, a1, a2, type);
break;

case INDEX_op_call: /* Always emitted via tcg_out_call. */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);

- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_st:
return C_O0_I2(rz, r);

default:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld:
return C_O1_I1(r, r);
- case INDEX_op_qemu_ld_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
- case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st:
return C_O0_I2(r, r);
- case INDEX_op_qemu_st_i64:
- return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
+ case INDEX_op_qemu_ld2:
+ return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
+ case INDEX_op_qemu_st2:
+ return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r);

default:
return C_NotImplemented;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i64:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
- tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
- break;
- }
- /* fall through */
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_st_i32:
- if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
- tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
- tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
- } else {
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
- }
+ case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
+ tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
+ tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
break;

case INDEX_op_call: /* Always emitted via tcg_out_call. */
--
2.43.0
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 32 +++++++++++++++-
tcg/aarch64/tcg-target.c.inc | 30 +++++++++------
tcg/arm/tcg-target.c.inc | 63 +++++++++++++++++++++++---------
tcg/i386/tcg-target.c.inc | 47 ++++++++++++++--------
tcg/loongarch64/tcg-target.c.inc | 37 ++++++++---------
tcg/mips/tcg-target.c.inc | 57 +++++++++++++++++++++--------
tcg/ppc/tcg-target.c.inc | 45 ++++++++++++++---------
tcg/riscv/tcg-target.c.inc | 22 ++++++-----
tcg/s390x/tcg-target.c.inc | 32 +++++++++-------
tcg/sparc64/tcg-target.c.inc | 21 ++++++-----
tcg/tci/tcg-target.c.inc | 30 ++++++++++---
11 files changed, 283 insertions(+), 133 deletions(-)
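
For reference, a standalone sketch of the hook structure this patch
adds in tcg/tcg.c (fields as in the hunk below; the constant-instance
example is taken from the aarch64 hunk, so treat the wiring shown here
as illustrative rather than complete):

    typedef struct TCGOutOpQemuLdSt {
        TCGOutOp base;
        void (*out)(TCGContext *s, TCGType type, TCGReg dest,
                    TCGReg addr, MemOpIdx oi);
    } TCGOutOpQemuLdSt;

    /* Each backend supplies a constant instance, e.g. on aarch64: */
    static const TCGOutOpQemuLdSt outop_qemu_ld = {
        .base.static_constraint = C_O1_I1(r, r),
        .out = tgen_qemu_ld,
    };

The register allocator then dispatches through the all_outop[] table
rather than through each backend's tcg_out_op switch.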
16
1
17
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg.c
20
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOutOpMul2 {
22
TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3);
23
} TCGOutOpMul2;
24
25
+typedef struct TCGOutOpQemuLdSt {
26
+ TCGOutOp base;
27
+ void (*out)(TCGContext *s, TCGType type, TCGReg dest,
28
+ TCGReg addr, MemOpIdx oi);
29
+} TCGOutOpQemuLdSt;
30
+
31
+typedef struct TCGOutOpQemuLdSt2 {
32
+ TCGOutOp base;
33
+ void (*out)(TCGContext *s, TCGType type, TCGReg dlo, TCGReg dhi,
34
+ TCGReg addr, MemOpIdx oi);
35
+} TCGOutOpQemuLdSt2;
36
+
37
typedef struct TCGOutOpUnary {
38
TCGOutOp base;
39
void (*out_rr)(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1);
40
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
41
OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
42
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
43
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
44
+ OUTOP(INDEX_op_qemu_ld, TCGOutOpQemuLdSt, outop_qemu_ld),
45
+ OUTOP(INDEX_op_qemu_ld2, TCGOutOpQemuLdSt2, outop_qemu_ld2),
46
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
47
OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
48
OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
49
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
50
return true;
51
}
52
tcg_debug_assert(type == TCG_TYPE_I128);
53
- return TCG_TARGET_HAS_qemu_ldst_i128;
54
+ goto do_lookup;
55
56
case INDEX_op_add:
57
case INDEX_op_and:
58
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
59
return false;
60
}
61
62
+ do_lookup:
63
outop = all_outop[op];
64
tcg_debug_assert(outop != NULL);
65
66
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
67
}
68
break;
69
70
+ case INDEX_op_qemu_ld:
71
+ {
72
+ const TCGOutOpQemuLdSt *out = &outop_qemu_ld;
73
+ out->out(s, type, new_args[0], new_args[1], new_args[2]);
74
+ }
75
+ break;
76
+
77
+ case INDEX_op_qemu_ld2:
78
+ {
79
+ const TCGOutOpQemuLdSt2 *out = &outop_qemu_ld2;
80
+ out->out(s, type, new_args[0], new_args[1],
81
+ new_args[2], new_args[3]);
82
+ }
83
+ break;
84
+
85
case INDEX_op_brcond:
86
{
87
const TCGOutOpBrcond *out = &outop_brcond;
88
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
89
index XXXXXXX..XXXXXXX 100644
90
--- a/tcg/aarch64/tcg-target.c.inc
91
+++ b/tcg/aarch64/tcg-target.c.inc
92
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
93
}
94
}
95
96
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
97
- MemOpIdx oi, TCGType data_type)
98
+static void tgen_qemu_ld(TCGContext *s, TCGType data_type, TCGReg data_reg,
99
+ TCGReg addr_reg, MemOpIdx oi)
100
{
101
TCGLabelQemuLdst *ldst;
102
HostAddress h;
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
104
}
105
}
106
107
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
108
+ .base.static_constraint = C_O1_I1(r, r),
109
+ .out = tgen_qemu_ld,
110
+};
111
+
112
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
113
MemOpIdx oi, TCGType data_type)
114
{
115
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
116
}
117
}
118
119
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
120
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
121
+{
122
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
123
+}
124
+
125
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
126
+ .base.static_constraint = C_O2_I1(r, r, r),
127
+ .out = tgen_qemu_ld2,
128
+};
129
+
130
static const tcg_insn_unit *tb_ret_addr;
131
132
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
133
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
134
TCGArg a2 = args[2];
135
136
switch (opc) {
137
- case INDEX_op_qemu_ld:
138
- tcg_out_qemu_ld(s, a0, a1, a2, ext);
139
- break;
140
case INDEX_op_qemu_st:
141
tcg_out_qemu_st(s, a0, a1, a2, ext);
142
break;
143
- case INDEX_op_qemu_ld2:
144
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
145
- break;
146
case INDEX_op_qemu_st2:
147
tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
148
break;
149
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
150
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
151
{
152
switch (op) {
153
- case INDEX_op_qemu_ld:
154
- return C_O1_I1(r, r);
155
- case INDEX_op_qemu_ld2:
156
- return C_O2_I1(r, r, r);
157
case INDEX_op_qemu_st:
158
return C_O0_I2(rz, r);
159
case INDEX_op_qemu_st2:
160
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
161
index XXXXXXX..XXXXXXX 100644
162
--- a/tcg/arm/tcg-target.c.inc
163
+++ b/tcg/arm/tcg-target.c.inc
164
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
165
}
166
}
167
168
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
169
- TCGReg addr, MemOpIdx oi, TCGType data_type)
170
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
171
+ TCGReg addr, MemOpIdx oi)
172
{
173
MemOp opc = get_memop(oi);
174
TCGLabelQemuLdst *ldst;
175
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
176
177
ldst = prepare_host_addr(s, &h, addr, oi, true);
178
if (ldst) {
179
- ldst->type = data_type;
180
+ ldst->type = type;
181
+ ldst->datalo_reg = data;
182
+ ldst->datahi_reg = -1;
183
+
184
+ /*
185
+ * This a conditional BL only to load a pointer within this
186
+ * opcode into LR for the slow path. We will not be using
187
+ * the value for a tail call.
188
+ */
189
+ ldst->label_ptr[0] = s->code_ptr;
190
+ tcg_out_bl_imm(s, COND_NE, 0);
191
+ }
192
+
193
+ tcg_out_qemu_ld_direct(s, opc, data, -1, h);
194
+
195
+ if (ldst) {
196
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
197
+ }
198
+}
199
+
200
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
201
+ .base.static_constraint = C_O1_I1(r, q),
202
+ .out = tgen_qemu_ld,
203
+};
204
+
205
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
206
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
207
+{
208
+ MemOp opc = get_memop(oi);
209
+ TCGLabelQemuLdst *ldst;
210
+ HostAddress h;
211
+
212
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
213
+ if (ldst) {
214
+ ldst->type = type;
215
ldst->datalo_reg = datalo;
216
ldst->datahi_reg = datahi;
217
218
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
219
*/
220
ldst->label_ptr[0] = s->code_ptr;
221
tcg_out_bl_imm(s, COND_NE, 0);
222
+ }
223
224
- tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
225
+ tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
226
+
227
+ if (ldst) {
228
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
229
- } else {
230
- tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
231
}
232
}
233
234
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
235
+ .base.static_constraint = C_O2_I1(e, p, q),
236
+ .out = tgen_qemu_ld2,
237
+};
238
+
239
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
240
TCGReg datahi, HostAddress h)
241
{
242
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
243
const int const_args[TCG_MAX_OP_ARGS])
244
{
245
switch (opc) {
246
- case INDEX_op_qemu_ld:
247
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
248
- break;
249
- case INDEX_op_qemu_ld2:
250
- tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
251
- break;
252
-
253
case INDEX_op_qemu_st:
254
tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
255
break;
256
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
257
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
258
{
259
switch (op) {
260
- case INDEX_op_qemu_ld:
261
- return C_O1_I1(r, q);
262
- case INDEX_op_qemu_ld2:
263
- return C_O2_I1(e, p, q);
264
case INDEX_op_qemu_st:
265
return C_O0_I2(q, q);
266
case INDEX_op_qemu_st2:
267
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
268
index XXXXXXX..XXXXXXX 100644
269
--- a/tcg/i386/tcg-target.c.inc
270
+++ b/tcg/i386/tcg-target.c.inc
271
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
272
}
273
}
274
275
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
276
- TCGReg addr, MemOpIdx oi, TCGType data_type)
277
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
278
+ TCGReg addr, MemOpIdx oi)
279
{
280
TCGLabelQemuLdst *ldst;
281
HostAddress h;
282
283
ldst = prepare_host_addr(s, &h, addr, oi, true);
284
- tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, get_memop(oi));
285
+ tcg_out_qemu_ld_direct(s, data, -1, h, type, get_memop(oi));
286
287
if (ldst) {
288
- ldst->type = data_type;
289
+ ldst->type = type;
290
+ ldst->datalo_reg = data;
291
+ ldst->datahi_reg = -1;
292
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
293
+ }
294
+}
295
+
296
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
297
+ .base.static_constraint = C_O1_I1(r, L),
298
+ .out = tgen_qemu_ld,
299
+};
300
+
301
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
302
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
303
+{
304
+ TCGLabelQemuLdst *ldst;
305
+ HostAddress h;
306
+
307
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
308
+ tcg_out_qemu_ld_direct(s, datalo, datahi, h, type, get_memop(oi));
309
+
310
+ if (ldst) {
311
+ ldst->type = type;
312
ldst->datalo_reg = datalo;
313
ldst->datahi_reg = datahi;
314
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
315
}
316
}
317
318
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
319
+ .base.static_constraint = C_O2_I1(r, r, L),
320
+ .out = tgen_qemu_ld2,
321
+};
322
+
323
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
324
HostAddress h, MemOp memop)
325
{
326
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
327
a2 = args[2];
328
329
switch (opc) {
330
- case INDEX_op_qemu_ld:
331
- tcg_out_qemu_ld(s, a0, -1, a1, a2, type);
332
- break;
333
- case INDEX_op_qemu_ld2:
334
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
335
- break;
336
-
337
case INDEX_op_qemu_st:
338
tcg_out_qemu_st(s, a0, -1, a1, a2, type);
339
break;
340
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
341
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
342
{
343
switch (op) {
344
- case INDEX_op_qemu_ld:
345
- return C_O1_I1(r, L);
346
-
347
case INDEX_op_qemu_st:
348
return (TCG_TARGET_REG_BITS == 32 && flags == MO_8
349
? C_O0_I2(s, L)
350
: C_O0_I2(L, L));
351
352
- case INDEX_op_qemu_ld2:
353
- return C_O2_I1(r, r, L);
354
case INDEX_op_qemu_st2:
355
return C_O0_I3(L, L, L);
356
357
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
}
}

-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;

ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
- tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
+ tcg_out_qemu_ld_indexed(s, get_memop(oi), type, data_reg, h);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
TCGReg rd, HostAddress h)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi
}
}

+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_N2_I1(r, r, r),
+ .out = tgen_qemu_ld2,
+};
+
/*
* Entry-points
*/
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a3 = args[3];

switch (opc) {
- case INDEX_op_qemu_ld:
- tcg_out_qemu_ld(s, a0, a1, a2, type);
- break;
- case INDEX_op_qemu_ld2:
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
- break;
case INDEX_op_qemu_st:
tcg_out_qemu_st(s, a0, a1, a2, type);
break;
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
switch (op) {
case INDEX_op_qemu_st:
return C_O0_I2(rz, r);
-
- case INDEX_op_qemu_ld2:
- return C_N2_I1(r, r, r);
-
case INDEX_op_qemu_st2:
return C_O0_I3(r, r, r);

- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
case INDEX_op_dup_vec:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
}
}

-static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addr, MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
ldst = prepare_host_addr(s, &h, addr, oi, true);

if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
- tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, data_type);
+ tcg_out_qemu_ld_direct(s, data, 0, h.base, opc, type);
} else {
- tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, data_type);
+ tcg_out_qemu_ld_unalign(s, data, 0, h.base, opc, type);
}

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = 0;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ ldst = prepare_host_addr(s, &h, addr, oi, true);
+
+ if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
+ tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, type);
+ } else {
+ tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, type);
+ }
+
+ if (ldst) {
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ /* Ensure that the mips32 code is compiled but discarded for mips64. */
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 32 ? C_O2_I1(r, r, r) : C_NotImplemented,
+ .out =
+ TCG_TARGET_REG_BITS == 32 ? tgen_qemu_ld2 : NULL,
+};
+
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, MemOp opc)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld:
- tcg_out_qemu_ld(s, a0, 0, a1, a2, type);
- break;
- case INDEX_op_qemu_ld2:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
- tcg_out_qemu_ld(s, a0, a1, a2, args[3], type);
- break;
-
case INDEX_op_qemu_st:
tcg_out_qemu_st(s, a0, 0, a1, a2, type);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
case INDEX_op_qemu_st:
return C_O0_I2(rz, r);
case INDEX_op_qemu_ld2:
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}

+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_qemu_ld(s, data, -1, addr, oi, type);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_qemu_ld(s, datalo, datahi, addr, oi, type);
+ } else {
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, true);
+ }
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r),
+ .out = tgen_qemu_ld2,
+};
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
int i;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld:
- tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], type);
- break;
- case INDEX_op_qemu_ld2:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_ld(s, args[0], args[1], args[2],
- args[3], TCG_TYPE_I64);
- break;
- }
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
- break;
-
case INDEX_op_qemu_st:
tcg_out_qemu_st(s, args[0], -1, args[1], args[2], type);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
- case INDEX_op_qemu_ld2:
- return TCG_TARGET_REG_BITS == 64
- ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r);
-
case INDEX_op_qemu_st:
return C_O0_I2(r, r);
case INDEX_op_qemu_st2:
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
}
}

-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
TCGReg base;

ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
- tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);
+ tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), type);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
TCGReg base, MemOp opc)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
TCGArg a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld:
- tcg_out_qemu_ld(s, a0, a1, a2, type);
- break;
case INDEX_op_qemu_st:
tcg_out_qemu_st(s, a0, a1, a2, type);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
case INDEX_op_qemu_st:
return C_O0_I2(rz, r);

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
return ldst;
}

-static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_ld_direct(s, get_memop(oi), data_reg, h);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}

+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_O2_I1(o, m, r),
+ .out = tgen_qemu_ld2,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
/* Reuse the zeroing that exists for goto_ptr. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld:
- tcg_out_qemu_ld(s, args[0], args[1], args[2], type);
- break;
case INDEX_op_qemu_st:
tcg_out_qemu_st(s, args[0], args[1], args[2], type);
break;
- case INDEX_op_qemu_ld2:
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
- break;
case INDEX_op_qemu_st2:
tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
case INDEX_op_qemu_st:
return C_O0_I2(r, r);
- case INDEX_op_qemu_ld2:
- return C_O2_I1(o, m, r);
case INDEX_op_qemu_st2:
return C_O0_I3(o, m, r);

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
return ldst;
}

-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_UB] = LDUB,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
MemOpIdx oi, TCGType data_type)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
a2 = args[2];

switch (opc) {
- case INDEX_op_qemu_ld:
- tcg_out_qemu_ld(s, a0, a1, a2, type);
- break;
case INDEX_op_qemu_st:
tcg_out_qemu_st(s, a0, a1, a2, type);
break;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
-
case INDEX_op_qemu_st:
return C_O0_I2(rz, r);

diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_ld:
- return C_O1_I1(r, r);
case INDEX_op_qemu_st:
return C_O0_I2(r, r);
- case INDEX_op_qemu_ld2:
- return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
case INDEX_op_qemu_st2:
return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r);

@@ -XXX,XX +XXX,XX @@ static const TCGOutOpStore outop_st = {
.out_r = tcg_out_st,
};

+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_op_rrm(s, INDEX_op_qemu_ld, data, addr, oi);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
+ tcg_out_op_rrrr(s, INDEX_op_qemu_ld2, datalo, datahi, addr, TCG_REG_TMP);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r),
+ .out =
+ TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_ld2,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
switch (opc) {
- case INDEX_op_qemu_ld:
case INDEX_op_qemu_st:
tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
break;
- case INDEX_op_qemu_ld2:
case INDEX_op_qemu_st2:
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
--
2.43.0
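As a reading aid between the two patches, the common shape each backend now
implements can be summarized with a small, self-contained C model. This is
an illustrative sketch only, with reduced stand-in types: the real TCGOutOp,
TCGOutOpQemuLdSt, all_outop[] and constraint-set machinery in tcg/tcg.c
carry considerably more state. It shows the subclass-with-embedded-base
layout and the container_of() dispatch that replaces the per-backend
tcg_out_op switch:

  #include <stdio.h>
  #include <stddef.h>

  /* Stand-in for the constraint-set index; the real values are generated. */
  typedef enum {
      C_NotImplemented,
      C_O1_I1_r_r,              /* stand-in for C_O1_I1(r, r) */
  } TCGConstraintSetIndex;

  /* Base class embedded in every per-opcode structure. */
  typedef struct TCGOutOp {
      TCGConstraintSetIndex static_constraint;
  } TCGOutOp;

  /* Subclass for qemu_ld/qemu_st: one data register, one address register. */
  typedef struct TCGOutOpQemuLdSt {
      TCGOutOp base;
      void (*out)(int data_reg, int addr_reg);
  } TCGOutOpQemuLdSt;

  static void tgen_qemu_ld(int data_reg, int addr_reg)
  {
      printf("emit: load r%d <- [r%d]\n", data_reg, addr_reg);
  }

  static const TCGOutOpQemuLdSt outop_qemu_ld = {
      .base.static_constraint = C_O1_I1_r_r,
      .out = tgen_qemu_ld,
  };

  enum { INDEX_op_qemu_ld, NB_OPS };

  /* The table stores pointers to the embedded base... */
  static const TCGOutOp * const all_outop[NB_OPS] = {
      [INDEX_op_qemu_ld] = &outop_qemu_ld.base,
  };

  /* ...and dispatch downcasts back to the subclass. */
  #define container_of_const(ptr, type, member) \
      ((const type *)((const char *)(ptr) - offsetof(type, member)))

  int main(void)
  {
      const TCGOutOpQemuLdSt *out =
          container_of_const(all_outop[INDEX_op_qemu_ld],
                             TCGOutOpQemuLdSt, base);
      out->out(3, 7);
      return 0;
  }

The payoff of this layout is visible in the hunks above: the shared code can
walk one table keyed by opcode, while each subclass keeps an operation-shaped
out() signature instead of funneling everything through one TCGArg array.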
Deleted patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 12 ++++-
tcg/aarch64/tcg-target.c.inc | 45 ++++++++-----------
tcg/arm/tcg-target.c.inc | 61 ++++++++++++++++---------
tcg/i386/tcg-target.c.inc | 73 ++++++++++++++++--------------
tcg/loongarch64/tcg-target.c.inc | 47 +++++++++----------
tcg/mips/tcg-target.c.inc | 77 +++++++++++++++++---------------
tcg/ppc/tcg-target.c.inc | 47 +++++++++++--------
tcg/riscv/tcg-target.c.inc | 34 ++++++--------
tcg/s390x/tcg-target.c.inc | 42 +++++++++--------
tcg/sparc64/tcg-target.c.inc | 42 ++++++-----------
tcg/tci/tcg-target.c.inc | 51 +++++++++++----------
11 files changed, 272 insertions(+), 259 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
OUTOP(INDEX_op_qemu_ld, TCGOutOpQemuLdSt, outop_qemu_ld),
OUTOP(INDEX_op_qemu_ld2, TCGOutOpQemuLdSt2, outop_qemu_ld2),
+ OUTOP(INDEX_op_qemu_st, TCGOutOpQemuLdSt, outop_qemu_st),
+ OUTOP(INDEX_op_qemu_st2, TCGOutOpQemuLdSt2, outop_qemu_st2),
OUTOP(INDEX_op_rems, TCGOutOpBinary, outop_rems),
OUTOP(INDEX_op_remu, TCGOutOpBinary, outop_remu),
OUTOP(INDEX_op_rotl, TCGOutOpBinary, outop_rotl),
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;

case INDEX_op_qemu_ld:
+ case INDEX_op_qemu_st:
{
- const TCGOutOpQemuLdSt *out = &outop_qemu_ld;
+ const TCGOutOpQemuLdSt *out =
+ container_of(all_outop[op->opc], TCGOutOpQemuLdSt, base);
+
out->out(s, type, new_args[0], new_args[1], new_args[2]);
}
break;

case INDEX_op_qemu_ld2:
+ case INDEX_op_qemu_st2:
{
- const TCGOutOpQemuLdSt2 *out = &outop_qemu_ld2;
+ const TCGOutOpQemuLdSt2 *out =
+ container_of(all_outop[op->opc], TCGOutOpQemuLdSt2, base);
+
out->out(s, type, new_args[0], new_args[1],
new_args[2], new_args[3]);
}
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt outop_qemu_ld = {
.out = tgen_qemu_ld,
};

-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType data_type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
}

+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
.out = tgen_qemu_ld2,
};

+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(rz, rz, r),
+ .out = tgen_qemu_st2,
+};
+
static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- /* Hoist the loads of the most common arguments. */
- TCGArg a0 = args[0];
- TCGArg a1 = args[1];
- TCGArg a2 = args[2];
-
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, a0, a1, a2, ext);
- break;
- case INDEX_op_qemu_st2:
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], false);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(rz, r);
- case INDEX_op_qemu_st2:
- return C_O0_I3(rz, rz, r);
-
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
case INDEX_op_mul_vec:
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
}
}

-static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addr, MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,

ldst = prepare_host_addr(s, &h, addr, oi, false);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = -1;
+
+ h.cond = COND_EQ;
+ tcg_out_qemu_st_direct(s, opc, data, -1, h);
+
+ /* The conditional call is last, as we're going to return here. */
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_bl_imm(s, COND_NE, 0);
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ } else {
+ tcg_out_qemu_st_direct(s, opc, data, -1, h);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(q, q),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+ if (ldst) {
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}

+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(Q, p, q),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_epilogue(TCGContext *s);

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st2:
- tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(q, q);
- case INDEX_op_qemu_st2:
- return C_O0_I3(Q, p, q);
-
case INDEX_op_st_vec:
return C_O0_I2(w, r);
case INDEX_op_ld_vec:
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,

switch (memop & MO_SIZE) {
case MO_8:
- /* This is handled with constraints on INDEX_op_qemu_st. */
+ /* This is handled with constraints in cset_qemu_st(). */
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
datalo, h.base, h.index, 0, h.ofs);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}

-static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addr, MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+ tcg_out_qemu_st_direct(s, data, -1, h, get_memop(oi));
+
+ if (ldst) {
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = -1;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static TCGConstraintSetIndex cset_qemu_st(TCGType type, unsigned flags)
+{
+ return flags == MO_8 ? C_O0_I2(s, L) : C_O0_I2(L, L);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 32 ? C_Dynamic : C_O0_I2(L, L),
+ .base.dynamic_constraint =
+ TCG_TARGET_REG_BITS == 32 ? cset_qemu_st : NULL,
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
tcg_out_qemu_st_direct(s, datalo, datahi, h, get_memop(oi));

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(L, L, L),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
/* Reuse the zeroing that exists for goto_ptr. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0, a1, a2;
-
- /* Hoist the loads of the most common arguments. */
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
-
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, a0, -1, a1, a2, type);
- break;
- case INDEX_op_qemu_st2:
- tcg_out_qemu_st(s, a0, a1, a2, args[3], type);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static int const umin_insn[4] = {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st:
- return (TCG_TARGET_REG_BITS == 32 && flags == MO_8
- ? C_O0_I2(s, L)
- : C_O0_I2(L, L));
-
- case INDEX_op_qemu_st2:
- return C_O0_I3(L, L, L);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
return C_O1_I1(x, r);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
}
}

-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
.out = tgen_qemu_ld2,
};

+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(r, r, r),
+ .out = tgen_qemu_st2,
+};
+
/*
* Entry-points
*/
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0 = args[0];
- TCGArg a1 = args[1];
- TCGArg a2 = args[2];
- TCGArg a3 = args[3];
-
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, a0, a1, a2, type);
- break;
- case INDEX_op_qemu_st2:
- tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(rz, r);
- case INDEX_op_qemu_st2:
- return C_O0_I3(r, r, r);
-
case INDEX_op_ld_vec:
case INDEX_op_dupm_vec:
case INDEX_op_dup_vec:
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
}
}

-static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg addr, MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
MemOp opc = get_memop(oi);
TCGLabelQemuLdst *ldst;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,

ldst = prepare_host_addr(s, &h, addr, oi, false);

+ if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
+ tcg_out_qemu_st_direct(s, data, 0, h.base, opc);
+ } else {
+ tcg_out_qemu_st_unalign(s, data, 0, h.base, opc);
+ }
+
+ if (ldst) {
+ ldst->type = type;
+ ldst->datalo_reg = data;
+ ldst->datahi_reg = 0;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ ldst = prepare_host_addr(s, &h, addr, oi, false);
+
if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) {
tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc);
} else {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
}

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = datalo;
ldst->datahi_reg = datahi;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ /* Ensure that the mips32 code is compiled but discarded for mips64. */
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 32 ? C_O0_I3(rz, rz, r) : C_NotImplemented,
+ .out =
+ TCG_TARGET_REG_BITS == 32 ? tgen_qemu_st2 : NULL,
+};
+
static void tcg_out_mb(TCGContext *s, unsigned a0)
{
static const MIPSInsn sync[] = {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0, a1, a2;
-
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
-
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, a0, 0, a1, a2, type);
- break;
- case INDEX_op_qemu_st2:
- tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
- tcg_out_qemu_st(s, a0, a1, a2, args[3], type);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
- switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(rz, r);
- case INDEX_op_qemu_ld2:
- return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O2_I1(r, r, r);
- case INDEX_op_qemu_st2:
- return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(rz, rz, r);
-
- default:
- return C_NotImplemented;
- }
+ return C_NotImplemented;
}

static const int tcg_target_callee_save_regs[] = {
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
.out = tgen_qemu_ld2,
};

+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_qemu_st(s, data, -1, addr, oi, type);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_out_qemu_st(s, datalo, datahi, addr, oi, type);
+ } else {
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, false);
+ }
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_O0_I3(o, m, r) : C_O0_I3(r, r, r),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
int i;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, args[0], -1, args[1], args[2], type);
- break;
- case INDEX_op_qemu_st2:
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_qemu_st(s, args[0], args[1], args[2],
- args[3], TCG_TYPE_I64);
- break;
- }
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
}
}

-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
TCGReg base;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0 = args[0];
- TCGArg a1 = args[1];
- TCGArg a2 = args[2];
-
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, a0, a1, a2, type);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(rz, r);
-
case INDEX_op_st_vec:
return C_O0_I2(v, r);
case INDEX_op_dup_vec:
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt outop_qemu_ld = {
.out = tgen_qemu_ld,
};

-static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data_reg,
+ TCGReg addr_reg, MemOpIdx oi)
{
TCGLabelQemuLdst *ldst;
HostAddress h;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data_reg;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out = tgen_qemu_st,
+};
+
static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
.out = tgen_qemu_ld2,
};

+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
+{
+ tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_O0_I3(o, m, r),
+ .out = tgen_qemu_st2,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
/* Reuse the zeroing that exists for goto_ptr. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, args[0], args[1], args[2], type);
- break;
- case INDEX_op_qemu_st2:
- tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(r, r);
- case INDEX_op_qemu_st2:
- return C_O0_I3(o, m, r);
-
case INDEX_op_st_vec:
return C_O0_I2(v, r);
case INDEX_op_ld_vec:
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
.base.static_constraint = C_NotImplemented,
};

-static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = STB,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}

+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
if (check_fit_ptr(a0, 13)) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- TCGArg a0, a1, a2;
-
- /* Hoist the loads of the most common arguments. */
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
-
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_qemu_st(s, a0, a1, a2, type);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
- switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(rz, r);
-
- default:
- return C_NotImplemented;
- }
+ return C_NotImplemented;
}

static void tcg_target_init(TCGContext *s)
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
- switch (op) {
- case INDEX_op_qemu_st:
- return C_O0_I2(r, r);
- case INDEX_op_qemu_st2:
- return TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r);
-
- default:
- return C_NotImplemented;
- }
+ return C_NotImplemented;
}

static const int tcg_target_reg_alloc_order[] = {
@@ -XXX,XX +XXX,XX @@ static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_ld2,
};

+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_op_rrm(s, INDEX_op_qemu_st, data, addr, oi);
+}
+
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(r, r),
+ .out = tgen_qemu_st,
+};
+
+static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
+ TCGReg datahi, TCGReg addr, MemOpIdx oi)
+{
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, oi);
+ tcg_out_op_rrrr(s, INDEX_op_qemu_st2, datalo, datahi, addr, TCG_REG_TMP);
+}
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint =
+ TCG_TARGET_REG_BITS == 64 ? C_NotImplemented : C_O0_I3(r, r, r),
+ .out =
+ TCG_TARGET_REG_BITS == 64 ? NULL : tgen_qemu_st2,
+};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
{
- switch (opc) {
- case INDEX_op_qemu_st:
- tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
- break;
- case INDEX_op_qemu_st2:
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
- tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
- break;
-
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- default:
- g_assert_not_reached();
- }
+ g_assert_not_reached();
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
--
2.43.0
diff view generated by jsdifflib