The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)
From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@

 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>

 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
 int i;
 bool ok = true;

- ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
- ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+ ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+ ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);

 /* Run through the unsigned tests first */
 for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
 ok = do_signed_reads(true);
 }

- ml_printf("Test data read: %"PRId32"\n", test_read_count);
- ml_printf("Test data write: %"PRId32"\n", test_write_count);
+ ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+ ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
 ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
 return ok ? 0 : -1;
 }
--
2.43.0
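As a rough standalone illustration of the workaround above (plain printf stands
in for the test's ml_printf, and the values are made up): casting to a type with
a known format specifier removes any need for the <inttypes.h> macros.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t test_data[16];
    uint32_t test_read_count = 42;

    /* No PRIxPTR / PRId32: cast to unsigned long and use %lx / %lu. */
    printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
    printf("Test data read: %lu\n", (unsigned long)test_read_count);
    return 0;
}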
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)

 static TCGv_i32 gen_cpu_index(void)
 {
+ /*
+  * Optimize when we run with a single vcpu. All values using cpu_index,
+  * including scoreboard index, will be optimized out.
+  * User-mode calls tb_flush when setting this flag. In system-mode, all
+  * vcpus are created before generating code.
+  */
+ if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+     return tcg_constant_i32(current_cpu->cpu_index);
+ }
 TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
 tcg_gen_ld_i32(cpu_index, tcg_env,
                -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
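A loose standalone model of the idea above (illustrative types and names, not
QEMU APIs): when the translation is known not to run in parallel with other
vcpus, the cpu index can be emitted as a constant operand that later folding
propagates, instead of a load whose value is only known at run time.

#include <stdio.h>
#include <stdbool.h>

enum operand_kind { OPERAND_CONST, OPERAND_LOAD };

struct operand { enum operand_kind kind; int value; };
struct cpu { int cpu_index; };

static struct operand gen_cpu_index(const struct cpu *current, bool parallel)
{
    if (!parallel) {
        /* Constant operand: every later use can be constant folded. */
        return (struct operand){ OPERAND_CONST, current->cpu_index };
    }
    /* Load operand: the value is only known when the generated code runs. */
    return (struct operand){ OPERAND_LOAD, 0 };
}

int main(void)
{
    struct cpu c = { .cpu_index = 2 };
    printf("%d %d\n", gen_cpu_index(&c, false).kind, gen_cpu_index(&c, true).kind);
    return 0;
}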
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
 }
 }

+static void finish_bb(OptContext *ctx)
+{
+ /* We only optimize memory barriers across basic blocks. */
+ ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+ finish_bb(ctx);
+ /* We only optimize across extended basic blocks. */
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
 const TCGOpDef *def = &tcg_op_defs[op->opc];
 int i, nb_oargs;

- /*
-  * We only optimize extended basic blocks. If the opcode ends a BB
-  * and is not a conditional branch, reset all temp data.
-  */
- if (def->flags & TCG_OPF_BB_END) {
-     ctx->prev_mb = NULL;
-     if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-         memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-         remove_mem_copy_all(ctx);
-     }
-     return;
- }
-
 nb_oargs = def->nb_oargs;
 for (i = 0; i < nb_oargs; i++) {
 TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
 if (i > 0) {
 op->opc = INDEX_op_br;
 op->args[0] = op->args[3];
+ finish_ebb(ctx);
+ } else {
+ finish_bb(ctx);
 }
- return false;
+ return true;
 }

 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 }
 op->opc = INDEX_op_br;
 op->args[0] = label;
- break;
+ finish_ebb(ctx);
+ return true;
 }
- return false;
+
+ finish_bb(ctx);
+ return true;
 }

 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 CASE_OP_32_64_VEC(xor):
 done = fold_xor(&ctx, op);
 break;
+ case INDEX_op_set_label:
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ case INDEX_op_goto_ptr:
+     finish_ebb(&ctx);
+     done = true;
+     break;
 default:
 break;
 }
--
2.43.0
There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

 /* In flight values from optimization. */
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
 uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
 uint64_t s_mask; /* mask of clrsb(value) bits */
 TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)

 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
- uint64_t a_mask = ctx->a_mask;
 uint64_t z_mask = ctx->z_mask;
 uint64_t s_mask = ctx->s_mask;

@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 * type changing opcodes.
 */
 if (ctx->type == TCG_TYPE_I32) {
- a_mask = (int32_t)a_mask;
 z_mask = (int32_t)z_mask;
 s_mask |= MAKE_64BIT_MASK(32, 32);
 ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 if (z_mask == 0) {
 return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
 }
+ return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input. Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+ if (ctx->type == TCG_TYPE_I32) {
+     a_mask = (uint32_t)a_mask;
+ }
 if (a_mask == 0) {
 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 * Known-zeros does not imply known-ones. Therefore unless
 * arg2 is constant, we can't infer affected bits from it.
 */
- if (arg_is_const(op->args[2])) {
-     ctx->a_mask = z1 & ~z2;
+ if (arg_is_const(op->args[2]) &&
+     fold_affected_mask(ctx, op, z1 & ~z2)) {
+     return true;
 }

 return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 */
 if (arg_is_const(op->args[2])) {
 uint64_t z2 = ~arg_info(op->args[2])->z_mask;
- ctx->a_mask = z1 & ~z2;
+ if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+     return true;
+ }
 z1 &= z2;
 }
 ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)

 z_mask_old = arg_info(op->args[1])->z_mask;
 z_mask = extract64(z_mask_old, pos, len);
- if (pos == 0) {
-     ctx->a_mask = z_mask_old ^ z_mask;
+ if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+     return true;
 }
 ctx->z_mask = z_mask;
 ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

 ctx->z_mask = z_mask;
 ctx->s_mask = s_mask;
- if (!type_change) {
-     ctx->a_mask = s_mask & ~s_mask_old;
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+     return true;
 }

 return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)

 ctx->z_mask = z_mask;
 ctx->s_mask = smask_from_zmask(z_mask);
- if (!type_change) {
-     ctx->a_mask = z_mask_old ^ z_mask;
+ if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+     return true;
 }
 return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 s_mask |= MAKE_64BIT_MASK(len, 64 - len);
 ctx->s_mask = s_mask;

- if (pos == 0) {
-     ctx->a_mask = s_mask & ~s_mask_old;
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+     return true;
 }

 return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 }

 /* Assume all bits affected, no bits known zero, no sign reps. */
- ctx.a_mask = -1;
 ctx.z_mask = -1;
 ctx.s_mask = 0;

--
2.43.0
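A small self-contained demonstration of the "affected mask" test described
above (mask values chosen only for illustration): for an AND with a constant,
the bits that could differ from the first input are z1 & ~z2, and when that is
zero the operation is a plain copy.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t z1 = 0x00000000000000ffull;   /* first input fits in 8 bits */
    uint64_t z2 = 0x00000000ffffffffull;   /* AND with a 32-bit constant */
    uint64_t a_mask = z1 & ~z2;

    printf("affected mask = 0x%016llx -> %s\n",
           (unsigned long long)a_mask,
           a_mask == 0 ? "fold to mov" : "keep the and");
    return 0;
}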
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
 uint64_t z_mask = ctx->z_mask;
 uint64_t s_mask = ctx->s_mask;
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ TCGTemp *ts;
+ TempOptInfo *ti;
+
+ /* Only single-output opcodes are supported here. */
+ tcg_debug_assert(def->nb_oargs == 1);

 /*
 * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 if (ctx->type == TCG_TYPE_I32) {
 z_mask = (int32_t)z_mask;
 s_mask |= MAKE_64BIT_MASK(32, 32);
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;
 }

 if (z_mask == 0) {
 return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
 }
- return false;
+
+ ts = arg_temp(op->args[0]);
+ reset_ts(ctx, ts);
+
+ ti = ts_info(ts);
+ ti->z_mask = z_mask;
+ ti->s_mask = s_mask;
+ return true;
 }

 /*
--
2.43.0
Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 return fold_const2(ctx, op);
 }

-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
- uint64_t z_mask = ctx->z_mask;
- uint64_t s_mask = ctx->s_mask;
 const TCGOpDef *def = &tcg_op_defs[op->opc];
 TCGTemp *ts;
 TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 return true;
 }

+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+ return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input. Thus if the entire mask is 0, the operation
--
2.43.0
Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
 */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

 ti = ts_info(ts);
 ti->z_mask = z_mask;
- ti->s_mask = s_mask;
+ ti->s_mask = s_mask | smask_from_zmask(z_mask);
 return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
 default:
 g_assert_not_reached();
 }
- s_mask = smask_from_zmask(z_mask);

+ s_mask = 0;
 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
 case TCG_BSWAP_OZ:
 break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
 default:
 /* The high bits are undefined: force all bits above the sign to 1. */
 z_mask |= sign << 1;
- s_mask = 0;
 break;
 }
 ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 g_assert_not_reached();
 }
 ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
 return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 default:
 g_assert_not_reached();
 }
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
 return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 return true;
 }
 ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);

 return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 }

 ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);
 if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
 return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
 int width = 8 * memop_size(mop);

 if (width < 64) {
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
- if (!(mop & MO_SIGN)) {
+ if (mop & MO_SIGN) {
+     ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+ } else {
 ctx->z_mask = MAKE_64BIT_MASK(0, width);
- ctx->s_mask <<= 1;
 }
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
 fold_setcond_tst_pow2(ctx, op, false);

 ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
 return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
 ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
 return false;

 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 break;
 CASE_OP_32_64(ld8u):
 ctx->z_mask = MAKE_64BIT_MASK(0, 8);
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
 break;
 CASE_OP_32_64(ld16s):
 ctx->s_mask = MAKE_64BIT_MASK(16, 48);
 break;
 CASE_OP_32_64(ld16u):
 ctx->z_mask = MAKE_64BIT_MASK(0, 16);
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
 break;
 case INDEX_op_ld32s_i64:
 ctx->s_mask = MAKE_64BIT_MASK(32, 32);
 break;
 case INDEX_op_ld32u_i64:
 ctx->z_mask = MAKE_64BIT_MASK(0, 32);
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
 break;
 default:
 g_assert_not_reached();
--
2.43.0
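A standalone sketch of the deduction used above, mirroring the intent of
smask_from_zmask() in its pre-conversion form (the z_mask == 0 case is not
handled here): leading known-zero bits imply redundant sign bits, so the
zero mask can only strengthen the sign mask.

#include <stdio.h>
#include <stdint.h>

static uint64_t smask_from_zmask(uint64_t zmask)
{
    /* Only the 0 bits are significant; if the msb may be set there is
       no sign information at all.  zmask == 0 is not handled here. */
    int rep = __builtin_clzll(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

int main(void)
{
    /* Value known to fit in 24 bits: 39 redundant sign bits above it. */
    printf("s_mask = 0x%016llx\n",
           (unsigned long long)smask_from_zmask(0x0000000000ffffffull));
    return 0;
}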
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
lead to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, canonicalization
is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
 QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
 uint64_t val;
 uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;

 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {

 /* In flight values from optimization. */
 uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
- uint64_t s_mask; /* mask of clrsb(value) bits */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
 TCGType type;
 } OptContext;

-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
- int rep = clrsb64(value);
- return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
- /*
-  * Only the 0 bits are significant for zmask, thus the msb itself
-  * must be zero, else we have no sign information.
-  */
- int rep = clz64(zmask);
- if (rep == 0) {
-     return 0;
- }
- rep -= 1;
- return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
- /* Only the 1 bits are significant for smask */
- return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
 return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
 ti->is_const = true;
 ti->val = ts->val;
 ti->z_mask = ts->val;
- ti->s_mask = smask_from_value(ts->val);
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
 } else {
 ti->is_const = false;
 ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
 */
 if (i == 0) {
 ts_info(ts)->z_mask = ctx->z_mask;
- ts_info(ts)->s_mask = ctx->s_mask;
 }
 }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 * The passed s_mask may be augmented by z_mask.
 */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
 const TCGOpDef *def = &tcg_op_defs[op->opc];
 TCGTemp *ts;
 TempOptInfo *ti;
+ int rep;

 /* Only single-output opcodes are supported here. */
 tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 */
 if (ctx->type == TCG_TYPE_I32) {
 z_mask = (int32_t)z_mask;
- s_mask |= MAKE_64BIT_MASK(32, 32);
+ s_mask |= INT32_MIN;
 }

 if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

 ti = ts_info(ts);
 ti->z_mask = z_mask;
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+ /* Canonicalize s_mask and incorporate data from z_mask. */
+ rep = clz64(~s_mask);
+ rep = MAX(rep, clz64(z_mask));
+ rep = MAX(rep - 1, 0);
+ ti->s_mask = INT64_MIN >> rep;
+
 return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

 ctx->z_mask = z_mask;
 ctx->s_mask = s_mask;
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
 return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 s_mask |= MAKE_64BIT_MASK(len, 64 - len);
 ctx->s_mask = s_mask;

- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
 return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
 ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
- ctx->s_mask = smask_from_smask(s_mask);

 return fold_masks(ctx, op);
 }
--
2.43.0
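A standalone sketch of the new canonicalization above (values are illustrative;
neither z_mask == 0 nor an all-ones s_mask is handled here): with the sign mask
now including the sign bit itself, it can be rebuilt from whatever run of
leading sign-bit copies remains, folding in known zeros from z_mask.

#include <stdio.h>
#include <stdint.h>

static int64_t canonical_s_mask(uint64_t z_mask, int64_t s_mask)
{
    int rep = __builtin_clzll(~(uint64_t)s_mask); /* leading copies of the sign bit */
    int zrep = __builtin_clzll(z_mask);           /* leading known-zero bits */

    if (zrep > rep) {
        rep = zrep;
    }
    rep = rep > 0 ? rep - 1 : 0;
    /* Arithmetic shift keeps the run left aligned, sign bit included. */
    return INT64_MIN >> rep;
}

int main(void)
{
    /* Value known to fit in 16 bits: the top 48 bits all match the msb. */
    printf("s_mask = 0x%016llx\n",
           (unsigned long long)canonical_s_mask(0xffffull, 0));
    return 0;
}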
Fill the new argument from any condition within the opcode.
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Not yet used within any backend.
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
tcg/tcg.c | 34 ++++++++++++++++++++++++++++++--
4
tcg/optimize.c | 9 +++++----
8
tcg/aarch64/tcg-target.c.inc | 3 ++-
5
1 file changed, 5 insertions(+), 4 deletions(-)
9
tcg/arm/tcg-target.c.inc | 3 ++-
10
tcg/i386/tcg-target.c.inc | 3 ++-
11
tcg/loongarch64/tcg-target.c.inc | 3 ++-
12
tcg/mips/tcg-target.c.inc | 3 ++-
13
tcg/ppc/tcg-target.c.inc | 3 ++-
14
tcg/riscv/tcg-target.c.inc | 3 ++-
15
tcg/s390x/tcg-target.c.inc | 3 ++-
16
tcg/sparc64/tcg-target.c.inc | 3 ++-
17
tcg/tci/tcg-target.c.inc | 3 ++-
18
11 files changed, 52 insertions(+), 12 deletions(-)
19
6
20
diff --git a/tcg/tcg.c b/tcg/tcg.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
21
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/tcg.c
9
--- a/tcg/optimize.c
23
+++ b/tcg/tcg.c
10
+++ b/tcg/optimize.c
24
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
25
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
12
remove_mem_copy_all(ctx);
26
const TCGHelperInfo *info);
13
}
27
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
14
28
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
29
+static bool tcg_target_const_match(int64_t val, int ct,
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
30
+ TCGType type, TCGCond cond, int vece);
17
{
31
#ifdef TCG_TARGET_NEED_LDST_LABELS
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
32
static int tcg_out_ldst_finalize(TCGContext *s);
19
int i, nb_oargs;
33
#endif
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
34
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
21
ts_info(ts)->z_mask = ctx->z_mask;
35
TCGTemp *ts;
22
}
36
TCGArg new_args[TCG_MAX_OP_ARGS];
37
int const_args[TCG_MAX_OP_ARGS];
38
+ TCGCond op_cond;
39
40
nb_oargs = def->nb_oargs;
41
nb_iargs = def->nb_iargs;
42
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
43
i_allocated_regs = s->reserved_regs;
44
o_allocated_regs = s->reserved_regs;
45
46
+ switch (op->opc) {
47
+ case INDEX_op_brcond_i32:
48
+ case INDEX_op_brcond_i64:
49
+ op_cond = op->args[2];
50
+ break;
51
+ case INDEX_op_setcond_i32:
52
+ case INDEX_op_setcond_i64:
53
+ case INDEX_op_negsetcond_i32:
54
+ case INDEX_op_negsetcond_i64:
55
+ case INDEX_op_cmp_vec:
56
+ op_cond = op->args[3];
57
+ break;
58
+ case INDEX_op_brcond2_i32:
59
+ op_cond = op->args[4];
60
+ break;
61
+ case INDEX_op_movcond_i32:
62
+ case INDEX_op_movcond_i64:
63
+ case INDEX_op_setcond2_i32:
64
+ case INDEX_op_cmpsel_vec:
65
+ op_cond = op->args[5];
66
+ break;
67
+ default:
68
+ /* No condition within opcode. */
69
+ op_cond = TCG_COND_ALWAYS;
70
+ break;
71
+ }
72
+
73
/* satisfy input constraints */
74
for (k = 0; k < nb_iargs; k++) {
75
TCGRegSet i_preferred_regs, i_required_regs;
76
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
77
ts = arg_temp(arg);
78
79
if (ts->val_type == TEMP_VAL_CONST
80
- && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
81
+ && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
82
+ op_cond, TCGOP_VECE(op))) {
83
/* constant is OK for instruction */
84
const_args[i] = 1;
85
new_args[i] = ts->val;
86
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
87
index XXXXXXX..XXXXXXX 100644
88
--- a/tcg/aarch64/tcg-target.c.inc
89
+++ b/tcg/aarch64/tcg-target.c.inc
90
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
91
}
23
}
24
+ return true;
92
}
25
}
93
26
94
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
27
/*
95
+static bool tcg_target_const_match(int64_t val, int ct,
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
96
+ TCGType type, TCGCond cond, int vece)
29
fold_xi_to_x(ctx, op, 0)) {
97
{
30
return true;
98
if (ct & TCG_CT_CONST) {
31
}
99
return 1;
32
- return false;
100
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
33
+ return finish_folding(ctx, op);
101
index XXXXXXX..XXXXXXX 100644
102
--- a/tcg/arm/tcg-target.c.inc
103
+++ b/tcg/arm/tcg-target.c.inc
104
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
105
* mov operand2: values represented with x << (2 * y), x < 0x100
106
* add, sub, eor...: ditto
107
*/
108
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
109
+static bool tcg_target_const_match(int64_t val, int ct,
110
+ TCGType type, TCGCond cond, int vece)
111
{
112
if (ct & TCG_CT_CONST) {
113
return 1;
114
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
115
index XXXXXXX..XXXXXXX 100644
116
--- a/tcg/i386/tcg-target.c.inc
117
+++ b/tcg/i386/tcg-target.c.inc
118
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
119
}
34
}
120
35
121
/* test if a constant matches the constraint */
36
/* We cannot as yet do_constant_folding with vectors. */
122
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
123
+static bool tcg_target_const_match(int64_t val, int ct,
38
fold_xi_to_x(ctx, op, 0)) {
124
+ TCGType type, TCGCond cond, int vece)
39
return true;
125
{
40
}
126
if (ct & TCG_CT_CONST) {
41
- return false;
127
return 1;
42
+ return finish_folding(ctx, op);
128
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
129
index XXXXXXX..XXXXXXX 100644
130
--- a/tcg/loongarch64/tcg-target.c.inc
131
+++ b/tcg/loongarch64/tcg-target.c.inc
132
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
133
}
43
}
134
44
135
/* test if a constant matches the constraint */
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
136
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
137
+static bool tcg_target_const_match(int64_t val, int ct,
47
op->args[4] = arg_new_constant(ctx, bl);
138
+ TCGType type, TCGCond cond, int vece)
48
op->args[5] = arg_new_constant(ctx, bh);
139
{
49
}
140
if (ct & TCG_CT_CONST) {
50
- return false;
141
return true;
51
+ return finish_folding(ctx, op);
142
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
143
index XXXXXXX..XXXXXXX 100644
144
--- a/tcg/mips/tcg-target.c.inc
145
+++ b/tcg/mips/tcg-target.c.inc
146
@@ -XXX,XX +XXX,XX @@ static bool is_p2m1(tcg_target_long val)
147
}
52
}
148
53
149
/* test if a constant matches the constraint */
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
150
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
151
+static bool tcg_target_const_match(int64_t val, int ct,
152
+ TCGType type, TCGCond cond, int vece)
153
{
154
if (ct & TCG_CT_CONST) {
155
return 1;
156
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
157
index XXXXXXX..XXXXXXX 100644
158
--- a/tcg/ppc/tcg-target.c.inc
159
+++ b/tcg/ppc/tcg-target.c.inc
160
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
161
}
162
163
/* test if a constant matches the constraint */
164
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
165
+static bool tcg_target_const_match(int64_t val, int ct,
166
+ TCGType type, TCGCond cond, int vece)
167
{
168
if (ct & TCG_CT_CONST) {
169
return 1;
170
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
171
index XXXXXXX..XXXXXXX 100644
172
--- a/tcg/riscv/tcg-target.c.inc
173
+++ b/tcg/riscv/tcg-target.c.inc
174
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
175
#define sextreg sextract64
176
177
/* test if a constant matches the constraint */
178
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
179
+static bool tcg_target_const_match(int64_t val, int ct,
180
+ TCGType type, TCGCond cond, int vece)
181
{
182
if (ct & TCG_CT_CONST) {
183
return 1;
184
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
185
index XXXXXXX..XXXXXXX 100644
186
--- a/tcg/s390x/tcg-target.c.inc
187
+++ b/tcg/s390x/tcg-target.c.inc
188
@@ -XXX,XX +XXX,XX @@ static bool risbg_mask(uint64_t c)
189
}
190
191
/* Test if a constant matches the constraint. */
192
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
193
+static bool tcg_target_const_match(int64_t val, int ct,
194
+ TCGType type, TCGCond cond, int vece)
195
{
196
if (ct & TCG_CT_CONST) {
197
return 1;
198
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
199
index XXXXXXX..XXXXXXX 100644
200
--- a/tcg/sparc64/tcg-target.c.inc
201
+++ b/tcg/sparc64/tcg-target.c.inc
202
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
203
}
204
205
/* test if a constant matches the constraint */
206
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
207
+static bool tcg_target_const_match(int64_t val, int ct,
208
+ TCGType type, TCGCond cond, int vece)
209
{
210
if (ct & TCG_CT_CONST) {
211
return 1;
212
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
213
index XXXXXXX..XXXXXXX 100644
214
--- a/tcg/tci/tcg-target.c.inc
215
+++ b/tcg/tci/tcg-target.c.inc
216
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
217
}
218
219
/* Test if a constant matches the constraint. */
220
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
221
+static bool tcg_target_const_match(int64_t val, int ct,
222
+ TCGType type, TCGCond cond, int vece)
223
{
224
return ct & TCG_CT_CONST;
225
}
226
--
2.34.1
--
2.43.0

1
Add the enumerators, adjust the helpers to match, and dump.
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.
2
Not supported anywhere else just yet.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
include/tcg/tcg-cond.h | 74 ++++++++++++++++++++++++++++++------------
5
tcg/optimize.c | 20 +++++++++++++++++---
8
tcg/tcg.c | 4 ++-
6
1 file changed, 17 insertions(+), 3 deletions(-)
9
docs/devel/tcg-ops.rst | 2 ++
10
3 files changed, 58 insertions(+), 22 deletions(-)
11
7
12
diff --git a/include/tcg/tcg-cond.h b/include/tcg/tcg-cond.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
14
--- a/include/tcg/tcg-cond.h
10
--- a/tcg/optimize.c
15
+++ b/include/tcg/tcg-cond.h
11
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
17
* Conditions. Note that these are laid out for easy manipulation by
13
return ts_info(arg_temp(arg));
18
* the functions below:
19
* bit 0 is used for inverting;
20
- * bit 1 is signed,
21
- * bit 2 is unsigned,
22
- * bit 3 is used with bit 0 for swapping signed/unsigned.
23
+ * bit 1 is used for conditions that need swapping (signed/unsigned).
24
+ * bit 2 is used with bit 1 for swapping.
25
+ * bit 3 is used for unsigned conditions.
26
*/
27
typedef enum {
28
/* non-signed */
29
TCG_COND_NEVER = 0 | 0 | 0 | 0,
30
TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
31
+
32
+ /* equality */
33
TCG_COND_EQ = 8 | 0 | 0 | 0,
34
TCG_COND_NE = 8 | 0 | 0 | 1,
35
+
36
+ /* "test" i.e. and then compare vs 0 */
37
+ TCG_COND_TSTEQ = 8 | 4 | 0 | 0,
38
+ TCG_COND_TSTNE = 8 | 4 | 0 | 1,
39
+
40
/* signed */
41
TCG_COND_LT = 0 | 0 | 2 | 0,
42
TCG_COND_GE = 0 | 0 | 2 | 1,
43
- TCG_COND_LE = 8 | 0 | 2 | 0,
44
- TCG_COND_GT = 8 | 0 | 2 | 1,
45
+ TCG_COND_GT = 0 | 4 | 2 | 0,
46
+ TCG_COND_LE = 0 | 4 | 2 | 1,
47
+
48
/* unsigned */
49
- TCG_COND_LTU = 0 | 4 | 0 | 0,
50
- TCG_COND_GEU = 0 | 4 | 0 | 1,
51
- TCG_COND_LEU = 8 | 4 | 0 | 0,
52
- TCG_COND_GTU = 8 | 4 | 0 | 1,
53
+ TCG_COND_LTU = 8 | 0 | 2 | 0,
54
+ TCG_COND_GEU = 8 | 0 | 2 | 1,
55
+ TCG_COND_GTU = 8 | 4 | 2 | 0,
56
+ TCG_COND_LEU = 8 | 4 | 2 | 1,
57
} TCGCond;
58
59
/* Invert the sense of the comparison. */
60
@@ -XXX,XX +XXX,XX @@ static inline TCGCond tcg_invert_cond(TCGCond c)
61
/* Swap the operands in a comparison. */
62
static inline TCGCond tcg_swap_cond(TCGCond c)
63
{
64
- return c & 6 ? (TCGCond)(c ^ 9) : c;
65
+ return (TCGCond)(c ^ ((c & 2) << 1));
66
}
14
}
67
15
68
-/* Create an "unsigned" version of a "signed" comparison. */
16
+static inline bool ti_is_const(TempOptInfo *ti)
69
-static inline TCGCond tcg_unsigned_cond(TCGCond c)
17
+{
70
+/* Must a comparison be considered signed? */
18
+ return ti->is_const;
71
+static inline bool is_signed_cond(TCGCond c)
72
{
73
- return c & 2 ? (TCGCond)(c ^ 6) : c;
74
-}
75
-
76
-/* Create a "signed" version of an "unsigned" comparison. */
77
-static inline TCGCond tcg_signed_cond(TCGCond c)
78
-{
79
- return c & 4 ? (TCGCond)(c ^ 6) : c;
80
+ return (c & (8 | 2)) == 2;
81
}
82
83
/* Must a comparison be considered unsigned? */
84
static inline bool is_unsigned_cond(TCGCond c)
85
{
86
- return (c & 4) != 0;
87
+ return (c & (8 | 2)) == (8 | 2);
88
+}
19
+}
89
+
20
+
90
+/* Must a comparison be considered a test? */
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
91
+static inline bool is_tst_cond(TCGCond c)
92
+{
22
+{
93
+ return (c | 1) == TCG_COND_TSTNE;
23
+ return ti->val;
94
+}
24
+}
95
+
25
+
96
+/* Create an "unsigned" version of a "signed" comparison. */
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
97
+static inline TCGCond tcg_unsigned_cond(TCGCond c)
98
+{
27
+{
99
+ return is_signed_cond(c) ? (TCGCond)(c + 8) : c;
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
100
+}
29
+}
101
+
30
+
102
+/* Create a "signed" version of an "unsigned" comparison. */
31
static inline bool ts_is_const(TCGTemp *ts)
103
+static inline TCGCond tcg_signed_cond(TCGCond c)
32
{
104
+{
33
- return ts_info(ts)->is_const;
105
+ return is_unsigned_cond(c) ? (TCGCond)(c - 8) : c;
34
+ return ti_is_const(ts_info(ts));
106
+}
107
+
108
+/* Create the eq/ne version of a tsteq/tstne comparison. */
109
+static inline TCGCond tcg_tst_eqne_cond(TCGCond c)
110
+{
111
+ return is_tst_cond(c) ? (TCGCond)(c - 4) : c;
112
+}
113
+
114
+/* Create the lt/ge version of a tstne/tsteq comparison of the sign. */
115
+static inline TCGCond tcg_tst_ltge_cond(TCGCond c)
116
+{
117
+ return is_tst_cond(c) ? (TCGCond)(c ^ 0xf) : c;
118
}
35
}
119
36
120
/*
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
121
@@ -XXX,XX +XXX,XX @@ static inline TCGCond tcg_high_cond(TCGCond c)
38
{
122
case TCG_COND_LE:
39
- TempOptInfo *ti = ts_info(ts);
123
case TCG_COND_GEU:
40
- return ti->is_const && ti->val == val;
124
case TCG_COND_LEU:
41
+ return ti_is_const_val(ts_info(ts), val);
125
- return (TCGCond)(c ^ 8);
42
}
126
+ return (TCGCond)(c ^ (4 | 1));
43
127
default:
44
static inline bool arg_is_const(TCGArg arg)
128
return c;
129
}
130
diff --git a/tcg/tcg.c b/tcg/tcg.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/tcg/tcg.c
133
+++ b/tcg/tcg.c
134
@@ -XXX,XX +XXX,XX @@ static const char * const cond_name[] =
135
[TCG_COND_LTU] = "ltu",
136
[TCG_COND_GEU] = "geu",
137
[TCG_COND_LEU] = "leu",
138
- [TCG_COND_GTU] = "gtu"
139
+ [TCG_COND_GTU] = "gtu",
140
+ [TCG_COND_TSTEQ] = "tsteq",
141
+ [TCG_COND_TSTNE] = "tstne",
142
};
143
144
static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
145
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
146
index XXXXXXX..XXXXXXX 100644
147
--- a/docs/devel/tcg-ops.rst
148
+++ b/docs/devel/tcg-ops.rst
149
@@ -XXX,XX +XXX,XX @@ Jumps/Labels
150
| ``TCG_COND_GEU /* unsigned */``
151
| ``TCG_COND_LEU /* unsigned */``
152
| ``TCG_COND_GTU /* unsigned */``
153
+ | ``TCG_COND_TSTEQ /* t1 & t2 == 0 */``
154
+ | ``TCG_COND_TSTNE /* t1 & t2 != 0 */``
155
156
Arithmetic
157
----------
158
--
2.34.1
--
2.43.0

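For readers skimming the series: the new test conditions compare the AND of the two
operands against zero, exactly as the docs hunk above states. A minimal, hypothetical
C sketch of their semantics (illustration only, not part of either patch; the helper
names are invented):

    /* Illustration only: how TSTEQ/TSTNE evaluate for 64-bit operands. */
    static inline bool eval_tsteq(uint64_t t1, uint64_t t2)
    {
        return (t1 & t2) == 0;      /* TCG_COND_TSTEQ */
    }

    static inline bool eval_tstne(uint64_t t1, uint64_t t2)
    {
        return (t1 & t2) != 0;      /* TCG_COND_TSTNE */
    }
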
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)

static bool fold_and(OptContext *ctx, TCGOp *op)
{
- uint64_t z1, z2;
+ uint64_t z1, z2, z_mask, s_mask;
+ TempOptInfo *t1, *t2;

if (fold_const2_commutative(ctx, op) ||
fold_xi_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
return true;
}

- z1 = arg_info(op->args[1])->z_mask;
- z2 = arg_info(op->args[2])->z_mask;
- ctx->z_mask = z1 & z2;
-
- /*
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
- * Bitwise operations preserve the relative quantity of the repetitions.
- */
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+ z1 = t1->z_mask;
+ z2 = t2->z_mask;

/*
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer affected bits from it.
*/
- if (arg_is_const(op->args[2]) &&
- fold_affected_mask(ctx, op, z1 & ~z2)) {
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
return true;
}

- return fold_masks(ctx, op);
+ z_mask = z1 & z2;
+
+ /*
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
+ * Bitwise operations preserve the relative quantity of the repetitions.
+ */
+ s_mask = t1->s_mask & t2->s_mask;
+
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
--
2.43.0
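The fold_and change above computes the result masks directly from the two inputs. A
stand-alone sketch of that combination rule, under the series' convention that z_mask
holds the bits that may be nonzero and s_mask holds the bits guaranteed to repeat the
sign (the struct and helper names are hypothetical, for illustration only):

    /* Hypothetical helper mirroring the fold_and mask math above. */
    struct masks { uint64_t z_mask, s_mask; };

    static struct masks and_masks(struct masks a, struct masks b)
    {
        struct masks r;
        r.z_mask = a.z_mask & b.z_mask;  /* a bit may be 1 only if it may be 1 in both */
        r.s_mask = a.s_mask & b.s_mask;  /* sign repetitions survive a bitwise AND */
        return r;
    }
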
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of second const operand.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/sparc64/tcg-target.c.inc | 21 +++++++++++----------
7
tcg/optimize.c | 21 +++++++++++----------
5
1 file changed, 11 insertions(+), 10 deletions(-)
8
1 file changed, 11 insertions(+), 10 deletions(-)
6
9
7
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/sparc64/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/sparc64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
12
tcg_out_bpcc0(s, scond, flags, off19);
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
13
}
55
}
14
56
15
-static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
16
+static void tcg_out_cmp(TCGContext *s, TCGCond cond,
17
+ TCGReg c1, int32_t c2, int c2const)
18
{
19
tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
20
}
21
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
22
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
23
int32_t arg2, int const_arg2, TCGLabel *l)
24
{
25
- tcg_out_cmp(s, arg1, arg2, const_arg2);
26
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
27
tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
28
tcg_out_nop(s);
29
}
30
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
31
TCGReg c1, int32_t c2, int c2const,
32
int32_t v1, int v1const)
33
{
34
- tcg_out_cmp(s, c1, c2, c2const);
35
+ tcg_out_cmp(s, cond, c1, c2, c2const);
36
tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
37
}
38
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
40
tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
41
| INSN_COND(rcond) | off16);
42
} else {
43
- tcg_out_cmp(s, arg1, arg2, const_arg2);
44
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
45
tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
46
}
47
tcg_out_nop(s);
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
49
if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
50
tcg_out_movr(s, rcond, ret, c1, v1, v1const);
51
} else {
52
- tcg_out_cmp(s, c1, c2, c2const);
53
+ tcg_out_cmp(s, cond, c1, c2, c2const);
54
tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
55
}
56
}
57
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
58
/* FALLTHRU */
59
60
default:
61
- tcg_out_cmp(s, c1, c2, c2const);
62
+ tcg_out_cmp(s, cond, c1, c2, c2const);
63
tcg_out_movi_s13(s, ret, 0);
64
tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
65
return;
66
}
67
68
- tcg_out_cmp(s, c1, c2, c2const);
69
+ tcg_out_cmp(s, cond, c1, c2, c2const);
70
if (cond == TCG_COND_LTU) {
71
if (neg) {
72
/* 0 - 0 - C = -C = (C ? -1 : 0) */
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
74
c2 = c1, c2const = 0, c1 = TCG_REG_G0;
75
/* FALLTHRU */
76
case TCG_COND_LTU:
77
- tcg_out_cmp(s, c1, c2, c2const);
78
+ tcg_out_cmp(s, cond, c1, c2, c2const);
79
tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
80
return;
81
default:
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
83
tcg_out_movi_s13(s, ret, 0);
84
tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
85
} else {
86
- tcg_out_cmp(s, c1, c2, c2const);
87
+ tcg_out_cmp(s, cond, c1, c2, c2const);
88
tcg_out_movi_s13(s, ret, 0);
89
tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
90
}
91
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
92
tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
93
tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
94
}
95
- tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0);
96
+ tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);
97
98
ldst = new_ldst_label(s);
99
ldst->is_ld = is_ld;
100
--
58
--
101
2.34.1
59
2.43.0
102
103
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.
Always set s_mask along the BSWAP_OS path, since the result is
being explicitly sign-extended.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask, s_mask, sign;
+ TempOptInfo *t1 = arg_info(op->args[1]);

- if (arg_is_const(op->args[1])) {
- uint64_t t = arg_info(op->args[1])->val;
-
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ if (ti_is_const(t1)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ do_constant_folding(op->opc, ctx->type,
+ ti_const_val(t1),
+ op->args[2]));
}

- z_mask = arg_info(op->args[1])->z_mask;
-
+ z_mask = t1->z_mask;
switch (op->opc) {
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
/* If the sign bit may be 1, force all the bits above to 1. */
if (z_mask & sign) {
z_mask |= sign;
- s_mask = sign << 1;
}
+ /* The value and therefore s_mask is explicitly sign-extended. */
+ s_mask = sign;
break;
default:
/* The high bits are undefined: force all bits above the sign to 1. */
z_mask |= sign << 1;
break;
}
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;

- return fold_masks(ctx, op);
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_call(OptContext *ctx, TCGOp *op)
--
2.43.0
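The s_mask reasoning in the patch above relies on a byte swap with the output
sign-extend flag producing a value whose upper bits all repeat the sign of the
swapped quantity. A small, hypothetical illustration for the 16-bit case (not part
of the patch; the helper name is invented):

    /* Illustration: byte-swap the low 16 bits, then sign-extend the result. */
    static int64_t bswap16_sign_extended(uint64_t x)
    {
        uint16_t swapped = (uint16_t)(((x & 0xff) << 8) | ((x >> 8) & 0xff));
        return (int16_t)swapped;   /* bits 16..63 now repeat bit 15 */
    }
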
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.
Compute s_mask from the union of the maximum count and the
op2 fallback for op1 being zero.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
- uint64_t z_mask;
+ uint64_t z_mask, s_mask;
+ TempOptInfo *t1 = arg_info(op->args[1]);
+ TempOptInfo *t2 = arg_info(op->args[2]);

- if (arg_is_const(op->args[1])) {
- uint64_t t = arg_info(op->args[1])->val;
+ if (ti_is_const(t1)) {
+ uint64_t t = ti_const_val(t1);

if (t != 0) {
t = do_constant_folding(op->opc, ctx->type, t, 0);
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
- return false;
+ s_mask = ~z_mask;
+ z_mask |= t2->z_mask;
+ s_mask &= t2->s_mask;
+
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
--
2.43.0
1
Test the sign bit for LT/GE vs 0, and TSTNE/EQ vs a power of 2.
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
2
3
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Message-Id: <20240119224737.48943-2-philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/aarch64/tcg-target.c.inc | 74 ++++++++++++++++++++++++++++++------
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 62 insertions(+), 12 deletions(-)
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
9
10
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/aarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
13
+++ b/tcg/aarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return false;
15
return true;
16
}
16
}
17
17
18
+static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
19
+{
20
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+ ptrdiff_t offset = target - src_rx;
22
+
23
+ if (offset == sextract64(offset, 0, 14)) {
24
+ *src_rw = deposit32(*src_rw, 5, 14, offset);
25
+ return true;
26
+ }
27
+ return false;
28
+}
21
+}
29
+
22
+
30
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
31
intptr_t value, intptr_t addend)
32
{
24
{
33
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
34
return reloc_pc26(code_ptr, (const tcg_insn_unit *)value);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
35
case R_AARCH64_CONDBR19:
27
36
return reloc_pc19(code_ptr, (const tcg_insn_unit *)value);
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
37
+ case R_AARCH64_TSTBR14:
29
{
38
+ return reloc_pc14(code_ptr, (const tcg_insn_unit *)value);
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
39
default:
45
default:
40
g_assert_not_reached();
46
g_assert_not_reached();
41
}
47
}
42
@@ -XXX,XX +XXX,XX @@ typedef enum {
48
- return false;
43
/* Conditional branch (immediate). */
49
+ return fold_masks_z(ctx, op, z_mask);
44
I3202_B_C = 0x54000000,
45
46
+ /* Test and branch (immediate). */
47
+ I3205_TBZ = 0x36000000,
48
+ I3205_TBNZ = 0x37000000,
49
+
50
/* Unconditional branch (immediate). */
51
I3206_B = 0x14000000,
52
I3206_BL = 0x94000000,
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
54
tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
55
}
50
}
56
51
57
+static void tcg_out_insn_3205(TCGContext *s, AArch64Insn insn,
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
58
+ TCGReg rt, int imm6, int imm14)
59
+{
60
+ insn |= (imm6 & 0x20) << (31 - 5);
61
+ insn |= (imm6 & 0x1f) << 19;
62
+ tcg_out32(s, insn | (imm14 & 0x3fff) << 5 | rt);
63
+}
64
+
65
static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
66
{
67
tcg_out32(s, insn | (imm26 & 0x03ffffff));
68
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
69
static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
70
TCGArg b, bool b_const, TCGLabel *l)
71
{
72
- intptr_t offset;
73
+ int tbit = -1;
74
bool need_cmp = true;
75
76
switch (c) {
77
case TCG_COND_EQ:
78
case TCG_COND_NE:
79
+ /* cmp xN,0; b.ne L -> cbnz xN,L */
80
if (b_const && b == 0) {
81
need_cmp = false;
82
}
83
break;
84
+ case TCG_COND_LT:
85
+ case TCG_COND_GE:
86
+ /* cmp xN,0; b.mi L -> tbnz xN,63,L */
87
+ if (b_const && b == 0) {
88
+ c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ);
89
+ tbit = ext ? 63 : 31;
90
+ need_cmp = false;
91
+ }
92
+ break;
93
+ case TCG_COND_TSTEQ:
94
+ case TCG_COND_TSTNE:
95
+ /* tst xN,1<<B; b.ne L -> tbnz xN,B,L */
96
+ if (b_const && is_power_of_2(b)) {
97
+ tbit = ctz64(b);
98
+ need_cmp = false;
99
+ }
100
+ break;
101
default:
102
break;
103
}
104
105
if (need_cmp) {
106
tcg_out_cmp(s, ext, c, a, b, b_const);
107
- }
108
-
109
- if (!l->has_value) {
110
tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
111
- offset = tcg_in32(s) >> 5;
112
- } else {
113
- offset = tcg_pcrel_diff(s, l->u.value_ptr) >> 2;
114
- tcg_debug_assert(offset == sextract64(offset, 0, 19));
115
+ tcg_out_insn(s, 3202, B_C, c, 0);
116
+ return;
117
}
118
119
- if (need_cmp) {
120
- tcg_out_insn(s, 3202, B_C, c, offset);
121
+ if (tbit >= 0) {
122
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_TSTBR14, l, 0);
123
+ switch (c) {
124
+ case TCG_COND_TSTEQ:
125
+ tcg_out_insn(s, 3205, TBZ, a, tbit, 0);
126
+ break;
127
+ case TCG_COND_TSTNE:
128
+ tcg_out_insn(s, 3205, TBNZ, a, tbit, 0);
129
+ break;
130
+ default:
131
+ g_assert_not_reached();
132
+ }
133
} else {
134
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
135
switch (c) {
136
case TCG_COND_EQ:
137
- tcg_out_insn(s, 3201, CBZ, ext, a, offset);
138
+ tcg_out_insn(s, 3201, CBZ, ext, a, 0);
139
break;
140
case TCG_COND_NE:
141
- tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
142
+ tcg_out_insn(s, 3201, CBNZ, ext, a, 0);
143
break;
144
default:
145
g_assert_not_reached();
146
--
53
--
147
2.34.1
54
2.43.0
148
149
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/optimize.c | 38 +++++++++++++++++++++++---------------
7
tcg/optimize.c | 35 +++++++++++++++++------------------
5
1 file changed, 23 insertions(+), 15 deletions(-)
8
1 file changed, 17 insertions(+), 18 deletions(-)
6
9
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static inline bool ts_is_const(TCGTemp *ts)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
12
return ts_info(ts)->is_const;
15
13
}
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
14
15
+static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
16
+{
17
+ TempOptInfo *ti = ts_info(ts);
18
+ return ti->is_const && ti->val == val;
19
+}
20
+
21
static inline bool arg_is_const(TCGArg arg)
22
{
17
{
23
return ts_is_const(arg_temp(arg));
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
24
}
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
25
20
+ int ofs = op->args[3];
26
+static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
21
+ int len = op->args[4];
27
+{
22
TCGOpcode and_opc;
28
+ return ts_is_const_val(arg_temp(arg), val);
23
+ uint64_t z_mask;
29
+}
24
30
+
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
31
static inline bool ts_is_copy(TCGTemp *ts)
26
- uint64_t t1 = arg_info(op->args[1])->val;
32
{
27
- uint64_t t2 = arg_info(op->args[2])->val;
33
return ts_info(ts)->next_copy != ts;
28
-
34
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
35
}
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
36
} else if (args_are_copies(x, y)) {
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
37
return do_constant_folding_cond_eq(c);
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
38
- } else if (arg_is_const(y) && arg_info(y)->val == 0) {
33
+ deposit64(ti_const_val(t1), ofs, len,
39
+ } else if (arg_is_const_val(y, 0)) {
34
+ ti_const_val(t2)));
40
switch (c) {
41
case TCG_COND_LTU:
42
return 0;
43
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
44
/* If the binary operation has first argument @i, fold to @i. */
45
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
46
{
47
- if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
48
+ if (arg_is_const_val(op->args[1], i)) {
49
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
50
}
35
}
51
return false;
36
52
@@ -XXX,XX +XXX,XX @@ static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
37
switch (ctx->type) {
53
/* If the binary operation has first argument @i, fold to NOT. */
54
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
55
{
56
- if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
57
+ if (arg_is_const_val(op->args[1], i)) {
58
return fold_to_not(ctx, op, 2);
59
}
60
return false;
61
@@ -XXX,XX +XXX,XX @@ static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
62
/* If the binary operation has second argument @i, fold to @i. */
63
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
64
{
65
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
66
+ if (arg_is_const_val(op->args[2], i)) {
67
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
68
}
69
return false;
70
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
71
/* If the binary operation has second argument @i, fold to identity. */
72
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
73
{
74
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
75
+ if (arg_is_const_val(op->args[2], i)) {
76
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
77
}
78
return false;
79
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
80
/* If the binary operation has second argument @i, fold to NOT. */
81
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
82
{
83
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
84
+ if (arg_is_const_val(op->args[2], i)) {
85
return fold_to_not(ctx, op, 1);
86
}
87
return false;
88
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
89
* Simplify LT/GE comparisons vs zero to a single compare
90
* vs the high word of the input.
91
*/
92
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
93
- arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
94
+ if (arg_is_const_val(op->args[2], 0) &&
95
+ arg_is_const_val(op->args[3], 0)) {
96
goto do_brcond_high;
97
}
98
break;
99
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
100
}
39
}
101
40
102
/* Inserting a value into zero at offset 0. */
41
/* Inserting a value into zero at offset 0. */
103
- if (arg_is_const(op->args[1])
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
104
- && arg_info(op->args[1])->val == 0
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
105
- && op->args[3] == 0) {
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
106
+ if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
107
uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
108
46
109
op->opc = and_opc;
47
op->opc = and_opc;
110
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
111
}
53
}
112
54
113
/* Inserting zero into a value. */
55
/* Inserting zero into a value. */
114
- if (arg_is_const(op->args[2])
56
- if (arg_is_const_val(op->args[2], 0)) {
115
- && arg_info(op->args[2])->val == 0) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
116
+ if (arg_is_const_val(op->args[2], 0)) {
58
+ if (ti_is_const_val(t2, 0)) {
117
uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
118
60
119
op->opc = and_opc;
61
op->opc = and_opc;
120
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
62
op->args[2] = arg_new_constant(ctx, mask);
121
* Simplify LT/GE comparisons vs zero to a single compare
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
122
* vs the high word of the input.
64
- return false;
123
*/
65
+ return fold_and(ctx, op);
124
- if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
66
}
125
- arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
67
126
+ if (arg_is_const_val(op->args[3], 0) &&
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
127
+ arg_is_const_val(op->args[4], 0)) {
69
- op->args[3], op->args[4],
128
goto do_setcond_high;
70
- arg_info(op->args[2])->z_mask);
129
}
71
- return false;
130
break;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
131
--
77
--
132
2.34.1
78
2.43.0
133
134
1
Better constraint for tcg_out_cmp, based on the comparison.
1
The input which overlaps the sign bit of the output can
2
We can't yet remove the fallback to load constants into a
2
have its input s_mask propagated to the output s_mask.
3
scratch because of tcg_out_cmp2, but that path should not
4
be as frequent.
5
3
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
tcg/ppc/tcg-target-con-set.h | 5 ++--
7
tcg/optimize.c | 14 ++++++++++++--
10
tcg/ppc/tcg-target-con-str.h | 1 +
8
1 file changed, 12 insertions(+), 2 deletions(-)
11
tcg/ppc/tcg-target.c.inc | 48 ++++++++++++++++++++++++++++++------
12
3 files changed, 44 insertions(+), 10 deletions(-)
13
9
14
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/ppc/tcg-target-con-set.h
12
--- a/tcg/optimize.c
17
+++ b/tcg/ppc/tcg-target-con-set.h
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
19
*/
15
TempOptInfo *t2 = arg_info(op->args[2]);
20
C_O0_I1(r)
16
int ofs = op->args[3];
21
C_O0_I2(r, r)
17
int len = op->args[4];
22
-C_O0_I2(r, ri)
18
+ int width;
23
+C_O0_I2(r, rC)
19
TCGOpcode and_opc;
24
C_O0_I2(v, r)
20
- uint64_t z_mask;
25
C_O0_I3(r, r, r)
21
+ uint64_t z_mask, s_mask;
26
C_O0_I3(o, m, r)
22
27
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rI, ri)
23
if (ti_is_const(t1) && ti_is_const(t2)) {
28
C_O1_I2(r, rI, rT)
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
29
C_O1_I2(r, r, r)
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
30
C_O1_I2(r, r, ri)
26
switch (ctx->type) {
31
+C_O1_I2(r, r, rC)
27
case TCG_TYPE_I32:
32
C_O1_I2(r, r, rI)
28
and_opc = INDEX_op_and_i32;
33
C_O1_I2(r, r, rT)
29
+ width = 32;
34
C_O1_I2(r, r, rU)
30
break;
35
C_O1_I2(r, r, rZW)
31
case TCG_TYPE_I64:
36
C_O1_I2(v, v, v)
32
and_opc = INDEX_op_and_i64;
37
C_O1_I3(v, v, v, v)
33
+ width = 64;
38
-C_O1_I4(r, r, ri, rZ, rZ)
34
break;
39
+C_O1_I4(r, r, rC, rZ, rZ)
35
default:
40
C_O1_I4(r, r, r, ri, ri)
36
g_assert_not_reached();
41
C_O2_I1(r, r, r)
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
42
C_N1O1_I1(o, m, r)
38
return fold_and(ctx, op);
43
diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/ppc/tcg-target-con-str.h
46
+++ b/tcg/ppc/tcg-target-con-str.h
47
@@ -XXX,XX +XXX,XX @@ REGS('v', ALL_VECTOR_REGS)
48
* Define constraint letters for constants:
49
* CONST(letter, TCG_CT_CONST_* bit set)
50
*/
51
+CONST('C', TCG_CT_CONST_CMP)
52
CONST('I', TCG_CT_CONST_S16)
53
CONST('M', TCG_CT_CONST_MONE)
54
CONST('T', TCG_CT_CONST_S32)
55
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/ppc/tcg-target.c.inc
58
+++ b/tcg/ppc/tcg-target.c.inc
59
@@ -XXX,XX +XXX,XX @@
60
#define SZR (TCG_TARGET_REG_BITS / 8)
61
62
#define TCG_CT_CONST_S16 0x100
63
+#define TCG_CT_CONST_U16 0x200
64
#define TCG_CT_CONST_S32 0x400
65
#define TCG_CT_CONST_U32 0x800
66
#define TCG_CT_CONST_ZERO 0x1000
67
#define TCG_CT_CONST_MONE 0x2000
68
#define TCG_CT_CONST_WSZ 0x4000
69
+#define TCG_CT_CONST_CMP 0x8000
70
71
#define ALL_GENERAL_REGS 0xffffffffu
72
#define ALL_VECTOR_REGS 0xffffffff00000000ull
73
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t sval, int ct,
74
sval = (int32_t)sval;
75
}
39
}
76
40
77
+ if (ct & TCG_CT_CONST_CMP) {
41
+ /* The s_mask from the top portion of the deposit is still valid. */
78
+ switch (cond) {
42
+ if (ofs + len == width) {
79
+ case TCG_COND_EQ:
43
+ s_mask = t2->s_mask << ofs;
80
+ case TCG_COND_NE:
44
+ } else {
81
+ ct |= TCG_CT_CONST_S16 | TCG_CT_CONST_U16;
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
82
+ break;
83
+ case TCG_COND_LT:
84
+ case TCG_COND_GE:
85
+ case TCG_COND_LE:
86
+ case TCG_COND_GT:
87
+ ct |= TCG_CT_CONST_S16;
88
+ break;
89
+ case TCG_COND_LTU:
90
+ case TCG_COND_GEU:
91
+ case TCG_COND_LEU:
92
+ case TCG_COND_GTU:
93
+ ct |= TCG_CT_CONST_U16;
94
+ break;
95
+ default:
96
+ g_assert_not_reached();
97
+ }
98
+ }
46
+ }
99
+
47
+
100
if ((ct & TCG_CT_CONST_S16) && sval == (int16_t)sval) {
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
101
return 1;
49
- return fold_masks_z(ctx, op, z_mask);
102
}
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
103
+ if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) {
51
}
104
+ return 1;
52
105
+ }
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
106
if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
107
return 1;
108
}
109
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
110
111
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
112
113
- /* Simplify the comparisons below wrt CMPI. */
114
+ /*
115
+ * Simplify the comparisons below wrt CMPI.
116
+ * All of the tests are 16-bit, so a 32-bit sign extend always works.
117
+ */
118
if (type == TCG_TYPE_I32) {
119
arg2 = (int32_t)arg2;
120
}
121
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
122
case INDEX_op_sar_i32:
123
case INDEX_op_rotl_i32:
124
case INDEX_op_rotr_i32:
125
- case INDEX_op_setcond_i32:
126
- case INDEX_op_negsetcond_i32:
127
case INDEX_op_and_i64:
128
case INDEX_op_andc_i64:
129
case INDEX_op_shl_i64:
130
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
131
case INDEX_op_sar_i64:
132
case INDEX_op_rotl_i64:
133
case INDEX_op_rotr_i64:
134
- case INDEX_op_setcond_i64:
135
- case INDEX_op_negsetcond_i64:
136
return C_O1_I2(r, r, ri);
137
138
case INDEX_op_mul_i32:
139
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
140
141
case INDEX_op_brcond_i32:
142
case INDEX_op_brcond_i64:
143
- return C_O0_I2(r, ri);
144
-
145
+ return C_O0_I2(r, rC);
146
+ case INDEX_op_setcond_i32:
147
+ case INDEX_op_setcond_i64:
148
+ case INDEX_op_negsetcond_i32:
149
+ case INDEX_op_negsetcond_i64:
150
+ return C_O1_I2(r, r, rC);
151
case INDEX_op_movcond_i32:
152
case INDEX_op_movcond_i64:
153
- return C_O1_I4(r, r, ri, rZ, rZ);
154
+ return C_O1_I4(r, r, rC, rZ, rZ);
155
+
156
case INDEX_op_deposit_i32:
157
case INDEX_op_deposit_i64:
158
return C_O1_I2(r, 0, rZ);
159
--
54
--
160
2.34.1
55
2.43.0
161
162
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 1)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
t = dup_const(TCGOP_VECE(op), t);
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
op->opc = INDEX_op_dup_vec;
TCGOP_VECE(op) = MO_32;
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
--
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Message-Id: <20231028194522.245170-12-richard.henderson@linaro.org>
4
[PMD: Split from bigger patch, part 1/2]
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-Id: <20231108145244.72421-1-philmd@linaro.org>
7
---
6
---
8
tcg/arm/tcg-target.c.inc | 32 +++++++++++++++++---------------
7
tcg/optimize.c | 13 ++++++++++---
9
1 file changed, 17 insertions(+), 15 deletions(-)
8
1 file changed, 10 insertions(+), 3 deletions(-)
10
9
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/arm/tcg-target.c.inc
12
--- a/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
16
}
15
return fold_masks_zs(ctx, op, z_mask, 0);
17
}
16
}
18
17
19
+static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
20
+ TCGArg b, int b_const)
21
+{
19
+{
22
+ tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
23
+ return cond;
24
+}
21
+}
25
+
22
+
26
static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
27
const int *const_args)
28
{
24
{
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
30
/* Constraints mean that v2 is always in the same register as dest,
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
31
* so we only need to do "if condition passed, move v1 to dest".
27
32
*/
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
33
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
29
{
34
- args[1], args[2], const_args[2]);
30
+ uint64_t s_mask;
35
- tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
31
+
36
+ c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
32
if (fold_const2_commutative(ctx, op) ||
37
+ tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
33
fold_xi_to_x(ctx, op, -1) ||
38
ARITH_MVN, args[0], 0, args[3], const_args[3]);
34
fold_xi_to_not(ctx, op, 0)) {
39
break;
35
return true;
40
case INDEX_op_add_i32:
36
}
41
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
37
42
break;
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
39
- & arg_info(op->args[2])->s_mask;
44
case INDEX_op_brcond_i32:
40
- return false;
45
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
41
+ s_mask = arg_info(op->args[1])->s_mask
46
- args[0], args[1], const_args[1]);
42
+ & arg_info(op->args[2])->s_mask;
47
- tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
43
+ return fold_masks_s(ctx, op, s_mask);
48
- arg_label(args[3]));
44
}
49
+ c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
45
50
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
51
break;
52
case INDEX_op_setcond_i32:
53
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
54
- args[1], args[2], const_args[2]);
55
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
56
+ c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
57
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
58
ARITH_MOV, args[0], 0, 1);
59
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
60
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
61
ARITH_MOV, args[0], 0, 0);
62
break;
63
case INDEX_op_negsetcond_i32:
64
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
65
- args[1], args[2], const_args[2]);
66
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
67
+ c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
68
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
69
ARITH_MVN, args[0], 0, 0);
70
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
71
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
72
ARITH_MOV, args[0], 0, 0);
73
break;
74
75
--
47
--
76
2.34.1
48
2.43.0
77
78
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
static bool fold_extract(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask_old, z_mask;
+ TempOptInfo *t1 = arg_info(op->args[1]);
int pos = op->args[2];
int len = op->args[3];

- if (arg_is_const(op->args[1])) {
- uint64_t t;
-
- t = arg_info(op->args[1])->val;
- t = extract64(t, pos, len);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ if (ti_is_const(t1)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ extract64(ti_const_val(t1), pos, len));
}

- z_mask_old = arg_info(op->args[1])->z_mask;
+ z_mask_old = t1->z_mask;
z_mask = extract64(z_mask_old, pos, len);
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
return true;
}
- ctx->z_mask = z_mask;

- return fold_masks(ctx, op);
+ return fold_masks_z(ctx, op, z_mask);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
}
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
--
2.43.0
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
6
---
3
tcg/s390x/tcg-target.h | 2 +-
7
tcg/optimize.c | 29 ++++++++++++-----------------
4
tcg/s390x/tcg-target.c.inc | 139 +++++++++++++++++++++++++------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
5
2 files changed, 97 insertions(+), 44 deletions(-)
6
9
7
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/s390x/tcg-target.h
12
--- a/tcg/optimize.c
10
+++ b/tcg/s390x/tcg-target.h
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
15
13
#define TCG_TARGET_HAS_qemu_ldst_i128 1
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
14
15
-#define TCG_TARGET_HAS_tst 0
16
+#define TCG_TARGET_HAS_tst 1
17
18
#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
19
#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
20
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/s390x/tcg-target.c.inc
23
+++ b/tcg/s390x/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
25
RI_OILH = 0xa50a,
26
RI_OILL = 0xa50b,
27
RI_TMLL = 0xa701,
28
+ RI_TMLH = 0xa700,
29
+ RI_TMHL = 0xa703,
30
+ RI_TMHH = 0xa702,
31
32
RIEb_CGRJ = 0xec64,
33
RIEb_CLGRJ = 0xec65,
34
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
35
#define S390_CC_NEVER 0
36
#define S390_CC_ALWAYS 15
37
38
+#define S390_TM_EQ 8 /* CC == 0 */
39
+#define S390_TM_NE 7 /* CC in {1,2,3} */
40
+
41
/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
42
-static const uint8_t tcg_cond_to_s390_cond[] = {
43
+static const uint8_t tcg_cond_to_s390_cond[16] = {
44
[TCG_COND_EQ] = S390_CC_EQ,
45
[TCG_COND_NE] = S390_CC_NE,
46
+ [TCG_COND_TSTEQ] = S390_CC_EQ,
47
+ [TCG_COND_TSTNE] = S390_CC_NE,
48
[TCG_COND_LT] = S390_CC_LT,
49
[TCG_COND_LE] = S390_CC_LE,
50
[TCG_COND_GT] = S390_CC_GT,
51
@@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_s390_cond[] = {
52
/* Condition codes that result from a LOAD AND TEST. Here, we have no
53
unsigned instruction variation, however since the test is vs zero we
54
can re-map the outcomes appropriately. */
55
-static const uint8_t tcg_cond_to_ltr_cond[] = {
56
+static const uint8_t tcg_cond_to_ltr_cond[16] = {
57
[TCG_COND_EQ] = S390_CC_EQ,
58
[TCG_COND_NE] = S390_CC_NE,
59
+ [TCG_COND_TSTEQ] = S390_CC_ALWAYS,
60
+ [TCG_COND_TSTNE] = S390_CC_NEVER,
61
[TCG_COND_LT] = S390_CC_LT,
62
[TCG_COND_LE] = S390_CC_LE,
63
[TCG_COND_GT] = S390_CC_GT,
64
@@ -XXX,XX +XXX,XX @@ static bool risbg_mask(uint64_t c)
65
static bool tcg_target_const_match(int64_t val, int ct,
66
TCGType type, TCGCond cond, int vece)
67
{
17
{
68
+ uint64_t uval = val;
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
69
+
19
+ uint64_t s_mask_old, s_mask, z_mask;
70
if (ct & TCG_CT_CONST) {
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
71
return true;
24
return true;
72
}
25
}
73
if (type == TCG_TYPE_I32) {
26
74
+ uval = (uint32_t)val;
27
- z_mask = arg_info(op->args[1])->z_mask;
75
val = (int32_t)val;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
76
}
58
}
77
59
78
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
60
- if (z_mask & sign) {
79
case TCG_COND_GTU:
61
- z_mask |= sign;
80
ct |= TCG_CT_CONST_U32; /* CLGFI */
62
- }
81
break;
63
- s_mask |= sign << 1;
82
+ case TCG_COND_TSTNE:
64
-
83
+ case TCG_COND_TSTEQ:
65
- ctx->z_mask = z_mask;
84
+ if (is_const_p16(uval) >= 0) {
66
- ctx->s_mask = s_mask;
85
+ return true; /* TMxx */
67
+ }
+ if (risbg_mask(uval)) {
+ return true; /* RISBG */
+ }
+ break;
default:
g_assert_not_reached();
}
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
if (ct & TCG_CT_CONST_INV) {
val = ~val;
}
- /*
- * Note that is_const_p16 is a subset of is_const_p32,
- * so we don't need both constraints.
- */
if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
return true;
}
@@ -XXX,XX +XXX,XX @@ static const S390Opcode oi_insns[4] = {
static const S390Opcode lif_insns[2] = {
RIL_LLILF, RIL_LLIHF,
};
+static const S390Opcode tm_insns[4] = {
+ RI_TMLL, RI_TMLH, RI_TMHL, RI_TMHH
+};
/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
@@ -XXX,XX +XXX,XX @@ static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
TCGCond inv_c = tcg_invert_cond(c);
S390Opcode op;
+ if (is_tst_cond(c)) {
+ tcg_debug_assert(!need_carry);
+
+ if (!c2const) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NRK, TCG_REG_R0, r1, c2);
+ } else {
+ tcg_out_insn(s, RRFa, NGRK, TCG_REG_R0, r1, c2);
+ }
+ goto exit;
+ }
+
+ if (type == TCG_TYPE_I32) {
+ c2 = (uint32_t)c2;
+ }
+
+ int i = is_const_p16(c2);
+ if (i >= 0) {
+ tcg_out_insn_RI(s, tm_insns[i], r1, c2 >> (i * 16));
+ *inv_cc = TCG_COND_TSTEQ ? S390_TM_NE : S390_TM_EQ;
+ return *inv_cc ^ 15;
+ }
+
+ if (risbg_mask(c2)) {
+ tgen_andi_risbg(s, TCG_REG_R0, r1, c2);
+ goto exit;
+ }
+ g_assert_not_reached();
+ }
+
if (c2const) {
if (c2 == 0) {
if (!(is_unsigned && need_carry)) {
@@ -XXX,XX +XXX,XX @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
int cc;
- bool is_unsigned = is_unsigned_cond(c);
- bool in_range;
- S390Opcode opc;
- cc = tcg_cond_to_s390_cond[c];
+ if (!is_tst_cond(c)) {
+ bool is_unsigned = is_unsigned_cond(c);
+ bool in_range;
+ S390Opcode opc;
- if (!c2const) {
- opc = (type == TCG_TYPE_I32
- ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
- : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
- tgen_compare_branch(s, opc, cc, r1, c2, l);
- return;
- }
+ cc = tcg_cond_to_s390_cond[c];
- /*
- * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
- * If the immediate we've been given does not fit that range, we'll
- * fall back to separate compare and branch instructions using the
- * larger comparison range afforded by COMPARE IMMEDIATE.
- */
- if (type == TCG_TYPE_I32) {
- if (is_unsigned) {
- opc = RIEc_CLIJ;
- in_range = (uint32_t)c2 == (uint8_t)c2;
- } else {
- opc = RIEc_CIJ;
- in_range = (int32_t)c2 == (int8_t)c2;
+ if (!c2const) {
+ opc = (type == TCG_TYPE_I32
+ ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
+ : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
+ tgen_compare_branch(s, opc, cc, r1, c2, l);
+ return;
}
- } else {
- if (is_unsigned) {
- opc = RIEc_CLGIJ;
- in_range = (uint64_t)c2 == (uint8_t)c2;
+
+ /*
+ * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
+ * If the immediate we've been given does not fit that range, we'll
+ * fall back to separate compare and branch instructions using the
+ * larger comparison range afforded by COMPARE IMMEDIATE.
+ */
+ if (type == TCG_TYPE_I32) {
+ if (is_unsigned) {
+ opc = RIEc_CLIJ;
+ in_range = (uint32_t)c2 == (uint8_t)c2;
+ } else {
+ opc = RIEc_CIJ;
+ in_range = (int32_t)c2 == (int8_t)c2;
+ }
} else {
- opc = RIEc_CGIJ;
- in_range = (int64_t)c2 == (int8_t)c2;
+ if (is_unsigned) {
+ opc = RIEc_CLGIJ;
+ in_range = (uint64_t)c2 == (uint8_t)c2;
+ } else {
+ opc = RIEc_CGIJ;
+ in_range = (int64_t)c2 == (int8_t)c2;
+ }
+ }
+ if (in_range) {
+ tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
+ return;
}
- }
- if (in_range) {
- tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
- return;
}
cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->oi = oi;
ldst->addrlo_reg = addr_reg;
- /* We are expecting a_bits to max out at 7, much lower than TMLL. */
tcg_debug_assert(a_mask <= 0xffff);
tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
- tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
+ tcg_out16(s, RI_BRC | (S390_TM_NE << 4));
ldst->label_ptr[0] = s->code_ptr++;
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
l2 = gen_new_label();
tcg_out_insn(s, RI, TMLL, addr_reg, 15);
- tgen_branch(s, 7, l1); /* CC in {1,2,3} */
+ tgen_branch(s, S390_TM_NE, l1);
}
tcg_debug_assert(!need_bswap);
--
2.34.1

if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}
- return fold_masks(ctx, op);
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
--
2.43.0
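Note (not part of the series): the tgen_cmp2 hunk above keys the TSTEQ/TSTNE path off is_const_p16(), i.e. when every set bit of the constant sits in a single 16-bit halfword, one TEST UNDER MASK instruction from tm_insns[] (TMLL/TMLH/TMHL/TMHH) is enough. The following is a self-contained, illustrative sketch of that halfword-selection test only; const_p16_index is an invented name, zero handling is ignored for brevity, and this is not the QEMU helper itself.

#include <stdint.h>
#include <stdio.h>

/* Return which 16-bit halfword holds all of val's set bits, or -1. */
static int const_p16_index(uint64_t val)
{
    for (int i = 0; i < 4; i++) {
        uint64_t mask = 0xffffull << (i * 16);
        if ((val & ~mask) == 0) {
            return i;       /* one TMxx on halfword i would cover this mask */
        }
    }
    return -1;              /* bits span halfwords: needs another strategy */
}

int main(void)
{
    uint64_t tests[] = { 0x8000, 0x00010000, 0x0001000000000000ull, 0x10001 };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        printf("%#llx -> halfword %d\n",
               (unsigned long long)tests[i], const_p16_index(tests[i]));
    }
    return 0;
}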
New patch

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
g_assert_not_reached();
}
- ctx->z_mask = z_mask;
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
return true;
}
- return fold_masks(ctx, op);
+
+ return fold_masks_z(ctx, op, z_mask);
}
static bool fold_mb(OptContext *ctx, TCGOp *op)
--
2.43.0
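Note (not part of the series): the patch above routes the result's known-zero information through fold_masks_z() instead of writing ctx->z_mask directly, keeping the mask bookkeeping in one helper. A simplified, self-contained sketch of that idea follows; the struct and function names here are invented for illustration and this is not the QEMU implementation.

#include <stdint.h>
#include <stdio.h>

struct val_info {
    uint64_t z_mask;    /* bits that may be nonzero; clear bits are known zero */
};

/* Invented helper: record the result's mask in one place and report "handled". */
static int fold_masks_z_sketch(struct val_info *result, uint64_t z_mask)
{
    result->z_mask = z_mask;
    return 1;           /* mirrors the bool the real fold_* routines return */
}

int main(void)
{
    struct val_info out;
    /* e.g. an 8-bit zero-extension can only produce bits 0..7 */
    fold_masks_z_sketch(&out, 0xff);
    printf("possible result bits: %#llx\n", (unsigned long long)out.z_mask);
    return 0;
}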
1
Handle modifications to the arguments and condition
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
in a single place.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/optimize.c | 57 ++++++++++++++++++++++++--------------------------
6
tcg/optimize.c | 19 +++++++++++--------
8
1 file changed, 27 insertions(+), 30 deletions(-)
7
1 file changed, 11 insertions(+), 8 deletions(-)
9
8
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
11
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
15
return false;
16
}
17
18
+static int do_constant_folding_cond1(OptContext *ctx, TCGArg dest,
19
+ TCGArg *p1, TCGArg *p2, TCGArg *pcond)
20
+{
21
+ TCGCond cond;
22
+ bool swap;
23
+ int r;
24
+
25
+ swap = swap_commutative(dest, p1, p2);
26
+ cond = *pcond;
27
+ if (swap) {
28
+ *pcond = cond = tcg_swap_cond(cond);
29
+ }
30
+
31
+ r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
32
+ return r;
33
+}
34
+
35
static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
36
{
37
for (int i = 0; i < nb_args; i++) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
39
40
static bool fold_brcond(OptContext *ctx, TCGOp *op)
41
{
42
- TCGCond cond = op->args[2];
43
- int i;
44
-
45
- if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
46
- op->args[2] = cond = tcg_swap_cond(cond);
47
- }
48
-
49
- i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
50
+ int i = do_constant_folding_cond1(ctx, NO_DEST, &op->args[0],
51
+ &op->args[1], &op->args[2]);
52
if (i == 0) {
53
tcg_op_remove(ctx->tcg, op);
54
return true;
55
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
56
14
57
static bool fold_movcond(OptContext *ctx, TCGOp *op)
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
58
{
16
{
59
- TCGCond cond = op->args[5];
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
60
int i;
19
int i;
61
20
62
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
/* If true and false values are the same, eliminate the cmp. */
63
- op->args[5] = cond = tcg_swap_cond(cond);
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
64
- }
65
/*
66
* Canonicalize the "false" input reg to match the destination reg so
67
* that the tcg backend can implement a "move if true" operation.
68
*/
69
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
70
- op->args[5] = cond = tcg_invert_cond(cond);
71
+ op->args[5] = tcg_invert_cond(op->args[5]);
72
}
73
74
- i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
75
+ i = do_constant_folding_cond1(ctx, NO_DEST, &op->args[1],
76
+ &op->args[2], &op->args[5]);
77
if (i >= 0) {
78
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
79
}
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
80
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
81
uint64_t tv = arg_info(op->args[3])->val;
45
}
82
uint64_t fv = arg_info(op->args[4])->val;
46
}
83
TCGOpcode opc, negopc = 0;
84
+ TCGCond cond = op->args[5];
85
86
switch (ctx->type) {
87
case TCG_TYPE_I32:
88
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
89
90
static bool fold_setcond(OptContext *ctx, TCGOp *op)
91
{
92
- TCGCond cond = op->args[3];
93
- int i;
94
-
95
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
96
- op->args[3] = cond = tcg_swap_cond(cond);
97
- }
98
-
99
- i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
100
+ int i = do_constant_folding_cond1(ctx, op->args[0], &op->args[1],
101
+ &op->args[2], &op->args[3]);
102
if (i >= 0) {
103
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
104
}
47
}
105
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
48
- return false;
106
49
+
107
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
108
{
51
}
109
- TCGCond cond = op->args[3];
52
110
- int i;
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
111
-
112
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
113
- op->args[3] = cond = tcg_swap_cond(cond);
114
- }
115
-
116
- i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
117
+ int i = do_constant_folding_cond1(ctx, op->args[0], &op->args[1],
118
+ &op->args[2], &op->args[3]);
119
if (i >= 0) {
120
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
121
}
122
--
54
--
123
2.34.1
55
2.43.0
124
125
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
3
---
4
target/alpha/translate.c | 49 +++++++++++++++++++---------------------
4
tcg/optimize.c | 6 +++---
5
1 file changed, 23 insertions(+), 26 deletions(-)
5
1 file changed, 3 insertions(+), 3 deletions(-)
6
6
7
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
9
--- a/target/alpha/translate.c
9
--- a/tcg/optimize.c
10
+++ b/target/alpha/translate.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
12
fold_xi_to_x(ctx, op, 1)) {
13
/* Fold -0.0 for comparison with COND. */
13
return true;
14
15
-static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
16
+static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
17
{
18
- uint64_t mzero = 1ull << 63;
19
+ TCGv_i64 tmp;
20
21
- switch (cond) {
22
+ *pimm = 0;
23
+ switch (*pcond) {
24
case TCG_COND_LE:
25
case TCG_COND_GT:
26
/* For <= or >, the -0.0 value directly compares the way we want. */
27
- tcg_gen_mov_i64(dest, src);
28
- break;
29
+ return src;
30
31
case TCG_COND_EQ:
32
case TCG_COND_NE:
33
- /* For == or !=, we can simply mask off the sign bit and compare. */
34
- tcg_gen_andi_i64(dest, src, mzero - 1);
35
- break;
36
+ /* For == or !=, we can compare without the sign bit. */
37
+ *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
38
+ *pimm = INT64_MAX;
39
+ return src;
40
41
case TCG_COND_GE:
42
case TCG_COND_LT:
43
/* For >= or <, map -0.0 to +0.0. */
44
- tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
45
- src, tcg_constant_i64(0));
46
- break;
47
+ tmp = tcg_temp_new_i64();
48
+ tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
49
+ src, tcg_constant_i64(INT64_MIN),
50
+ tcg_constant_i64(0), src);
51
+ return tmp;
52
53
default:
54
- abort();
55
+ g_assert_not_reached();
56
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
57
}
17
}
58
18
59
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
60
int32_t disp)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
61
{
21
fold_xi_to_i(ctx, op, 0)) {
62
- TCGv cmp_tmp = tcg_temp_new();
22
return true;
63
- DisasJumpType ret;
23
}
64
-
24
- return false;
65
- gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
25
+ return finish_folding(ctx, op);
66
- ret = gen_bcond_internal(ctx, cond, cmp_tmp, 0, disp);
67
- return ret;
68
+ uint64_t imm;
69
+ TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
70
+ return gen_bcond_internal(ctx, cond, tmp, imm, disp);
71
}
26
}
72
27
73
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
74
{
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
75
- TCGv_i64 va, vb, z;
30
tcg_opt_gen_movi(ctx, op2, rh, h);
76
-
31
return true;
77
- z = load_zero(ctx);
32
}
78
- vb = load_fpr(ctx, rb);
33
- return false;
79
- va = tcg_temp_new();
34
+ return finish_folding(ctx, op);
80
- gen_fold_mzero(cond, va, load_fpr(ctx, ra));
81
-
82
- tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
83
+ uint64_t imm;
84
+ TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
85
+ tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
86
+ tmp, tcg_constant_i64(imm),
87
+ load_fpr(ctx, rb), load_fpr(ctx, rc));
88
}
35
}
89
36
90
#define QUAL_RM_N 0x080 /* Round mode nearest even */
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
91
--
38
--
92
2.34.1
39
2.43.0
93
94
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
1
Signed 33-bit == signed 32-bit + unsigned 32-bit.
1
Avoid the use of the OptContext slots.
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/s390x/tcg-target-con-set.h | 8 ++++----
6
tcg/optimize.c | 7 +------
7
tcg/s390x/tcg-target-con-str.h | 2 +-
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
tcg/s390x/tcg-target.c.inc | 36 +++++++++++++++++-----------------
9
3 files changed, 23 insertions(+), 23 deletions(-)
10
8
11
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/s390x/tcg-target-con-set.h
11
--- a/tcg/optimize.c
14
+++ b/tcg/s390x/tcg-target-con-set.h
12
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
16
C_O0_I1(r)
14
if (fold_const1(ctx, op)) {
17
C_O0_I2(r, r)
18
C_O0_I2(r, ri)
19
-C_O0_I2(r, rA)
20
+C_O0_I2(r, rJU)
21
C_O0_I2(v, r)
22
C_O0_I3(o, m, r)
23
C_O1_I1(r, r)
24
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rI)
25
C_O1_I2(r, 0, rJ)
26
C_O1_I2(r, r, r)
27
C_O1_I2(r, r, ri)
28
-C_O1_I2(r, r, rA)
29
+C_O1_I2(r, r, rJU)
30
C_O1_I2(r, r, rI)
31
C_O1_I2(r, r, rJ)
32
C_O1_I2(r, r, rK)
33
@@ -XXX,XX +XXX,XX @@ C_O1_I2(v, v, r)
34
C_O1_I2(v, v, v)
35
C_O1_I3(v, v, v, v)
36
C_O1_I4(r, r, ri, rI, r)
37
-C_O1_I4(r, r, rA, rI, r)
38
+C_O1_I4(r, r, rJU, rI, r)
39
C_O2_I1(o, m, r)
40
C_O2_I2(o, m, 0, r)
41
C_O2_I2(o, m, r, r)
42
C_O2_I3(o, m, 0, 1, r)
43
C_N1_O1_I4(r, r, 0, 1, ri, r)
44
-C_N1_O1_I4(r, r, 0, 1, rA, r)
45
+C_N1_O1_I4(r, r, 0, 1, rJU, r)
46
diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/tcg/s390x/tcg-target-con-str.h
49
+++ b/tcg/s390x/tcg-target-con-str.h
50
@@ -XXX,XX +XXX,XX @@ REGS('o', 0xaaaa) /* odd numbered general regs */
51
* Define constraint letters for constants:
52
* CONST(letter, TCG_CT_CONST_* bit set)
53
*/
54
-CONST('A', TCG_CT_CONST_S33)
55
CONST('I', TCG_CT_CONST_S16)
56
CONST('J', TCG_CT_CONST_S32)
57
CONST('K', TCG_CT_CONST_P32)
58
CONST('N', TCG_CT_CONST_INV)
59
CONST('R', TCG_CT_CONST_INVRISBG)
60
+CONST('U', TCG_CT_CONST_U32)
61
CONST('Z', TCG_CT_CONST_ZERO)
62
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
63
index XXXXXXX..XXXXXXX 100644
64
--- a/tcg/s390x/tcg-target.c.inc
65
+++ b/tcg/s390x/tcg-target.c.inc
66
@@ -XXX,XX +XXX,XX @@
67
68
#define TCG_CT_CONST_S16 (1 << 8)
69
#define TCG_CT_CONST_S32 (1 << 9)
70
-#define TCG_CT_CONST_S33 (1 << 10)
71
+#define TCG_CT_CONST_U32 (1 << 10)
72
#define TCG_CT_CONST_ZERO (1 << 11)
73
#define TCG_CT_CONST_P32 (1 << 12)
74
#define TCG_CT_CONST_INV (1 << 13)
75
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
76
TCGType type, TCGCond cond, int vece)
77
{
78
if (ct & TCG_CT_CONST) {
79
- return 1;
80
+ return true;
81
}
82
-
83
if (type == TCG_TYPE_I32) {
84
val = (int32_t)val;
85
}
86
87
- /* The following are mutually exclusive. */
88
- if (ct & TCG_CT_CONST_S16) {
89
- return val == (int16_t)val;
90
- } else if (ct & TCG_CT_CONST_S32) {
91
- return val == (int32_t)val;
92
- } else if (ct & TCG_CT_CONST_S33) {
93
- return val >= -0xffffffffll && val <= 0xffffffffll;
94
- } else if (ct & TCG_CT_CONST_ZERO) {
95
- return val == 0;
96
+ if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
97
+ return true;
98
+ }
99
+ if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
100
+ return true;
101
+ }
102
+ if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
103
+ return true;
104
+ }
105
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
106
+ return true;
107
}
108
109
if (ct & TCG_CT_CONST_INV) {
110
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
111
if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
112
return true;
15
return true;
113
}
16
}
114
-
17
-
115
- return 0;
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
116
+ return false;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
117
}
24
}
118
25
119
/* Emit instructions according to the given instruction format. */
26
static bool fold_or(OptContext *ctx, TCGOp *op)
120
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
121
return C_O1_I2(r, r, ri);
122
case INDEX_op_setcond_i64:
123
case INDEX_op_negsetcond_i64:
124
- return C_O1_I2(r, r, rA);
125
+ return C_O1_I2(r, r, rJU);
126
127
case INDEX_op_clz_i64:
128
return C_O1_I2(r, r, rI);
129
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
130
case INDEX_op_brcond_i32:
131
return C_O0_I2(r, ri);
132
case INDEX_op_brcond_i64:
133
- return C_O0_I2(r, rA);
134
+ return C_O0_I2(r, rJU);
135
136
case INDEX_op_bswap16_i32:
137
case INDEX_op_bswap16_i64:
138
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
139
case INDEX_op_movcond_i32:
140
return C_O1_I4(r, r, ri, rI, r);
141
case INDEX_op_movcond_i64:
142
- return C_O1_I4(r, r, rA, rI, r);
143
+ return C_O1_I4(r, r, rJU, rI, r);
144
145
case INDEX_op_div2_i32:
146
case INDEX_op_div2_i64:
147
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
148
149
case INDEX_op_add2_i64:
150
case INDEX_op_sub2_i64:
151
- return C_N1_O1_I4(r, r, 0, 1, rA, r);
152
+ return C_N1_O1_I4(r, r, 0, 1, rJU, r);
153
154
case INDEX_op_st_vec:
155
return C_O0_I2(v, r);
156
--
27
--
157
2.34.1
28
2.43.0
158
159
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
19
}
20
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
75
--
76
2.43.0
diff view generated by jsdifflib
1
Using cr0 means we could choose to use rc=1 to compute the condition.
1
Stores have no output operands, and so need no further work.
2
Adjust the tables and tcg_out_cmp that feeds them.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/ppc/tcg-target.c.inc | 68 ++++++++++++++++++++--------------------
6
tcg/optimize.c | 11 +++++------
8
1 file changed, 34 insertions(+), 34 deletions(-)
7
1 file changed, 5 insertions(+), 6 deletions(-)
9
8
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.c.inc
11
--- a/tcg/optimize.c
13
+++ b/tcg/ppc/tcg-target.c.inc
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ enum {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
15
};
16
17
static const uint32_t tcg_to_bc[] = {
18
- [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
19
- [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
20
- [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
21
- [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
22
- [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
23
- [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
24
- [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
25
- [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
26
- [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
27
- [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
28
+ [TCG_COND_EQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE,
29
+ [TCG_COND_NE] = BC | BI(0, CR_EQ) | BO_COND_FALSE,
30
+ [TCG_COND_LT] = BC | BI(0, CR_LT) | BO_COND_TRUE,
31
+ [TCG_COND_GE] = BC | BI(0, CR_LT) | BO_COND_FALSE,
32
+ [TCG_COND_LE] = BC | BI(0, CR_GT) | BO_COND_FALSE,
33
+ [TCG_COND_GT] = BC | BI(0, CR_GT) | BO_COND_TRUE,
34
+ [TCG_COND_LTU] = BC | BI(0, CR_LT) | BO_COND_TRUE,
35
+ [TCG_COND_GEU] = BC | BI(0, CR_LT) | BO_COND_FALSE,
36
+ [TCG_COND_LEU] = BC | BI(0, CR_GT) | BO_COND_FALSE,
37
+ [TCG_COND_GTU] = BC | BI(0, CR_GT) | BO_COND_TRUE,
38
};
39
40
/* The low bit here is set if the RA and RB fields must be inverted. */
41
static const uint32_t tcg_to_isel[] = {
42
- [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
43
- [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
44
- [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
45
- [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
46
- [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
47
- [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
48
- [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
49
- [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
50
- [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
51
- [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
52
+ [TCG_COND_EQ] = ISEL | BC_(0, CR_EQ),
53
+ [TCG_COND_NE] = ISEL | BC_(0, CR_EQ) | 1,
54
+ [TCG_COND_LT] = ISEL | BC_(0, CR_LT),
55
+ [TCG_COND_GE] = ISEL | BC_(0, CR_LT) | 1,
56
+ [TCG_COND_LE] = ISEL | BC_(0, CR_GT) | 1,
57
+ [TCG_COND_GT] = ISEL | BC_(0, CR_GT),
58
+ [TCG_COND_LTU] = ISEL | BC_(0, CR_LT),
59
+ [TCG_COND_GEU] = ISEL | BC_(0, CR_LT) | 1,
60
+ [TCG_COND_LEU] = ISEL | BC_(0, CR_GT) | 1,
61
+ [TCG_COND_GTU] = ISEL | BC_(0, CR_GT),
62
};
63
64
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
65
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
66
if (have_isa_3_10) {
67
tcg_insn_unit bi, opc;
68
69
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
70
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
71
72
/* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */
73
bi = tcg_to_bc[cond] & (0x1f << 16);
74
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
75
if (have_isel) {
76
int isel, tab;
77
78
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
79
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
80
81
isel = tcg_to_isel[cond];
82
83
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond,
84
TCGArg arg1, TCGArg arg2, int const_arg2,
85
TCGLabel *l, TCGType type)
86
{
14
{
87
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
15
/* Opcodes that touch guest memory stop the mb optimization. */
88
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
16
ctx->prev_mb = NULL;
89
tcg_out_bc_lab(s, cond, l);
17
- return false;
18
+ return true;
90
}
19
}
91
20
92
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
93
return;
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
94
}
28
}
95
29
96
- tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
30
switch (op->opc) {
97
+ tcg_out_cmp(s, cond, c1, c2, const_c2, 0, type);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
98
32
g_assert_not_reached();
99
if (have_isel) {
33
}
100
int isel = tcg_to_isel[cond];
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
101
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
35
- return false;
102
if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
36
+ return true;
103
tcg_out32(s, opc | RA(a0) | RS(a1));
104
} else {
105
- tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
106
+ tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 0, type);
107
/* Note that the only other valid constant for a2 is 0. */
108
if (have_isel) {
109
tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
110
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
111
do_equality:
112
tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
113
tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
114
- tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
115
+ tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
116
break;
117
118
case TCG_COND_LT:
119
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
120
121
tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
122
tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
123
- tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
124
- tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
125
+ tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
126
+ tcg_out32(s, CROR | BT(0, CR_EQ) | BA(6, bit1) | BB(0, CR_EQ));
127
break;
128
129
default:
130
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
131
const int *const_args)
132
{
133
tcg_out_cmp2(s, args + 1, const_args + 1);
134
- tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
135
- tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
136
+ tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
137
+ tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
138
}
37
}
139
38
140
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
141
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
142
tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
41
TCGType type;
143
0, 6, TCG_TYPE_I32);
42
144
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
145
- /* Combine comparisons into cr7. */
44
- fold_tcg_st(ctx, op);
146
- tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
45
- return false;
147
+ /* Combine comparisons into cr0. */
46
+ return fold_tcg_st(ctx, op);
148
+ tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
47
}
149
} else {
48
150
- /* Full comparison into cr7. */
49
src = arg_temp(op->args[0]);
151
+ /* Full comparison into cr0. */
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
152
tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
51
last = ofs + tcg_type_size(type) - 1;
153
- 0, 7, addr_type);
52
remove_mem_copy_in(ctx, ofs, last);
154
+ 0, 0, addr_type);
53
record_mem_copy(ctx, type, src, ofs, last);
155
}
54
- return false;
156
55
+ return true;
157
/* Load a pointer into the current opcode w/conditional branch-link. */
56
}
57
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
158
--
59
--
159
2.34.1
60
2.43.0
160
161
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
diff view generated by jsdifflib
1
Fold constant comparisons.
1
Change return from bool to int; distinguish between
2
Canonicalize "tst x,x" to equality vs zero.
2
complete folding, simplification, and no change.
3
Canonicalize "tst x,sign" to sign test vs zero.
4
Fold double-word comparisons with zero parts.
5
Fold setcond of "tst x,pow2" to a bit extract.
6
3
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
tcg/optimize.c | 240 ++++++++++++++++++++++++++++++++++++++++++++-----
7
tcg/optimize.c | 22 ++++++++++++++--------
11
1 file changed, 218 insertions(+), 22 deletions(-)
8
1 file changed, 14 insertions(+), 8 deletions(-)
12
9
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
18
return x <= y;
15
return finish_folding(ctx, op);
19
case TCG_COND_GTU:
16
}
20
return x > y;
17
21
- default:
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
22
- g_assert_not_reached();
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
23
+ case TCG_COND_TSTEQ:
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
24
+ return (x & y) == 0;
21
{
25
+ case TCG_COND_TSTNE:
22
uint64_t a_zmask, b_val;
26
+ return (x & y) != 0;
23
TCGCond cond;
27
+ case TCG_COND_ALWAYS:
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
28
+ case TCG_COND_NEVER:
25
op->opc = xor_opc;
29
+ break;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
30
}
31
}
31
+ g_assert_not_reached();
32
-
33
- return false;
34
+ return 0;
32
}
35
}
33
36
34
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
35
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
36
return x <= y;
37
case TCG_COND_GTU:
38
return x > y;
39
- default:
40
- g_assert_not_reached();
41
+ case TCG_COND_TSTEQ:
42
+ return (x & y) == 0;
43
+ case TCG_COND_TSTNE:
44
+ return (x & y) != 0;
45
+ case TCG_COND_ALWAYS:
46
+ case TCG_COND_NEVER:
47
+ break;
48
}
49
+ g_assert_not_reached();
50
}
51
52
-static bool do_constant_folding_cond_eq(TCGCond c)
53
+static int do_constant_folding_cond_eq(TCGCond c)
54
{
55
switch (c) {
56
case TCG_COND_GT:
57
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
58
case TCG_COND_LEU:
59
case TCG_COND_EQ:
60
return 1;
61
- default:
62
- g_assert_not_reached();
63
+ case TCG_COND_TSTEQ:
64
+ case TCG_COND_TSTNE:
65
+ return -1;
66
+ case TCG_COND_ALWAYS:
67
+ case TCG_COND_NEVER:
68
+ break;
69
}
70
+ g_assert_not_reached();
71
}
72
73
/*
74
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
75
} else if (arg_is_const_val(y, 0)) {
76
switch (c) {
77
case TCG_COND_LTU:
78
+ case TCG_COND_TSTNE:
79
return 0;
80
case TCG_COND_GEU:
81
+ case TCG_COND_TSTEQ:
82
return 1;
83
default:
84
return -1;
85
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond1(OptContext *ctx, TCGArg dest,
86
}
87
88
r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
89
- return r;
90
+ if (r >= 0) {
91
+ return r;
92
+ }
93
+ if (!is_tst_cond(cond)) {
94
+ return -1;
95
+ }
96
+
97
+ /*
98
+ * TSTNE x,x -> NE x,0
99
+ * TSTNE x,-1 -> NE x,0
100
+ */
101
+ if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
102
+ *p2 = arg_new_constant(ctx, 0);
103
+ *pcond = tcg_tst_eqne_cond(cond);
104
+ return -1;
105
+ }
106
+
107
+ /* TSTNE x,sign -> LT x,0 */
108
+ if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
109
+ ? INT32_MIN : INT64_MIN))) {
110
+ *p2 = arg_new_constant(ctx, 0);
111
+ *pcond = tcg_tst_ltge_cond(cond);
112
+ }
113
+ return -1;
114
}
115
116
static int do_constant_folding_cond2(OptContext *ctx, TCGArg *args)
117
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(OptContext *ctx, TCGArg *args)
118
TCGArg al, ah, bl, bh;
119
TCGCond c;
120
bool swap;
121
+ int r;
122
123
swap = swap_commutative2(args, args + 2);
124
c = args[4];
125
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(OptContext *ctx, TCGArg *args)
126
tcg_target_ulong alv = arg_info(al)->val;
127
tcg_target_ulong ahv = arg_info(ah)->val;
128
uint64_t a = deposit64(alv, 32, 32, ahv);
129
- return do_constant_folding_cond_64(a, b, c);
130
+
131
+ r = do_constant_folding_cond_64(a, b, c);
132
+ if (r >= 0) {
133
+ return r;
134
+ }
135
}
136
+
137
if (b == 0) {
138
switch (c) {
139
case TCG_COND_LTU:
140
+ case TCG_COND_TSTNE:
141
return 0;
142
case TCG_COND_GEU:
143
+ case TCG_COND_TSTEQ:
144
return 1;
145
default:
146
break;
147
}
148
}
149
+
150
+ /* TSTNE x,-1 -> NE x,0 */
151
+ if (b == -1 && is_tst_cond(c)) {
152
+ args[3] = args[2] = arg_new_constant(ctx, 0);
153
+ args[4] = tcg_tst_eqne_cond(c);
154
+ return -1;
155
+ }
156
+
157
+ /* TSTNE x,sign -> LT x,0 */
158
+ if (b == INT64_MIN && is_tst_cond(c)) {
159
+ /* bl must be 0, so copy that to bh */
160
+ args[3] = bl;
161
+ args[4] = tcg_tst_ltge_cond(c);
162
+ return -1;
163
+ }
164
}
165
+
166
if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
167
- return do_constant_folding_cond_eq(c);
168
+ r = do_constant_folding_cond_eq(c);
169
+ if (r >= 0) {
170
+ return r;
171
+ }
172
+
173
+ /* TSTNE x,x -> NE x,0 */
174
+ if (is_tst_cond(c)) {
175
+ args[3] = args[2] = arg_new_constant(ctx, 0);
176
+ args[4] = tcg_tst_eqne_cond(c);
177
+ return -1;
178
+ }
179
}
180
return -1;
181
}
182
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
183
case 0:
184
goto do_brcond_const;
185
case 1:
186
- op->opc = INDEX_op_brcond_i32;
187
- op->args[1] = op->args[2];
188
- op->args[2] = cond;
189
- op->args[3] = label;
190
- break;
191
+ goto do_brcond_low;
192
+ }
193
+ break;
194
+
195
+ case TCG_COND_TSTEQ:
196
+ case TCG_COND_TSTNE:
197
+ if (arg_is_const_val(op->args[2], 0)) {
198
+ goto do_brcond_high;
199
+ }
200
+ if (arg_is_const_val(op->args[3], 0)) {
201
+ goto do_brcond_low;
202
}
203
break;
204
205
default:
206
break;
207
208
+ do_brcond_low:
209
+ op->opc = INDEX_op_brcond_i32;
210
+ op->args[1] = op->args[2];
211
+ op->args[2] = cond;
212
+ op->args[3] = label;
213
+ return fold_brcond(ctx, op);
214
+
215
do_brcond_high:
216
op->opc = INDEX_op_brcond_i32;
217
op->args[0] = op->args[1];
218
op->args[1] = op->args[3];
219
op->args[2] = cond;
220
op->args[3] = label;
221
- break;
222
+ return fold_brcond(ctx, op);
223
224
do_brcond_const:
225
if (i == 0) {
226
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
227
return false;
228
}
229
230
+static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
231
+{
232
+ TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc, uext_opc, sext_opc;
233
+ TCGCond cond = op->args[3];
234
+ TCGArg ret, src1, src2;
235
+ TCGOp *op2;
236
+ uint64_t val;
237
+ int sh;
238
+ bool inv;
239
+
240
+ if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
241
+ return;
242
+ }
243
+
244
+ src2 = op->args[2];
245
+ val = arg_info(src2)->val;
246
+ if (!is_power_of_2(val)) {
247
+ return;
248
+ }
249
+ sh = ctz64(val);
250
+
251
+ switch (ctx->type) {
252
+ case TCG_TYPE_I32:
253
+ and_opc = INDEX_op_and_i32;
254
+ sub_opc = INDEX_op_sub_i32;
255
+ xor_opc = INDEX_op_xor_i32;
256
+ shr_opc = INDEX_op_shr_i32;
257
+ neg_opc = INDEX_op_neg_i32;
258
+ if (TCG_TARGET_extract_i32_valid(sh, 1)) {
259
+ uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
260
+ sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
261
+ }
262
+ break;
263
+ case TCG_TYPE_I64:
264
+ and_opc = INDEX_op_and_i64;
265
+ sub_opc = INDEX_op_sub_i64;
266
+ xor_opc = INDEX_op_xor_i64;
267
+ shr_opc = INDEX_op_shr_i64;
268
+ neg_opc = INDEX_op_neg_i64;
269
+ if (TCG_TARGET_extract_i64_valid(sh, 1)) {
270
+ uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
271
+ sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
272
+ }
273
+ break;
274
+ default:
275
+ g_assert_not_reached();
276
+ }
277
+
278
+ ret = op->args[0];
279
+ src1 = op->args[1];
280
+ inv = cond == TCG_COND_TSTEQ;
281
+
282
+ if (sh && sext_opc && neg && !inv) {
283
+ op->opc = sext_opc;
284
+ op->args[1] = src1;
285
+ op->args[2] = sh;
286
+ op->args[3] = 1;
287
+ return;
288
+ } else if (sh && uext_opc) {
289
+ op->opc = uext_opc;
290
+ op->args[1] = src1;
291
+ op->args[2] = sh;
292
+ op->args[3] = 1;
293
+ } else {
294
+ if (sh) {
295
+ op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
296
+ op2->args[0] = ret;
297
+ op2->args[1] = src1;
298
+ op2->args[2] = arg_new_constant(ctx, sh);
299
+ src1 = ret;
300
+ }
301
+ op->opc = and_opc;
302
+ op->args[1] = src1;
303
+ op->args[2] = arg_new_constant(ctx, 1);
304
+ }
305
+
306
+ if (neg && inv) {
307
+ op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
308
+ op2->args[0] = ret;
309
+ op2->args[1] = ret;
310
+ op2->args[2] = arg_new_constant(ctx, 1);
311
+ } else if (inv) {
312
+ op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
313
+ op2->args[0] = ret;
314
+ op2->args[1] = ret;
315
+ op2->args[2] = arg_new_constant(ctx, 1);
316
+ } else if (neg) {
317
+ op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
318
+ op2->args[0] = ret;
319
+ op2->args[1] = ret;
320
+ }
321
+}
322
+
323
static bool fold_setcond(OptContext *ctx, TCGOp *op)
324
{
325
int i = do_constant_folding_cond1(ctx, op->args[0], &op->args[1],
326
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
327
if (i >= 0) {
328
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
329
}
40
}
330
+ fold_setcond_tst_pow2(ctx, op, false);
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
331
51
332
ctx->z_mask = 1;
52
ctx->z_mask = 1;
333
ctx->s_mask = smask_from_zmask(1);
53
return false;
334
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
335
if (i >= 0) {
336
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
337
}
56
}
338
+ fold_setcond_tst_pow2(ctx, op, true);
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
339
67
340
/* Value is {0,-1} so all bits are repetitions of the sign. */
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
341
ctx->s_mask = -1;
69
ctx->s_mask = -1;
342
return false;
343
}
344
345
-
346
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
347
{
348
TCGCond cond;
349
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
350
case 0:
351
goto do_setcond_const;
352
case 1:
353
- op->args[2] = op->args[3];
354
- op->args[3] = cond;
355
- op->opc = INDEX_op_setcond_i32;
356
- break;
357
+ goto do_setcond_low;
358
+ }
359
+ break;
360
+
361
+ case TCG_COND_TSTEQ:
362
+ case TCG_COND_TSTNE:
363
+ if (arg_is_const_val(op->args[2], 0)) {
364
+ goto do_setcond_high;
365
+ }
366
+ if (arg_is_const_val(op->args[4], 0)) {
367
+ goto do_setcond_low;
368
}
369
break;
370
371
default:
372
break;
373
374
+ do_setcond_low:
375
+ op->args[2] = op->args[3];
376
+ op->args[3] = cond;
377
+ op->opc = INDEX_op_setcond_i32;
378
+ return fold_setcond(ctx, op);
379
+
380
do_setcond_high:
381
op->args[1] = op->args[2];
382
op->args[2] = op->args[4];
383
op->args[3] = cond;
384
op->opc = INDEX_op_setcond_i32;
385
- break;
386
+ return fold_setcond(ctx, op);
387
}
388
389
ctx->z_mask = 1;
390
--
70
--
391
2.34.1
71
2.43.0
392
393
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
1
Better constraint for tcg_out_cmp, based on the comparison.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/s390x/tcg-target-con-set.h | 6 +--
6
tcg/optimize.c | 24 +++++++++---------------
7
tcg/s390x/tcg-target-con-str.h | 1 +
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
tcg/s390x/tcg-target.c.inc | 72 +++++++++++++++++++++++++---------
9
3 files changed, 58 insertions(+), 21 deletions(-)
10
8
11
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/s390x/tcg-target-con-set.h
11
--- a/tcg/optimize.c
14
+++ b/tcg/s390x/tcg-target-con-set.h
12
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
16
C_O0_I1(r)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
17
C_O0_I2(r, r)
15
{
18
C_O0_I2(r, ri)
16
uint64_t z_mask, s_mask, s_mask_old;
19
-C_O0_I2(r, rJU)
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
+C_O0_I2(r, rC)
18
int pos = op->args[2];
21
C_O0_I2(v, r)
19
int len = op->args[3];
22
C_O0_I3(o, m, r)
20
23
C_O1_I1(r, r)
21
- if (arg_is_const(op->args[1])) {
24
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rI)
22
- uint64_t t;
25
C_O1_I2(r, 0, rJ)
23
-
26
C_O1_I2(r, r, r)
24
- t = arg_info(op->args[1])->val;
27
C_O1_I2(r, r, ri)
25
- t = sextract64(t, pos, len);
28
-C_O1_I2(r, r, rJU)
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
29
+C_O1_I2(r, r, rC)
27
+ if (ti_is_const(t1)) {
30
C_O1_I2(r, r, rI)
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
31
C_O1_I2(r, r, rJ)
29
+ sextract64(ti_const_val(t1), pos, len));
32
C_O1_I2(r, r, rK)
33
@@ -XXX,XX +XXX,XX @@ C_O1_I2(v, v, r)
34
C_O1_I2(v, v, v)
35
C_O1_I3(v, v, v, v)
36
C_O1_I4(r, r, ri, rI, r)
37
-C_O1_I4(r, r, rJU, rI, r)
38
+C_O1_I4(r, r, rC, rI, r)
39
C_O2_I1(o, m, r)
40
C_O2_I2(o, m, 0, r)
41
C_O2_I2(o, m, r, r)
42
diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/tcg/s390x/tcg-target-con-str.h
45
+++ b/tcg/s390x/tcg-target-con-str.h
46
@@ -XXX,XX +XXX,XX @@ REGS('o', 0xaaaa) /* odd numbered general regs */
47
* Define constraint letters for constants:
48
* CONST(letter, TCG_CT_CONST_* bit set)
49
*/
50
+CONST('C', TCG_CT_CONST_CMP)
51
CONST('I', TCG_CT_CONST_S16)
52
CONST('J', TCG_CT_CONST_S32)
53
CONST('K', TCG_CT_CONST_P32)
54
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/tcg/s390x/tcg-target.c.inc
57
+++ b/tcg/s390x/tcg-target.c.inc
58
@@ -XXX,XX +XXX,XX @@
59
#define TCG_CT_CONST_P32 (1 << 12)
60
#define TCG_CT_CONST_INV (1 << 13)
61
#define TCG_CT_CONST_INVRISBG (1 << 14)
62
+#define TCG_CT_CONST_CMP (1 << 15)
63
64
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
65
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
66
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
67
val = (int32_t)val;
68
}
30
}
69
31
70
+ if (ct & TCG_CT_CONST_CMP) {
32
- z_mask = arg_info(op->args[1])->z_mask;
71
+ switch (cond) {
33
- z_mask = sextract64(z_mask, pos, len);
72
+ case TCG_COND_EQ:
34
- ctx->z_mask = z_mask;
73
+ case TCG_COND_NE:
35
-
74
+ ct |= TCG_CT_CONST_S32 | TCG_CT_CONST_U32; /* CGFI or CLGFI */
36
- s_mask_old = arg_info(op->args[1])->s_mask;
75
+ break;
37
- s_mask = sextract64(s_mask_old, pos, len);
76
+ case TCG_COND_LT:
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
77
+ case TCG_COND_GE:
39
- ctx->s_mask = s_mask;
78
+ case TCG_COND_LE:
40
+ s_mask_old = t1->s_mask;
79
+ case TCG_COND_GT:
41
+ s_mask = s_mask_old >> pos;
80
+ ct |= TCG_CT_CONST_S32; /* CGFI */
42
+ s_mask |= -1ull << (len - 1);
81
+ break;
43
82
+ case TCG_COND_LTU:
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
83
+ case TCG_COND_GEU:
84
+ case TCG_COND_LEU:
85
+ case TCG_COND_GTU:
86
+ ct |= TCG_CT_CONST_U32; /* CLGFI */
87
+ break;
88
+ default:
89
+ g_assert_not_reached();
90
+ }
91
+ }
92
+
93
if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
94
return true;
45
return true;
95
}
46
}
96
@@ -XXX,XX +XXX,XX @@ static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
47
97
goto exit;
48
- return fold_masks(ctx, op);
98
}
49
+ z_mask = sextract64(t1->z_mask, pos, len);
99
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
100
- /*
51
}
101
- * Constraints are for a signed 33-bit operand, which is a
52
102
- * convenient superset of this signed/unsigned test.
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
103
- */
104
- if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
105
- op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
106
- tcg_out_insn_RIL(s, op, r1, c2);
107
- goto exit;
108
+ /* Should match TCG_CT_CONST_CMP. */
109
+ switch (c) {
110
+ case TCG_COND_LT:
111
+ case TCG_COND_GE:
112
+ case TCG_COND_LE:
113
+ case TCG_COND_GT:
114
+ tcg_debug_assert(c2 == (int32_t)c2);
115
+ op = RIL_CGFI;
116
+ break;
117
+ case TCG_COND_EQ:
118
+ case TCG_COND_NE:
119
+ if (c2 == (int32_t)c2) {
120
+ op = RIL_CGFI;
121
+ break;
122
+ }
123
+ /* fall through */
124
+ case TCG_COND_LTU:
125
+ case TCG_COND_GEU:
126
+ case TCG_COND_LEU:
127
+ case TCG_COND_GTU:
128
+ tcg_debug_assert(c2 == (uint32_t)c2);
129
+ op = RIL_CLGFI;
130
+ break;
131
+ default:
132
+ g_assert_not_reached();
133
}
134
-
135
- /* Load everything else into a register. */
136
- tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2);
137
- c2 = TCG_TMP0;
138
- }
139
-
140
- if (type == TCG_TYPE_I32) {
141
+ tcg_out_insn_RIL(s, op, r1, c2);
142
+ } else if (type == TCG_TYPE_I32) {
143
op = (is_unsigned ? RR_CLR : RR_CR);
144
tcg_out_insn_RR(s, op, r1, c2);
145
} else {
146
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
147
return C_O1_I2(r, r, ri);
148
case INDEX_op_setcond_i64:
149
case INDEX_op_negsetcond_i64:
150
- return C_O1_I2(r, r, rJU);
151
+ return C_O1_I2(r, r, rC);
152
153
case INDEX_op_clz_i64:
154
return C_O1_I2(r, r, rI);
155
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
156
case INDEX_op_brcond_i32:
157
return C_O0_I2(r, ri);
158
case INDEX_op_brcond_i64:
159
- return C_O0_I2(r, rJU);
160
+ return C_O0_I2(r, rC);
161
162
case INDEX_op_bswap16_i32:
163
case INDEX_op_bswap16_i64:
164
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
165
case INDEX_op_movcond_i32:
166
return C_O1_I4(r, r, ri, rI, r);
167
case INDEX_op_movcond_i64:
168
- return C_O1_I4(r, r, rJU, rI, r);
169
+ return C_O1_I4(r, r, rC, rI, r);
170
171
case INDEX_op_div2_i32:
172
case INDEX_op_div2_i64:
173
--
54
--
174
2.34.1
55
2.43.0
175
176
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
target/m68k/translate.c | 74 ++++++++++++++++++-----------------------
6
tcg/optimize.c | 27 ++++++++++++++-------------
5
1 file changed, 33 insertions(+), 41 deletions(-)
7
1 file changed, 14 insertions(+), 13 deletions(-)
6
8
7
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/target/m68k/translate.c
11
--- a/tcg/optimize.c
10
+++ b/target/m68k/translate.c
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ undef:
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
12
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
13
{
15
{
14
TCGv fpsr;
16
uint64_t s_mask, z_mask, sign;
15
+ int imm = 0;
17
+ TempOptInfo *t1, *t2;
16
18
17
- c->v2 = tcg_constant_i32(0);
19
if (fold_const2(ctx, op) ||
18
/* TODO: Raise BSUN exception. */
20
fold_ix_to_i(ctx, op, 0) ||
19
fpsr = tcg_temp_new();
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
20
gen_load_fcr(s, fpsr, M68K_FPSR);
22
return true;
21
+ c->v1 = fpsr;
23
}
22
+
24
23
switch (cond) {
25
- s_mask = arg_info(op->args[1])->s_mask;
24
case 0: /* False */
26
- z_mask = arg_info(op->args[1])->z_mask;
25
case 16: /* Signaling False */
27
+ t1 = arg_info(op->args[1]);
26
- c->v1 = c->v2;
28
+ t2 = arg_info(op->args[2]);
27
c->tcond = TCG_COND_NEVER;
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
28
break;
66
break;
29
case 1: /* EQual Z */
67
default:
30
case 17: /* Signaling EQual Z */
31
- c->v1 = tcg_temp_new();
32
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
33
- c->tcond = TCG_COND_NE;
34
+ imm = FPSR_CC_Z;
35
+ c->tcond = TCG_COND_TSTNE;
36
break;
37
case 2: /* Ordered Greater Than !(A || Z || N) */
38
case 18: /* Greater Than !(A || Z || N) */
39
- c->v1 = tcg_temp_new();
40
- tcg_gen_andi_i32(c->v1, fpsr,
41
- FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
42
- c->tcond = TCG_COND_EQ;
43
+ imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
44
+ c->tcond = TCG_COND_TSTEQ;
45
break;
46
case 3: /* Ordered Greater than or Equal Z || !(A || N) */
47
case 19: /* Greater than or Equal Z || !(A || N) */
48
c->v1 = tcg_temp_new();
49
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
50
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
51
- tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
52
tcg_gen_or_i32(c->v1, c->v1, fpsr);
53
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
54
- c->tcond = TCG_COND_NE;
55
+ imm = FPSR_CC_Z | FPSR_CC_N;
56
+ c->tcond = TCG_COND_TSTNE;
57
break;
58
case 4: /* Ordered Less Than !(!N || A || Z); */
59
case 20: /* Less Than !(!N || A || Z); */
60
c->v1 = tcg_temp_new();
61
tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
62
- tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
63
- c->tcond = TCG_COND_EQ;
64
+ imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z;
65
+ c->tcond = TCG_COND_TSTEQ;
66
break;
67
case 5: /* Ordered Less than or Equal Z || (N && !A) */
68
case 21: /* Less than or Equal Z || (N && !A) */
69
@@ -XXX,XX +XXX,XX @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
70
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
71
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
72
tcg_gen_andc_i32(c->v1, fpsr, c->v1);
73
- tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
74
- c->tcond = TCG_COND_NE;
75
+ imm = FPSR_CC_Z | FPSR_CC_N;
76
+ c->tcond = TCG_COND_TSTNE;
77
break;
78
case 6: /* Ordered Greater or Less than !(A || Z) */
79
case 22: /* Greater or Less than !(A || Z) */
80
- c->v1 = tcg_temp_new();
81
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
82
- c->tcond = TCG_COND_EQ;
83
+ imm = FPSR_CC_A | FPSR_CC_Z;
84
+ c->tcond = TCG_COND_TSTEQ;
85
break;
86
case 7: /* Ordered !A */
87
case 23: /* Greater, Less or Equal !A */
88
- c->v1 = tcg_temp_new();
89
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
90
- c->tcond = TCG_COND_EQ;
91
+ imm = FPSR_CC_A;
92
+ c->tcond = TCG_COND_TSTEQ;
93
break;
94
case 8: /* Unordered A */
95
case 24: /* Not Greater, Less or Equal A */
96
- c->v1 = tcg_temp_new();
97
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
98
- c->tcond = TCG_COND_NE;
99
+ imm = FPSR_CC_A;
100
+ c->tcond = TCG_COND_TSTNE;
101
break;
102
case 9: /* Unordered or Equal A || Z */
103
case 25: /* Not Greater or Less then A || Z */
104
- c->v1 = tcg_temp_new();
105
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
106
- c->tcond = TCG_COND_NE;
107
+ imm = FPSR_CC_A | FPSR_CC_Z;
108
+ c->tcond = TCG_COND_TSTNE;
109
break;
110
case 10: /* Unordered or Greater Than A || !(N || Z)) */
111
case 26: /* Not Less or Equal A || !(N || Z)) */
112
c->v1 = tcg_temp_new();
113
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
114
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
115
- tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
116
tcg_gen_or_i32(c->v1, c->v1, fpsr);
117
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
118
- c->tcond = TCG_COND_NE;
119
+ imm = FPSR_CC_A | FPSR_CC_N;
120
+ c->tcond = TCG_COND_TSTNE;
121
break;
122
case 11: /* Unordered or Greater or Equal A || Z || !N */
123
case 27: /* Not Less Than A || Z || !N */
124
c->v1 = tcg_temp_new();
125
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
126
- tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
127
- c->tcond = TCG_COND_NE;
128
+ tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
129
+ imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
130
+ c->tcond = TCG_COND_TSTNE;
131
break;
132
case 12: /* Unordered or Less Than A || (N && !Z) */
133
case 28: /* Not Greater than or Equal A || (N && !Z) */
134
@@ -XXX,XX +XXX,XX @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
135
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
136
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
137
tcg_gen_andc_i32(c->v1, fpsr, c->v1);
138
- tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
139
- c->tcond = TCG_COND_NE;
140
+ imm = FPSR_CC_A | FPSR_CC_N;
141
+ c->tcond = TCG_COND_TSTNE;
142
break;
143
case 13: /* Unordered or Less or Equal A || Z || N */
144
case 29: /* Not Greater Than A || Z || N */
145
- c->v1 = tcg_temp_new();
146
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
147
- c->tcond = TCG_COND_NE;
148
+ imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
149
+ c->tcond = TCG_COND_TSTNE;
150
break;
151
case 14: /* Not Equal !Z */
152
case 30: /* Signaling Not Equal !Z */
153
- c->v1 = tcg_temp_new();
154
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
155
- c->tcond = TCG_COND_EQ;
156
+ imm = FPSR_CC_Z;
157
+ c->tcond = TCG_COND_TSTEQ;
158
break;
159
case 15: /* True */
160
case 31: /* Signaling True */
161
- c->v1 = c->v2;
162
c->tcond = TCG_COND_ALWAYS;
163
break;
68
break;
164
}
69
}
165
+ c->v2 = tcg_constant_i32(imm);
70
71
- return false;
72
+ return finish_folding(ctx, op);
166
}
73
}
167
74
168
static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
169
--
76
--
170
2.34.1
77
2.43.0
171
172
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
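
(Illustration only, not part of the patch: a minimal standalone check of the
identity, assuming sign -- i.e. -s_mask -- is either 0 or a single bit, which
is what the new s_mask representation yields here.)

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Original two-step test: sign is nonzero and not possibly set in z_mask. */
    static bool test_old(uint64_t z_mask, uint64_t sign)
    {
        return sign != 0 && !(z_mask & sign);
    }

    /* Folded test: the sign bit is known zero. */
    static bool test_new(uint64_t z_mask, uint64_t sign)
    {
        return (~z_mask & sign) != 0;
    }

    int main(void)
    {
        /* sign restricted to 0 or a single bit, as produced by -s_mask. */
        uint64_t signs[] = { 0, 1u << 7, 1ull << 31, 1ull << 63 };
        uint64_t masks[] = { 0, 0x7f, 0xff, ~0ull };

        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                assert(test_old(masks[j], signs[i]) == test_new(masks[j], signs[i]));
            }
        }
        return 0;
    }
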
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t s_mask, z_mask, sign;
20
+ uint64_t s_mask, z_mask;
21
TempOptInfo *t1, *t2;
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
33
break;
34
--
35
2.43.0
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
2
3
3
In order to ease next commit review, modify tcg_out_brcond()
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
to switch over TCGCond. No logical change intended.
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Message-Id: <20240119224737.48943-1-philmd@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
tcg/aarch64/tcg-target.c.inc | 31 +++++++++++++++++++++++--------
7
tcg/optimize.c | 9 ++++++---
11
1 file changed, 23 insertions(+), 8 deletions(-)
8
1 file changed, 6 insertions(+), 3 deletions(-)
12
9
13
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/aarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
16
+++ b/tcg/aarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
18
TCGArg b, bool b_const, TCGLabel *l)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
19
{
23
{
20
intptr_t offset;
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
21
- bool need_cmp;
25
+ if (fold_const2(ctx, op) ||
22
+ bool need_cmp = true;
26
+ fold_xx_to_i(ctx, op, 0) ||
23
27
+ fold_xi_to_x(ctx, op, 0) ||
24
- if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) {
28
+ fold_sub_to_neg(ctx, op)) {
25
- need_cmp = false;
29
return true;
26
- } else {
27
- need_cmp = true;
28
+ switch (c) {
29
+ case TCG_COND_EQ:
30
+ case TCG_COND_NE:
31
+ if (b_const && b == 0) {
32
+ need_cmp = false;
33
+ }
34
+ break;
35
+ default:
36
+ break;
37
+ }
38
+
39
+ if (need_cmp) {
40
tcg_out_cmp(s, ext, c, a, b, b_const);
41
}
30
}
42
31
43
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
44
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
45
if (need_cmp) {
34
op->args[2] = arg_new_constant(ctx, -val);
46
tcg_out_insn(s, 3202, B_C, c, offset);
47
- } else if (c == TCG_COND_EQ) {
48
- tcg_out_insn(s, 3201, CBZ, ext, a, offset);
49
} else {
50
- tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
51
+ switch (c) {
52
+ case TCG_COND_EQ:
53
+ tcg_out_insn(s, 3201, CBZ, ext, a, offset);
54
+ break;
55
+ case TCG_COND_NE:
56
+ tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
57
+ break;
58
+ default:
59
+ g_assert_not_reached();
60
+ }
61
}
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
62
}
38
}
63
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
64
--
41
--
65
2.34.1
42
2.43.0
66
67
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/tci/tcg-target.h | 2 +-
6
tcg/optimize.c | 16 +++++++++-------
5
tcg/tci.c | 14 ++++++++++++++
7
1 file changed, 9 insertions(+), 7 deletions(-)
6
2 files changed, 15 insertions(+), 1 deletion(-)
7
8
8
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci/tcg-target.h
11
--- a/tcg/optimize.c
11
+++ b/tcg/tci/tcg-target.h
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
13
14
14
#define TCG_TARGET_HAS_qemu_ldst_i128 0
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
15
16
{
16
-#define TCG_TARGET_HAS_tst 0
17
+ uint64_t z_mask = -1, s_mask = 0;
17
+#define TCG_TARGET_HAS_tst 1
18
+
18
19
/* We can't do any folding with a load, but we can record bits. */
19
/* Number of registers available. */
20
switch (op->opc) {
20
#define TCG_TARGET_NB_REGS 16
21
CASE_OP_32_64(ld8s):
21
diff --git a/tcg/tci.c b/tcg/tci.c
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
22
index XXXXXXX..XXXXXXX 100644
23
+ s_mask = INT8_MIN;
23
--- a/tcg/tci.c
24
+++ b/tcg/tci.c
25
@@ -XXX,XX +XXX,XX @@ static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
26
case TCG_COND_GTU:
27
result = (u0 > u1);
28
break;
24
break;
29
+ case TCG_COND_TSTEQ:
25
CASE_OP_32_64(ld8u):
30
+ result = (u0 & u1) == 0;
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
31
+ break;
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
32
+ case TCG_COND_TSTNE:
28
break;
33
+ result = (u0 & u1) != 0;
29
CASE_OP_32_64(ld16s):
34
+ break;
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
35
default:
45
default:
36
g_assert_not_reached();
46
g_assert_not_reached();
37
}
47
}
38
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
48
- return false;
39
case TCG_COND_GTU:
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
40
result = (u0 > u1);
50
}
41
break;
51
42
+ case TCG_COND_TSTEQ:
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
43
+ result = (u0 & u1) == 0;
44
+ break;
45
+ case TCG_COND_TSTNE:
46
+ result = (u0 & u1) != 0;
47
+ break;
48
default:
49
g_assert_not_reached();
50
}
51
@@ -XXX,XX +XXX,XX @@ static const char *str_c(TCGCond c)
52
[TCG_COND_GEU] = "geu",
53
[TCG_COND_LEU] = "leu",
54
[TCG_COND_GTU] = "gtu",
55
+ [TCG_COND_TSTEQ] = "tsteq",
56
+ [TCG_COND_TSTNE] = "tstne",
57
};
58
59
assert((unsigned)c < ARRAY_SIZE(cond));
60
--
53
--
61
2.34.1
54
2.43.0
62
63
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
Just like when testing against the sign bits, TEST r,r can be used when the
4
immediate is 0xff, 0xff00, 0xffff, 0xffffffff.
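
(Illustration only, not QEMU code: a standalone check that the narrower TEST
forms decide "zero or not" exactly like the corresponding immediate masks; the
sample values are arbitrary.)

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t samples[] = { 0, 1, 0x80, 0xff00, 0x12340000,
                               0xffffffff00000000ull };

        for (int i = 0; i < 6; i++) {
            uint64_t r = samples[i];
            /* 8-bit TEST on the low byte */
            assert(((r & 0xff) == 0) == ((uint8_t)r == 0));
            /* 8-bit TEST on the high byte (ah/bh/ch/dh) */
            assert(((r & 0xff00) == 0) == ((uint8_t)(r >> 8) == 0));
            /* 16-bit TEST */
            assert(((r & 0xffff) == 0) == ((uint16_t)r == 0));
            /* 32-bit TEST */
            assert(((r & 0xffffffffu) == 0) == ((uint32_t)r == 0));
        }
        return 0;
    }
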
5
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
3
---
11
tcg/i386/tcg-target.c.inc | 17 +++++++++++++++++
4
tcg/optimize.c | 2 +-
12
1 file changed, 17 insertions(+)
5
1 file changed, 1 insertion(+), 1 deletion(-)
13
6
14
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/i386/tcg-target.c.inc
9
--- a/tcg/optimize.c
17
+++ b/tcg/i386/tcg-target.c.inc
10
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
19
tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1);
12
TCGType type;
20
return js;
13
21
}
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
22
+ if (arg2 == 0xff) {
15
- return false;
23
+ tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1);
16
+ return finish_folding(ctx, op);
24
+ return jz;
25
+ }
26
tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, arg1);
27
tcg_out8(s, arg2);
28
return jz;
29
@@ -XXX,XX +XXX,XX @@ static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
30
tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4);
31
return js;
32
}
33
+ if (arg2 == 0xff00) {
34
+ tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4);
35
+ return jz;
36
+ }
37
tcg_out_modrm(s, OPC_GRP3_Eb, EXT3_TESTi, arg1 + 4);
38
tcg_out8(s, arg2 >> 8);
39
return jz;
40
}
17
}
41
18
42
+ if (arg2 == 0xffff) {
19
type = ctx->type;
43
+ tcg_out_modrm(s, OPC_TESTL | P_DATA16, arg1, arg1);
44
+ return jz;
45
+ }
46
+ if (arg2 == 0xffffffffu) {
47
+ tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
48
+ return jz;
49
+ }
50
+
51
if (is_power_of_2(rexw ? arg2 : (uint32_t)arg2)) {
52
int jc = (cond == TCG_COND_TSTNE ? JCC_JB : JCC_JAE);
53
int sh = ctz64(arg2);
54
--
20
--
55
2.34.1
21
2.43.0
56
57
1
Mirror the new do_constant_folding_cond1 by doing all
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
argument and condition adjustment within one helper.
2
Remove fold_masks as the function becomes unused.
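
(For the fold_xor mask computation below -- illustration only, not QEMU code:
how the per-input masks combine, using hypothetical inputs known to fit in 8
and 16 unsigned bits respectively.)

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical inputs: t1 fits in 8 bits, t2 fits in 16 bits. */
        uint64_t z1 = 0xff,   s1 = ~(uint64_t)0xff;    /* bits 8..63 repeat the msb */
        uint64_t z2 = 0xffff, s2 = ~(uint64_t)0xffff;

        uint64_t z_mask = z1 | z2;   /* xor result may be nonzero only in the low 16 bits */
        uint64_t s_mask = s1 & s2;   /* sign repetitions limited by the wider input */

        assert(z_mask == 0xffff);
        assert(s_mask == ~(uint64_t)0xffff);
        return 0;
    }
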
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/optimize.c | 107 ++++++++++++++++++++++++++-----------------------
7
tcg/optimize.c | 18 ++++++++----------
8
1 file changed, 57 insertions(+), 50 deletions(-)
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
15
return -1;
15
return fold_masks_zs(ctx, op, -1, s_mask);
16
}
16
}
17
17
18
-/*
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
- * Return -1 if the condition can't be simplified,
20
- * and the result of the condition (0 or 1) if it can.
21
- */
22
-static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
23
-{
19
-{
24
- TCGArg al = p1[0], ah = p1[1];
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
25
- TCGArg bl = p2[0], bh = p2[1];
26
-
27
- if (arg_is_const(bl) && arg_is_const(bh)) {
28
- tcg_target_ulong blv = arg_info(bl)->val;
29
- tcg_target_ulong bhv = arg_info(bh)->val;
30
- uint64_t b = deposit64(blv, 32, 32, bhv);
31
-
32
- if (arg_is_const(al) && arg_is_const(ah)) {
33
- tcg_target_ulong alv = arg_info(al)->val;
34
- tcg_target_ulong ahv = arg_info(ah)->val;
35
- uint64_t a = deposit64(alv, 32, 32, ahv);
36
- return do_constant_folding_cond_64(a, b, c);
37
- }
38
- if (b == 0) {
39
- switch (c) {
40
- case TCG_COND_LTU:
41
- return 0;
42
- case TCG_COND_GEU:
43
- return 1;
44
- default:
45
- break;
46
- }
47
- }
48
- }
49
- if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
50
- return do_constant_folding_cond_eq(c);
51
- }
52
- return -1;
53
-}
21
-}
54
-
22
-
55
/**
23
/*
56
* swap_commutative:
24
* An "affected" mask bit is 0 if and only if the result is identical
57
* @dest: TCGArg of the destination argument, or NO_DEST.
25
* to the first input. Thus if the entire mask is 0, the operation
58
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
59
return false;
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask, s_mask;
31
+ TempOptInfo *t1, *t2;
32
+
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
38
}
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
60
}
50
}
61
51
62
+/*
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
63
+ * Return -1 if the condition can't be simplified,
64
+ * and the result of the condition (0 or 1) if it can.
65
+ */
66
static int do_constant_folding_cond1(OptContext *ctx, TCGArg dest,
67
TCGArg *p1, TCGArg *p2, TCGArg *pcond)
68
{
69
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond1(OptContext *ctx, TCGArg dest,
70
return r;
71
}
72
73
+static int do_constant_folding_cond2(OptContext *ctx, TCGArg *args)
74
+{
75
+ TCGArg al, ah, bl, bh;
76
+ TCGCond c;
77
+ bool swap;
78
+
79
+ swap = swap_commutative2(args, args + 2);
80
+ c = args[4];
81
+ if (swap) {
82
+ args[4] = c = tcg_swap_cond(c);
83
+ }
84
+
85
+ al = args[0];
86
+ ah = args[1];
87
+ bl = args[2];
88
+ bh = args[3];
89
+
90
+ if (arg_is_const(bl) && arg_is_const(bh)) {
91
+ tcg_target_ulong blv = arg_info(bl)->val;
92
+ tcg_target_ulong bhv = arg_info(bh)->val;
93
+ uint64_t b = deposit64(blv, 32, 32, bhv);
94
+
95
+ if (arg_is_const(al) && arg_is_const(ah)) {
96
+ tcg_target_ulong alv = arg_info(al)->val;
97
+ tcg_target_ulong ahv = arg_info(ah)->val;
98
+ uint64_t a = deposit64(alv, 32, 32, ahv);
99
+ return do_constant_folding_cond_64(a, b, c);
100
+ }
101
+ if (b == 0) {
102
+ switch (c) {
103
+ case TCG_COND_LTU:
104
+ return 0;
105
+ case TCG_COND_GEU:
106
+ return 1;
107
+ default:
108
+ break;
109
+ }
110
+ }
111
+ }
112
+ if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
113
+ return do_constant_folding_cond_eq(c);
114
+ }
115
+ return -1;
116
+}
117
+
118
static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
119
{
120
for (int i = 0; i < nb_args; i++) {
121
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
122
123
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
124
{
125
- TCGCond cond = op->args[4];
126
- TCGArg label = op->args[5];
127
+ TCGCond cond;
128
+ TCGArg label;
129
int i, inv = 0;
130
131
- if (swap_commutative2(&op->args[0], &op->args[2])) {
132
- op->args[4] = cond = tcg_swap_cond(cond);
133
- }
134
-
135
- i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
136
+ i = do_constant_folding_cond2(ctx, &op->args[0]);
137
+ cond = op->args[4];
138
+ label = op->args[5];
139
if (i >= 0) {
140
goto do_brcond_const;
141
}
142
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
143
144
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
145
{
146
- TCGCond cond = op->args[5];
147
+ TCGCond cond;
148
int i, inv = 0;
149
150
- if (swap_commutative2(&op->args[1], &op->args[3])) {
151
- op->args[5] = cond = tcg_swap_cond(cond);
152
- }
153
-
154
- i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
155
+ i = do_constant_folding_cond2(ctx, &op->args[1]);
156
+ cond = op->args[5];
157
if (i >= 0) {
158
goto do_setcond_const;
159
}
160
--
53
--
161
2.34.1
54
2.43.0
162
163
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Message-Id: <20231028194522.245170-12-richard.henderson@linaro.org>
4
[PMD: Split from bigger patch, part 2/2]
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-Id: <20231108145244.72421-2-philmd@linaro.org>
7
---
3
---
8
tcg/arm/tcg-target.h | 2 +-
4
tcg/optimize.c | 2 +-
9
tcg/arm/tcg-target.c.inc | 29 ++++++++++++++++++++++++++++-
5
1 file changed, 1 insertion(+), 1 deletion(-)
10
2 files changed, 29 insertions(+), 2 deletions(-)
11
6
12
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/arm/tcg-target.h
9
--- a/tcg/optimize.c
15
+++ b/tcg/arm/tcg-target.h
10
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
17
12
return fold_orc(ctx, op);
18
#define TCG_TARGET_HAS_qemu_ldst_i128 0
13
}
19
14
}
20
-#define TCG_TARGET_HAS_tst 0
15
- return false;
21
+#define TCG_TARGET_HAS_tst 1
16
+ return finish_folding(ctx, op);
22
23
#define TCG_TARGET_HAS_v64 use_neon_instructions
24
#define TCG_TARGET_HAS_v128 use_neon_instructions
25
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/arm/tcg-target.c.inc
28
+++ b/tcg/arm/tcg-target.c.inc
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
30
static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
31
TCGArg b, int b_const)
32
{
33
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
34
+ if (!is_tst_cond(cond)) {
35
+ tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
36
+ return cond;
37
+ }
38
+
39
+ cond = tcg_tst_eqne_cond(cond);
40
+ if (b_const) {
41
+ int imm12 = encode_imm(b);
42
+
43
+ /*
44
+ * The compare constraints allow rIN, but TST does not support N.
45
+ * Be prepared to load the constant into a scratch register.
46
+ */
47
+ if (imm12 >= 0) {
48
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
49
+ return cond;
50
+ }
51
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
52
+ b = TCG_REG_TMP;
53
+ }
54
+ tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
55
return cond;
56
}
17
}
57
18
58
@@ -XXX,XX +XXX,XX @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
19
/* Propagate constants and copies, fold constant expressions. */
59
tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
60
return cond;
61
62
+ case TCG_COND_TSTEQ:
63
+ case TCG_COND_TSTNE:
64
+ /* Similar, but with TST instead of CMP. */
65
+ tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
66
+ tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
67
+ return tcg_tst_eqne_cond(cond);
68
+
69
case TCG_COND_LT:
70
case TCG_COND_GE:
71
/* We perform a double-word subtraction and examine the result.
72
--
20
--
73
2.34.1
21
2.43.0
74
75
1
Avoid code duplication by handling 7 of the 14 cases
1
All non-default cases now finish folding within each function.
2
by inverting the test for the other 7 cases.
2
Do the same with the default case and assert it is done after.
3
3
4
Use TCG_COND_TSTNE for cc in {1,3}.
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Use (cc - 1) <= 1 for cc in {1,2}.
6
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
target/s390x/tcg/translate.c | 82 +++++++++++++-----------------------
7
tcg/optimize.c | 6 ++----
12
1 file changed, 30 insertions(+), 52 deletions(-)
8
1 file changed, 2 insertions(+), 4 deletions(-)
13
9
14
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/target/s390x/tcg/translate.c
12
--- a/tcg/optimize.c
17
+++ b/target/s390x/tcg/translate.c
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
19
case CC_OP_STATIC:
15
done = true;
20
c->is_64 = false;
21
c->u.s32.a = cc_op;
22
- switch (mask) {
23
- case 0x8 | 0x4 | 0x2: /* cc != 3 */
24
- cond = TCG_COND_NE;
25
+
26
+ /* Fold half of the cases using bit 3 to invert. */
27
+ switch (mask & 8 ? mask ^ 0xf : mask) {
28
+ case 0x1: /* cc == 3 */
29
+ cond = TCG_COND_EQ;
30
c->u.s32.b = tcg_constant_i32(3);
31
break;
32
- case 0x8 | 0x4 | 0x1: /* cc != 2 */
33
- cond = TCG_COND_NE;
34
- c->u.s32.b = tcg_constant_i32(2);
35
- break;
36
- case 0x8 | 0x2 | 0x1: /* cc != 1 */
37
- cond = TCG_COND_NE;
38
- c->u.s32.b = tcg_constant_i32(1);
39
- break;
40
- case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
41
- cond = TCG_COND_EQ;
42
- c->u.s32.a = tcg_temp_new_i32();
43
- c->u.s32.b = tcg_constant_i32(0);
44
- tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
45
- break;
46
- case 0x8 | 0x4: /* cc < 2 */
47
- cond = TCG_COND_LTU;
48
- c->u.s32.b = tcg_constant_i32(2);
49
- break;
50
- case 0x8: /* cc == 0 */
51
- cond = TCG_COND_EQ;
52
- c->u.s32.b = tcg_constant_i32(0);
53
- break;
54
- case 0x4 | 0x2 | 0x1: /* cc != 0 */
55
- cond = TCG_COND_NE;
56
- c->u.s32.b = tcg_constant_i32(0);
57
- break;
58
- case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
59
- cond = TCG_COND_NE;
60
- c->u.s32.a = tcg_temp_new_i32();
61
- c->u.s32.b = tcg_constant_i32(0);
62
- tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
63
- break;
64
- case 0x4: /* cc == 1 */
65
- cond = TCG_COND_EQ;
66
- c->u.s32.b = tcg_constant_i32(1);
67
- break;
68
- case 0x2 | 0x1: /* cc > 1 */
69
- cond = TCG_COND_GTU;
70
- c->u.s32.b = tcg_constant_i32(1);
71
- break;
72
case 0x2: /* cc == 2 */
73
cond = TCG_COND_EQ;
74
c->u.s32.b = tcg_constant_i32(2);
75
break;
76
- case 0x1: /* cc == 3 */
77
+ case 0x4: /* cc == 1 */
78
cond = TCG_COND_EQ;
79
- c->u.s32.b = tcg_constant_i32(3);
80
+ c->u.s32.b = tcg_constant_i32(1);
81
+ break;
82
+ case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
83
+ cond = TCG_COND_GTU;
84
+ c->u.s32.b = tcg_constant_i32(1);
85
+ break;
86
+ case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
87
+ cond = TCG_COND_TSTNE;
88
+ c->u.s32.b = tcg_constant_i32(1);
89
+ break;
90
+ case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
91
+ cond = TCG_COND_LEU;
92
+ c->u.s32.a = tcg_temp_new_i32();
93
+ c->u.s32.b = tcg_constant_i32(1);
94
+ tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
95
+ break;
96
+ case 0x4 | 0x2 | 0x1: /* cc != 0 */
97
+ cond = TCG_COND_NE;
98
+ c->u.s32.b = tcg_constant_i32(0);
99
break;
16
break;
100
default:
17
default:
101
- /* CC is masked by something else: (8 >> cc) & mask. */
18
+ done = finish_folding(&ctx, op);
102
- cond = TCG_COND_NE;
19
break;
103
- c->u.s32.a = tcg_temp_new_i32();
104
- c->u.s32.b = tcg_constant_i32(0);
105
- tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
106
- tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
107
- break;
108
+ /* case 0: never, handled above. */
109
+ g_assert_not_reached();
110
+ }
111
+ if (mask & 8) {
112
+ cond = tcg_invert_cond(cond);
113
}
20
}
114
break;
21
-
115
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
116
--
28
--
117
2.34.1
29
2.43.0
118
119
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/ppc/tcg-target.c.inc | 27 ++++++++++++++++-----------
7
tcg/optimize.c | 4 ++--
5
1 file changed, 16 insertions(+), 11 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
6
9
7
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
12
}
15
g_assert_not_reached();
13
14
/* test if a constant matches the constraint */
15
-static bool tcg_target_const_match(int64_t val, int ct,
16
+static bool tcg_target_const_match(int64_t sval, int ct,
17
TCGType type, TCGCond cond, int vece)
18
{
19
+ uint64_t uval = sval;
20
+
21
if (ct & TCG_CT_CONST) {
22
return 1;
23
}
16
}
24
17
25
- /* The only 32-bit constraint we use aside from
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
26
- TCG_CT_CONST is TCG_CT_CONST_S16. */
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
27
if (type == TCG_TYPE_I32) {
20
return true;
28
- val = (int32_t)val;
29
+ uval = (uint32_t)sval;
30
+ sval = (int32_t)sval;
31
}
21
}
32
22
33
- if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
34
+ if ((ct & TCG_CT_CONST_S16) && sval == (int16_t)sval) {
24
s_mask = s_mask_old >> pos;
35
return 1;
25
s_mask |= -1ull << (len - 1);
36
- } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
26
37
+ }
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
38
+ if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
39
return 1;
29
return true;
40
- } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
41
+ }
42
+ if ((ct & TCG_CT_CONST_U32) && uval == (uint32_t)uval) {
43
return 1;
44
- } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
45
+ }
46
+ if ((ct & TCG_CT_CONST_ZERO) && sval == 0) {
47
return 1;
48
- } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
49
+ }
50
+ if ((ct & TCG_CT_CONST_MONE) && sval == -1) {
51
return 1;
52
- } else if ((ct & TCG_CT_CONST_WSZ)
53
- && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
54
+ }
55
+ if ((ct & TCG_CT_CONST_WSZ) && sval == (type == TCG_TYPE_I32 ? 32 : 64)) {
56
return 1;
57
}
30
}
58
return 0;
31
59
--
32
--
60
2.34.1
33
2.43.0
61
62
1
Merge tcg_out_testi into tcg_out_cmp and adjust the two uses.
1
The big comment just above says functions should be sorted.
2
Add forward declarations as needed.
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/i386/tcg-target.h | 2 +-
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
7
tcg/i386/tcg-target.c.inc | 95 ++++++++++++++++++++++++---------------
8
1 file changed, 59 insertions(+), 55 deletions(-)
8
2 files changed, 60 insertions(+), 37 deletions(-)
9
9
10
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.h
12
--- a/tcg/optimize.c
13
+++ b/tcg/i386/tcg-target.h
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ typedef enum {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
15
#define TCG_TARGET_HAS_qemu_ldst_i128 \
15
* 3) those that produce information about the result value.
16
(TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
16
*/
17
17
18
-#define TCG_TARGET_HAS_tst 0
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
19
+#define TCG_TARGET_HAS_tst 1
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
20
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
21
/* We do not support older SSE systems, only beginning with AVX1. */
21
+
22
#define TCG_TARGET_HAS_v64 have_avx1
22
static bool fold_add(OptContext *ctx, TCGOp *op)
23
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/i386/tcg-target.c.inc
26
+++ b/tcg/i386/tcg-target.c.inc
27
@@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_jcc[] = {
28
[TCG_COND_GEU] = JCC_JAE,
29
[TCG_COND_LEU] = JCC_JBE,
30
[TCG_COND_GTU] = JCC_JA,
31
+ [TCG_COND_TSTEQ] = JCC_JE,
32
+ [TCG_COND_TSTNE] = JCC_JNE,
33
};
34
35
#if TCG_TARGET_REG_BITS == 64
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
37
static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
38
TCGArg arg2, int const_arg2, int rexw)
39
{
23
{
40
- if (const_arg2) {
24
if (fold_const2_commutative(ctx, op) ||
41
- if (arg2 == 0) {
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
42
- /* test r, r */
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
43
+ int jz;
27
}
44
+
28
45
+ if (!is_tst_cond(cond)) {
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
46
+ if (!const_arg2) {
30
+{
47
+ tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
31
+ /* If true and false values are the same, eliminate the cmp. */
48
+ } else if (arg2 == 0) {
32
+ if (args_are_copies(op->args[2], op->args[3])) {
49
tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
50
} else {
51
+ tcg_debug_assert(!rexw || arg2 == (int32_t)arg2);
52
tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
53
}
54
- } else {
55
- tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
56
+ return tcg_cond_to_jcc[cond];
57
}
58
- return tcg_cond_to_jcc[cond];
59
+
60
+ jz = tcg_cond_to_jcc[cond];
61
+
62
+ if (!const_arg2) {
63
+ tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg2);
64
+ return jz;
65
+ }
34
+ }
66
+
35
+
67
+ if (arg2 <= 0xff && (TCG_TARGET_REG_BITS == 64 || arg1 < 4)) {
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
68
+ tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, arg1);
37
+ uint64_t tv = arg_info(op->args[2])->val;
69
+ tcg_out8(s, arg2);
38
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ return jz;
71
+ }
72
+
39
+
73
+ if ((arg2 & ~0xff00) == 0 && arg1 < 4) {
40
+ if (tv == -1 && fv == 0) {
74
+ tcg_out_modrm(s, OPC_GRP3_Eb, EXT3_TESTi, arg1 + 4);
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
75
+ tcg_out8(s, arg2 >> 8);
42
+ }
76
+ return jz;
43
+ if (tv == 0 && fv == -1) {
77
+ }
44
+ if (TCG_TARGET_HAS_not_vec) {
78
+
45
+ op->opc = INDEX_op_not_vec;
79
+ if (rexw) {
46
+ return fold_not(ctx, op);
80
+ if (arg2 == (uint32_t)arg2) {
47
+ } else {
81
+ rexw = 0;
48
+ op->opc = INDEX_op_xor_vec;
82
+ } else {
49
+ op->args[2] = arg_new_constant(ctx, -1);
83
+ tcg_debug_assert(arg2 == (int32_t)arg2);
50
+ return fold_xor(ctx, op);
51
+ }
84
+ }
52
+ }
85
+ }
53
+ }
86
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_TESTi, arg1);
54
+ if (arg_is_const(op->args[2])) {
87
+ tcg_out32(s, arg2);
55
+ uint64_t tv = arg_info(op->args[2])->val;
88
+ return jz;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
}
89
}
90
90
91
static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond,
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
92
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
93
{
94
TCGLabel *label_next = gen_new_label();
95
TCGLabel *label_this = arg_label(args[5]);
96
+ TCGCond cond = args[4];
97
98
- switch(args[4]) {
99
+ switch (cond) {
100
case TCG_COND_EQ:
101
- tcg_out_brcond(s, 0, TCG_COND_NE, args[0], args[2], const_args[2],
102
- label_next, 1);
103
- tcg_out_brcond(s, 0, TCG_COND_EQ, args[1], args[3], const_args[3],
104
+ case TCG_COND_TSTEQ:
105
+ tcg_out_brcond(s, 0, tcg_invert_cond(cond),
106
+ args[0], args[2], const_args[2], label_next, 1);
107
+ tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
108
label_this, small);
109
break;
110
case TCG_COND_NE:
111
- tcg_out_brcond(s, 0, TCG_COND_NE, args[0], args[2], const_args[2],
112
+ case TCG_COND_TSTNE:
113
+ tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2],
114
label_this, small);
115
- tcg_out_brcond(s, 0, TCG_COND_NE, args[1], args[3], const_args[3],
116
+ tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
117
label_this, small);
118
break;
119
case TCG_COND_LT:
120
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
121
tcg_out8(s, 0x90);
122
}
123
124
-/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */
125
-static void __attribute__((unused))
126
-tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i)
127
-{
92
-{
128
- /*
93
- /* If true and false values are the same, eliminate the cmp. */
129
- * This is used for testing alignment, so we can usually use testb.
94
- if (args_are_copies(op->args[2], op->args[3])) {
130
- * For i686, we have to use testl for %esi/%edi.
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
131
- */
132
- if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) {
133
- tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r);
134
- tcg_out8(s, i);
135
- } else {
136
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r);
137
- tcg_out32(s, i);
138
- }
96
- }
97
-
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
99
- uint64_t tv = arg_info(op->args[2])->val;
100
- uint64_t fv = arg_info(op->args[3])->val;
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
139
-}
144
-}
140
-
145
-
141
typedef struct {
146
/* Propagate constants and copies, fold constant expressions. */
142
TCGReg base;
147
void tcg_optimize(TCGContext *s)
143
int index;
148
{
144
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
145
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
146
offsetof(CPUTLBEntry, addend));
147
} else if (a_mask) {
148
- ldst = new_ldst_label(s);
149
+ int jcc;
150
151
+ ldst = new_ldst_label(s);
152
ldst->is_ld = is_ld;
153
ldst->oi = oi;
154
ldst->addrlo_reg = addrlo;
155
ldst->addrhi_reg = addrhi;
156
157
- tcg_out_testi(s, addrlo, a_mask);
158
/* jne slow_path */
159
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
160
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false);
161
+ tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0);
162
ldst->label_ptr[0] = s->code_ptr;
163
s->code_ptr += 4;
164
}
165
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
166
} else {
167
TCGLabel *l1 = gen_new_label();
168
TCGLabel *l2 = gen_new_label();
169
+ int jcc;
170
171
- tcg_out_testi(s, h.base, 15);
172
- tcg_out_jxx(s, JCC_JNE, l1, true);
173
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, h.base, 15, true, false);
174
+ tcg_out_jxx(s, jcc, l1, true);
175
176
tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg,
177
TCG_TMP_VEC, 0,
178
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
179
} else {
180
TCGLabel *l1 = gen_new_label();
181
TCGLabel *l2 = gen_new_label();
182
+ int jcc;
183
184
- tcg_out_testi(s, h.base, 15);
185
- tcg_out_jxx(s, JCC_JNE, l1, true);
186
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, h.base, 15, true, false);
187
+ tcg_out_jxx(s, jcc, l1, true);
188
189
tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg,
190
TCG_TMP_VEC, 0,
191
--
149
--
192
2.34.1
150
2.43.0
193
194
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
The big comment just above says functions should be sorted.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/ppc/tcg-target.h | 2 +-
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
5
tcg/ppc/tcg-target.c.inc | 122 ++++++++++++++++++++++++++++++++++++---
7
1 file changed, 30 insertions(+), 30 deletions(-)
6
2 files changed, 115 insertions(+), 9 deletions(-)
7
8
8
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/ppc/tcg-target.h
11
--- a/tcg/optimize.c
11
+++ b/tcg/ppc/tcg-target.h
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
13
#define TCG_TARGET_HAS_qemu_ldst_i128 \
14
(TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
15
16
-#define TCG_TARGET_HAS_tst 0
17
+#define TCG_TARGET_HAS_tst 1
18
19
/*
20
* While technically Altivec could support V64, it has no 64-bit store
21
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
22
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/ppc/tcg-target.c.inc
24
+++ b/tcg/ppc/tcg-target.c.inc
25
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
26
return false;
27
}
28
29
+static bool mask_operand(uint32_t c, int *mb, int *me);
30
+static bool mask64_operand(uint64_t c, int *mb, int *me);
31
+
32
/* test if a constant matches the constraint */
33
static bool tcg_target_const_match(int64_t sval, int ct,
34
TCGType type, TCGCond cond, int vece)
35
{
36
uint64_t uval = sval;
37
+ int mb, me;
38
39
if (ct & TCG_CT_CONST) {
40
return 1;
41
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t sval, int ct,
42
case TCG_COND_GTU:
43
ct |= TCG_CT_CONST_U16;
44
break;
45
+ case TCG_COND_TSTEQ:
46
+ case TCG_COND_TSTNE:
47
+ if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) {
48
+ return 1;
49
+ }
50
+ if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32
51
+ ? mask_operand(uval, &mb, &me)
52
+ : mask64_operand(uval << clz64(uval), &mb, &me)) {
53
+ return 1;
54
+ }
55
+ return 0;
56
default:
57
g_assert_not_reached();
58
}
59
@@ -XXX,XX +XXX,XX @@ enum {
60
CR_SO
61
};
62
63
-static const uint32_t tcg_to_bc[] = {
64
+static const uint32_t tcg_to_bc[16] = {
65
[TCG_COND_EQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE,
66
[TCG_COND_NE] = BC | BI(0, CR_EQ) | BO_COND_FALSE,
67
+ [TCG_COND_TSTEQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE,
68
+ [TCG_COND_TSTNE] = BC | BI(0, CR_EQ) | BO_COND_FALSE,
69
[TCG_COND_LT] = BC | BI(0, CR_LT) | BO_COND_TRUE,
70
[TCG_COND_GE] = BC | BI(0, CR_LT) | BO_COND_FALSE,
71
[TCG_COND_LE] = BC | BI(0, CR_GT) | BO_COND_FALSE,
72
@@ -XXX,XX +XXX,XX @@ static const uint32_t tcg_to_bc[] = {
73
};
74
75
/* The low bit here is set if the RA and RB fields must be inverted. */
76
-static const uint32_t tcg_to_isel[] = {
77
+static const uint32_t tcg_to_isel[16] = {
78
[TCG_COND_EQ] = ISEL | BC_(0, CR_EQ),
79
[TCG_COND_NE] = ISEL | BC_(0, CR_EQ) | 1,
80
+ [TCG_COND_TSTEQ] = ISEL | BC_(0, CR_EQ),
81
+ [TCG_COND_TSTNE] = ISEL | BC_(0, CR_EQ) | 1,
82
[TCG_COND_LT] = ISEL | BC_(0, CR_LT),
83
[TCG_COND_GE] = ISEL | BC_(0, CR_LT) | 1,
84
[TCG_COND_LE] = ISEL | BC_(0, CR_GT) | 1,
85
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
86
return true;
14
return true;
87
}
15
}
88
16
89
-static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
90
- int sh, int mb)
18
+{
91
+static void tcg_out_rld_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
19
+ /* Canonicalize the comparison to put immediate second. */
92
+ int sh, int mb, bool rc)
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
93
{
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
94
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
22
+ }
95
sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
23
+ return finish_folding(ctx, op);
96
mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
97
- tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
98
+ tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb | rc);
99
}
100
101
-static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
102
- int sh, int mb, int me)
103
+static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
104
+ int sh, int mb)
105
{
106
- tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
107
+ tcg_out_rld_rc(s, op, ra, rs, sh, mb, false);
108
+}
24
+}
109
+
25
+
110
+static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
111
+ int sh, int mb, int me, bool rc)
112
+{
27
+{
113
+ tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc);
28
+ /* If true and false values are the same, eliminate the cmp. */
29
+ if (args_are_copies(op->args[3], op->args[4])) {
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
31
+ }
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
114
+}
45
+}
115
+
46
+
116
+static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
117
+ int sh, int mb, int me)
48
{
118
+{
49
uint64_t z_mask, s_mask;
119
+ tcg_out_rlw_rc(s, op, ra, rs, sh, mb, me, false);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
120
}
52
}
121
53
122
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
123
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
55
-{
124
return false;
56
- /* Canonicalize the comparison to put immediate second. */
125
}
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
126
58
- op->args[3] = tcg_swap_cond(op->args[3]);
127
+/*
59
- }
128
+ * Set dest non-zero if and only if (arg1 & arg2) is non-zero.
60
- return finish_folding(ctx, op);
129
+ * If RC, then also set RC0.
61
-}
130
+ */
62
-
131
+static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
132
+ bool const_arg2, TCGType type, bool rc)
64
-{
133
+{
65
- /* If true and false values are the same, eliminate the cmp. */
134
+ int mb, me;
66
- if (args_are_copies(op->args[3], op->args[4])) {
135
+
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
136
+ if (!const_arg2) {
68
- }
137
+ tcg_out32(s, AND | SAB(arg1, dest, arg2) | rc);
69
-
138
+ return;
70
- /* Canonicalize the comparison to put immediate second. */
139
+ }
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
140
+
72
- op->args[5] = tcg_swap_cond(op->args[5]);
141
+ if (type == TCG_TYPE_I32) {
73
- }
142
+ arg2 = (uint32_t)arg2;
74
- /*
143
+ } else if (arg2 == (uint32_t)arg2) {
75
- * Canonicalize the "false" input reg to match the destination,
144
+ type = TCG_TYPE_I32;
76
- * so that the tcg backend can implement "move if true".
145
+ }
77
- */
146
+
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
147
+ if ((arg2 & ~0xffff) == 0) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
148
+ tcg_out32(s, ANDI | SAI(arg1, dest, arg2));
80
- }
149
+ return;
81
- return finish_folding(ctx, op);
150
+ }
82
-}
151
+ if ((arg2 & ~0xffff0000ull) == 0) {
83
-
152
+ tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16));
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
153
+ return;
154
+ }
155
+ if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
156
+ if (mask_operand(arg2, &mb, &me)) {
157
+ tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc);
158
+ return;
159
+ }
160
+ } else {
161
+ int sh = clz64(arg2);
162
+ if (mask64_operand(arg2 << sh, &mb, &me)) {
163
+ tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc);
164
+ return;
165
+ }
166
+ }
167
+ /* Constraints should satisfy this. */
168
+ g_assert_not_reached();
169
+}
170
+
171
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
172
int const_arg2, int cr, TCGType type)
173
{
85
{
174
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
86
uint64_t z_mask, s_mask, s_mask_old;
175
imm = 0;
176
break;
177
178
+ case TCG_COND_TSTEQ:
179
+ case TCG_COND_TSTNE:
180
+ tcg_debug_assert(cr == 0);
181
+ tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, true);
182
+ return;
183
+
184
case TCG_COND_LT:
185
case TCG_COND_GE:
186
case TCG_COND_LE:
187
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
188
tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
189
break;
190
191
+ case TCG_COND_TSTEQ:
192
+ tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
193
+ tcg_out_setcond_eq0(s, type, arg0, TCG_REG_R0, neg);
194
+ break;
195
+
196
+ case TCG_COND_TSTNE:
197
+ tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
198
+ tcg_out_setcond_ne0(s, type, arg0, TCG_REG_R0, neg);
199
+ break;
200
+
201
case TCG_COND_LE:
202
case TCG_COND_LEU:
203
inv = true;
204
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
205
tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
206
break;
207
208
+ case TCG_COND_TSTEQ:
209
+ case TCG_COND_TSTNE:
210
+ if (blconst) {
211
+ tcg_out_andi32(s, TCG_REG_R0, al, bl);
212
+ } else {
213
+ tcg_out32(s, AND | SAB(al, TCG_REG_R0, bl));
214
+ }
215
+ if (bhconst) {
216
+ tcg_out_andi32(s, TCG_REG_TMP1, ah, bh);
217
+ } else {
218
+ tcg_out32(s, AND | SAB(ah, TCG_REG_TMP1, bh));
219
+ }
220
+ tcg_out32(s, OR | SAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_TMP1) | 1);
221
+ break;
222
+
223
case TCG_COND_LT:
224
case TCG_COND_LE:
225
case TCG_COND_GT:
226
--
87
--
227
2.34.1
88
2.43.0
228
229
1
Define as 0 for all tcg backends.
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
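As a rough sketch of the intended usage (the wrapper name below is purely illustrative), the existing muladd entry points become the scale == 0 case, and a caller that previously passed float_muladd_halve_result instead passes a scale of -1, i.e. (a * b + c) * 2**-1 with a single rounding at the end:

    /* Illustrative only; equivalent to the old halve-result behaviour. */
    float64 fused_madd_halved(float64 a, float64 b, float64 c, float_status *s)
    {
        return float64_muladd_scalbn(a, b, c, -1, 0, s);
    }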
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/aarch64/tcg-target.h | 2 ++
7
include/fpu/softfloat.h | 6 ++++
7
tcg/arm/tcg-target.h | 2 ++
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
8
tcg/i386/tcg-target.h | 2 ++
9
fpu/softfloat-parts.c.inc | 7 +++--
9
tcg/loongarch64/tcg-target.h | 2 ++
10
3 files changed, 44 insertions(+), 27 deletions(-)
10
tcg/mips/tcg-target.h | 2 ++
11
11
tcg/ppc/tcg-target.h | 2 ++
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
tcg/riscv/tcg-target.h | 2 ++
13
tcg/s390x/tcg-target.h | 2 ++
14
tcg/sparc64/tcg-target.h | 2 ++
15
tcg/tci/tcg-target.h | 2 ++
16
10 files changed, 20 insertions(+)
17
18
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
19
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
20
--- a/tcg/aarch64/tcg-target.h
14
--- a/include/fpu/softfloat.h
21
+++ b/tcg/aarch64/tcg-target.h
15
+++ b/include/fpu/softfloat.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum {
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
23
#define TCG_TARGET_HAS_qemu_ldst_i128 1
17
float16 float16_sub(float16, float16, float_status *status);
24
#endif
18
float16 float16_mul(float16, float16, float_status *status);
25
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
26
+#define TCG_TARGET_HAS_tst 0
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
27
+
100
+
28
#define TCG_TARGET_HAS_v64 1
101
+float32 QEMU_SOFTFLOAT_ATTR
29
#define TCG_TARGET_HAS_v128 1
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
30
#define TCG_TARGET_HAS_v256 0
103
+ int scale, int flags, float_status *status)
31
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
32
index XXXXXXX..XXXXXXX 100644
190
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/arm/tcg-target.h
191
--- a/fpu/softfloat-parts.c.inc
34
+++ b/tcg/arm/tcg-target.h
192
+++ b/fpu/softfloat-parts.c.inc
35
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
36
194
* Requires A and C extracted into a double-sized structure to provide the
37
#define TCG_TARGET_HAS_qemu_ldst_i128 0
195
* extra space for the widening multiply.
38
196
*/
39
+#define TCG_TARGET_HAS_tst 0
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
40
+
198
- FloatPartsN *c, int flags, float_status *s)
41
#define TCG_TARGET_HAS_v64 use_neon_instructions
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
42
#define TCG_TARGET_HAS_v128 use_neon_instructions
200
+ FloatPartsN *c, int scale,
43
#define TCG_TARGET_HAS_v256 0
201
+ int flags, float_status *s)
44
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
202
{
45
index XXXXXXX..XXXXXXX 100644
203
int ab_mask, abc_mask;
46
--- a/tcg/i386/tcg-target.h
204
FloatPartsW p_widen, c_widen;
47
+++ b/tcg/i386/tcg-target.h
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
48
@@ -XXX,XX +XXX,XX @@ typedef enum {
206
a->exp = p_widen.exp;
49
#define TCG_TARGET_HAS_qemu_ldst_i128 \
207
50
(TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
208
return_normal:
51
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
52
+#define TCG_TARGET_HAS_tst 0
210
if (flags & float_muladd_halve_result) {
53
+
211
a->exp -= 1;
54
/* We do not support older SSE systems, only beginning with AVX1. */
212
}
55
#define TCG_TARGET_HAS_v64 have_avx1
213
+ a->exp += scale;
56
#define TCG_TARGET_HAS_v128 have_avx1
214
finish_sign:
57
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
215
if (flags & float_muladd_negate_result) {
58
index XXXXXXX..XXXXXXX 100644
216
a->sign ^= 1;
59
--- a/tcg/loongarch64/tcg-target.h
60
+++ b/tcg/loongarch64/tcg-target.h
61
@@ -XXX,XX +XXX,XX @@ typedef enum {
62
63
#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
64
65
+#define TCG_TARGET_HAS_tst 0
66
+
67
#define TCG_TARGET_HAS_v64 0
68
#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
69
#define TCG_TARGET_HAS_v256 0
70
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/tcg/mips/tcg-target.h
73
+++ b/tcg/mips/tcg-target.h
74
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
75
76
#define TCG_TARGET_HAS_qemu_ldst_i128 0
77
78
+#define TCG_TARGET_HAS_tst 0
79
+
80
#define TCG_TARGET_DEFAULT_MO 0
81
#define TCG_TARGET_NEED_LDST_LABELS
82
#define TCG_TARGET_NEED_POOL_LABELS
83
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/ppc/tcg-target.h
86
+++ b/tcg/ppc/tcg-target.h
87
@@ -XXX,XX +XXX,XX @@ typedef enum {
88
#define TCG_TARGET_HAS_qemu_ldst_i128 \
89
(TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
90
91
+#define TCG_TARGET_HAS_tst 0
92
+
93
/*
94
* While technically Altivec could support V64, it has no 64-bit store
95
* instruction and substituting two 32-bit stores makes the generated
96
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
97
index XXXXXXX..XXXXXXX 100644
98
--- a/tcg/riscv/tcg-target.h
99
+++ b/tcg/riscv/tcg-target.h
100
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
101
102
#define TCG_TARGET_HAS_qemu_ldst_i128 0
103
104
+#define TCG_TARGET_HAS_tst 0
105
+
106
#define TCG_TARGET_DEFAULT_MO (0)
107
108
#define TCG_TARGET_NEED_LDST_LABELS
109
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
110
index XXXXXXX..XXXXXXX 100644
111
--- a/tcg/s390x/tcg-target.h
112
+++ b/tcg/s390x/tcg-target.h
113
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
114
115
#define TCG_TARGET_HAS_qemu_ldst_i128 1
116
117
+#define TCG_TARGET_HAS_tst 0
118
+
119
#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
120
#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
121
#define TCG_TARGET_HAS_v256 0
122
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
123
index XXXXXXX..XXXXXXX 100644
124
--- a/tcg/sparc64/tcg-target.h
125
+++ b/tcg/sparc64/tcg-target.h
126
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
127
128
#define TCG_TARGET_HAS_qemu_ldst_i128 0
129
130
+#define TCG_TARGET_HAS_tst 0
131
+
132
#define TCG_AREG0 TCG_REG_I0
133
134
#define TCG_TARGET_DEFAULT_MO (0)
135
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/tci/tcg-target.h
138
+++ b/tcg/tci/tcg-target.h
139
@@ -XXX,XX +XXX,XX @@
140
141
#define TCG_TARGET_HAS_qemu_ldst_i128 0
142
143
+#define TCG_TARGET_HAS_tst 0
144
+
145
/* Number of registers available. */
146
#define TCG_TARGET_NB_REGS 16
147
148
--
217
--
149
2.34.1
218
2.43.0
150
219
151
220
1
Return the x86 condition codes to use after the compare.
1
Use the scalbn interface instead of float_muladd_halve_result.
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/i386/tcg-target.c.inc | 24 +++++++++++++-----------
6
target/arm/tcg/helper-a64.c | 6 +++---
7
1 file changed, 13 insertions(+), 11 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
8
8
9
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/i386/tcg-target.c.inc
11
--- a/target/arm/tcg/helper-a64.c
12
+++ b/tcg/i386/tcg-target.c.inc
12
+++ b/target/arm/tcg/helper-a64.c
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
14
(float16_is_infinity(b) && float16_is_zero(a))) {
15
return float16_one_point_five;
14
}
16
}
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
15
}
19
}
16
20
17
-static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
18
- int const_arg2, int rexw)
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
19
+static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
23
(float32_is_infinity(b) && float32_is_zero(a))) {
20
+ TCGArg arg2, int const_arg2, int rexw)
24
return float32_one_point_five;
21
{
22
if (const_arg2) {
23
if (arg2 == 0) {
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
25
} else {
26
tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
27
}
25
}
28
+ return tcg_cond_to_jcc[cond];
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
29
}
28
}
30
29
31
static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond,
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
TCGArg arg1, TCGArg arg2, int const_arg2,
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
33
TCGLabel *label, bool small)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
34
{
33
return float64_one_point_five;
35
- tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
34
}
36
- tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
37
+ int jcc = tcg_out_cmp(s, cond, arg1, arg2, const_arg2, rexw);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
38
+ tcg_out_jxx(s, jcc, label, small);
39
}
37
}
40
38
41
#if TCG_TARGET_REG_BITS == 32
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
43
{
44
bool inv = false;
45
bool cleared;
46
+ int jcc;
47
48
switch (cond) {
49
case TCG_COND_NE:
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
51
* We can then use NEG or INC to produce the desired result.
52
* This is always smaller than the SETCC expansion.
53
*/
54
- tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
55
+ tcg_out_cmp(s, TCG_COND_LTU, arg1, arg2, const_arg2, rexw);
56
57
/* X - X - C = -C = (C ? -1 : 0) */
58
tgen_arithr(s, ARITH_SBB + (neg ? rexw : 0), dest, dest);
59
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
60
cleared = true;
61
}
62
63
- tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
64
- tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
65
+ jcc = tcg_out_cmp(s, cond, arg1, arg2, const_arg2, rexw);
66
+ tcg_out_modrm(s, OPC_SETCC | jcc, 0, dest);
67
68
if (!cleared) {
69
tcg_out_ext8u(s, dest, dest);
70
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
71
TCGReg dest, TCGReg c1, TCGArg c2, int const_c2,
72
TCGReg v1)
73
{
74
- tcg_out_cmp(s, c1, c2, const_c2, rexw);
75
- tcg_out_cmov(s, tcg_cond_to_jcc[cond], rexw, dest, v1);
76
+ int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw);
77
+ tcg_out_cmov(s, jcc, rexw, dest, v1);
78
}
79
80
static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
81
@@ -XXX,XX +XXX,XX @@ static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
82
tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
83
84
/* Since we have destroyed the flags from BSR, we have to re-test. */
85
- tcg_out_cmp(s, arg1, 0, 1, rexw);
86
- tcg_out_cmov(s, JCC_JE, rexw, dest, arg2);
87
+ int jcc = tcg_out_cmp(s, TCG_COND_EQ, arg1, 0, 1, rexw);
88
+ tcg_out_cmov(s, jcc, rexw, dest, arg2);
89
}
90
}
91
92
--
40
--
93
2.34.1
41
2.43.0
94
42
95
43
1
Use the scalbn interface instead of float_muladd_halve_result.
2
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
target/sparc/translate.c | 4 ++--
6
target/sparc/helper.h | 4 +-
5
1 file changed, 2 insertions(+), 2 deletions(-)
7
target/sparc/fop_helper.c | 8 ++--
6
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
7
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
8
index XXXXXXX..XXXXXXX 100644
60
index XXXXXXX..XXXXXXX 100644
9
--- a/target/sparc/translate.c
61
--- a/target/sparc/translate.c
10
+++ b/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
11
@@ -XXX,XX +XXX,XX @@ static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
12
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
64
13
{
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
14
TCGv zero = tcg_constant_tl(0);
66
{
15
+ TCGv one = tcg_constant_tl(1);
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
16
TCGv t_src1 = tcg_temp_new();
68
+ TCGv_i32 z = tcg_constant_i32(0);
17
TCGv t_src2 = tcg_temp_new();
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
18
TCGv t0 = tcg_temp_new();
70
}
19
@@ -XXX,XX +XXX,XX @@ static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
71
20
* if (!(env->y & 1))
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
21
* src2 = 0;
73
{
22
*/
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
23
- tcg_gen_andi_tl(t0, cpu_y, 0x1);
75
+ TCGv_i32 z = tcg_constant_i32(0);
24
- tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
25
+ tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);
77
}
26
78
27
/*
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
28
* b2 = src1 & 1;
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
29
--
205
--
30
2.34.1
206
2.43.0
31
207
32
208
1
Use a non-zero value here (an illegal encoding) as a better
1
All uses have been converted to float*_muladd_scalbn.
2
condition than is_unsigned_cond for when MOVR/BPR is usable.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/sparc64/tcg-target.c.inc | 25 ++++++++++++++-----------
6
include/fpu/softfloat.h | 3 ---
8
1 file changed, 14 insertions(+), 11 deletions(-)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
9
10
10
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/sparc64/tcg-target.c.inc
13
--- a/include/fpu/softfloat.h
13
+++ b/tcg/sparc64/tcg-target.c.inc
14
+++ b/include/fpu/softfloat.h
14
@@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_bcond[] = {
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
15
[TCG_COND_GTU] = COND_GU,
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
-| We also support halving the result before rounding, as a special
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
21
*----------------------------------------------------------------------------*/
22
enum {
23
float_muladd_negate_c = 1,
24
float_muladd_negate_product = 2,
25
float_muladd_negate_result = 4,
26
- float_muladd_halve_result = 8,
16
};
27
};
17
28
18
-static const uint8_t tcg_cond_to_rcond[] = {
29
/*----------------------------------------------------------------------------
19
+static const uint8_t tcg_cond_to_rcond[16] = {
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
20
[TCG_COND_EQ] = RCOND_Z,
31
index XXXXXXX..XXXXXXX 100644
21
[TCG_COND_NE] = RCOND_NZ,
32
--- a/fpu/softfloat.c
22
[TCG_COND_LT] = RCOND_LZ,
33
+++ b/fpu/softfloat.c
23
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
24
int32_t arg2, int const_arg2, TCGLabel *l)
35
if (unlikely(!can_use_fpu(s))) {
25
{
36
goto soft;
26
/* For 64-bit signed comparisons vs zero, we can avoid the compare. */
37
}
27
- if (arg2 == 0 && !is_unsigned_cond(cond)) {
38
- if (unlikely(flags & float_muladd_halve_result)) {
28
+ int rcond = tcg_cond_to_rcond[cond];
39
- goto soft;
29
+ if (arg2 == 0 && rcond) {
40
- }
30
int off16 = 0;
41
31
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
32
if (l->has_value) {
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
33
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
34
tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
45
if (unlikely(!can_use_fpu(s))) {
35
}
46
goto soft;
36
tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
47
}
37
- | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
48
- if (unlikely(flags & float_muladd_halve_result)) {
38
+ | INSN_COND(rcond) | off16);
49
- goto soft;
39
} else {
50
- }
40
tcg_out_cmp(s, arg1, arg2, const_arg2);
51
41
tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
43
tcg_out_nop(s);
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
44
}
55
index XXXXXXX..XXXXXXX 100644
45
56
--- a/fpu/softfloat-parts.c.inc
46
-static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
57
+++ b/fpu/softfloat-parts.c.inc
47
+static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
48
int32_t v1, int v1const)
59
a->exp = p_widen.exp;
49
{
60
50
- tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
61
return_normal:
51
- | (tcg_cond_to_rcond[cond] << 10)
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
52
+ tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
63
- if (flags & float_muladd_halve_result) {
53
| (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
64
- a->exp -= 1;
54
}
65
- }
55
66
a->exp += scale;
56
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
67
finish_sign:
57
/* For 64-bit signed comparisons vs zero, we can avoid the compare.
68
if (flags & float_muladd_negate_result) {
58
Note that the immediate range is one bit smaller, so we must check
59
for that as well. */
60
- if (c2 == 0 && !is_unsigned_cond(cond)
61
- && (!v1const || check_fit_i32(v1, 10))) {
62
- tcg_out_movr(s, cond, ret, c1, v1, v1const);
63
+ int rcond = tcg_cond_to_rcond[cond];
64
+ if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
65
+ tcg_out_movr(s, rcond, ret, c1, v1, v1const);
66
} else {
67
tcg_out_cmp(s, c1, c2, c2const);
68
tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
69
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
70
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
71
TCGReg c1, int32_t c2, int c2const, bool neg)
72
{
73
+ int rcond;
74
+
75
if (use_vis3_instructions && !neg) {
76
switch (cond) {
77
case TCG_COND_NE:
78
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
79
80
/* For 64-bit signed comparisons vs zero, we can avoid the compare
81
if the input does not overlap the output. */
82
- if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
83
+ rcond = tcg_cond_to_rcond[cond];
84
+ if (c2 == 0 && rcond && c1 != ret) {
85
tcg_out_movi_s13(s, ret, 0);
86
- tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1);
87
+ tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
88
} else {
89
tcg_out_cmp(s, c1, c2, c2const);
90
tcg_out_movi_s13(s, ret, 0);
91
--
69
--
92
2.34.1
70
2.43.0
93
71
94
72
New patch
1
This rounding mode is used by Hexagon.
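A minimal sketch of the intended use, mirroring the Hexagon helper converted later in this series (variable names are illustrative): after selecting the mode, a result that would overflow rounds to the largest finite value of the format instead of becoming infinity.

    set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
    /* An overflowing result now saturates to the maximum normal float32
     * rather than returning +Inf/-Inf. */
    RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);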
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
1
Use "test x,x" when the bit is one of the 4 sign bits.
1
Certain Hexagon instructions suppress changes to the result
2
Use "bt imm,x" otherwise.
2
when the product of fma() is a true zero.
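To sketch the effect (the operands x, c and the status fs are illustrative): when the product is an exact zero, the addend is returned unchanged, so the sign of a zero addend is preserved rather than folded with the product's zero.

    /* With the flag, 0 * x + (-0.0) (x finite) yields -0.0; without it,
     * round-to-nearest would fold (+0) + (-0) to +0.0. */
    float32 r = float32_muladd(float32_zero, x, c,
                               float_muladd_suppress_add_product_zero, &fs);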
3
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/i386/tcg-target-con-set.h | 6 ++--
6
include/fpu/softfloat.h | 5 +++++
7
tcg/i386/tcg-target-con-str.h | 1 +
7
fpu/softfloat.c | 3 +++
8
tcg/i386/tcg-target.c.inc | 54 +++++++++++++++++++++++++++++++----
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 53 insertions(+), 8 deletions(-)
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
10
11
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target-con-set.h
13
--- a/include/fpu/softfloat.h
14
+++ b/tcg/i386/tcg-target-con-set.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ C_O0_I2(L, L)
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
C_O0_I2(qi, r)
16
| Using these differs from negating an input or output before calling
17
C_O0_I2(re, r)
17
| the muladd function in that this means that a NaN doesn't have its
18
C_O0_I2(ri, r)
18
| sign bit inverted before it is propagated.
19
-C_O0_I2(r, re)
19
+|
20
+C_O0_I2(r, reT)
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
C_O0_I2(s, L)
21
+| such that the product is a true zero, then return C without addition.
22
C_O0_I2(x, r)
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
C_O0_I3(L, L, L)
23
*----------------------------------------------------------------------------*/
24
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, r)
24
enum {
25
C_O1_I1(x, r)
25
float_muladd_negate_c = 1,
26
C_O1_I1(x, x)
26
float_muladd_negate_product = 2,
27
C_O1_I2(q, 0, qi)
27
float_muladd_negate_result = 4,
28
-C_O1_I2(q, r, re)
28
+ float_muladd_suppress_add_product_zero = 8,
29
+C_O1_I2(q, r, reT)
29
};
30
C_O1_I2(r, 0, ci)
30
31
C_O1_I2(r, 0, r)
31
/*----------------------------------------------------------------------------
32
C_O1_I2(r, 0, re)
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
@@ -XXX,XX +XXX,XX @@ C_N1_I2(r, r, r)
34
C_N1_I2(r, r, rW)
35
C_O1_I3(x, 0, x, x)
36
C_O1_I3(x, x, x, x)
37
-C_O1_I4(r, r, re, r, 0)
38
+C_O1_I4(r, r, reT, r, 0)
39
C_O1_I4(r, r, r, ri, ri)
40
C_O2_I1(r, r, L)
41
C_O2_I2(a, d, a, r)
42
diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h
43
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
44
--- a/tcg/i386/tcg-target-con-str.h
34
--- a/fpu/softfloat.c
45
+++ b/tcg/i386/tcg-target-con-str.h
35
+++ b/fpu/softfloat.c
46
@@ -XXX,XX +XXX,XX @@ REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
47
*/
37
if (unlikely(!can_use_fpu(s))) {
48
CONST('e', TCG_CT_CONST_S32)
38
goto soft;
49
CONST('I', TCG_CT_CONST_I32)
39
}
50
+CONST('T', TCG_CT_CONST_TST)
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
51
CONST('W', TCG_CT_CONST_WSZ)
41
+ goto soft;
52
CONST('Z', TCG_CT_CONST_U32)
42
+ }
53
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
54
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/i386/tcg-target.c.inc
48
--- a/fpu/softfloat-parts.c.inc
56
+++ b/tcg/i386/tcg-target.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
57
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
58
#define TCG_CT_CONST_U32 0x200
51
goto return_normal;
59
#define TCG_CT_CONST_I32 0x400
60
#define TCG_CT_CONST_WSZ 0x800
61
+#define TCG_CT_CONST_TST 0x1000
62
63
/* Registers used with L constraint, which are the first argument
64
registers on x86_64, and two random call clobbered registers on
65
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
66
return 1;
67
}
68
if (type == TCG_TYPE_I32) {
69
- if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | TCG_CT_CONST_I32)) {
70
+ if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 |
71
+ TCG_CT_CONST_I32 | TCG_CT_CONST_TST)) {
72
return 1;
73
}
52
}
74
} else {
53
if (c->cls == float_class_zero) {
75
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
54
- if (a->sign != c->sign) {
76
if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
77
return 1;
56
+ a->sign = c->sign;
78
}
57
+ } else if (a->sign != c->sign) {
79
+ /*
58
goto return_sub_zero;
80
+ * This will be used in combination with TCG_CT_CONST_S32,
59
}
81
+ * so "normal" TESTQ is already matched. Also accept:
60
goto return_zero;
82
+ * TESTQ -> TESTL (uint32_t)
83
+ * TESTQ -> BT (is_power_of_2)
84
+ */
85
+ if ((ct & TCG_CT_CONST_TST)
86
+ && is_tst_cond(cond)
87
+ && (val == (uint32_t)val || is_power_of_2(val))) {
88
+ return 1;
89
+ }
90
}
91
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
92
return 1;
93
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
94
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
95
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
96
#define OPC_SHRD_Ib (0xac | P_EXT)
97
+#define OPC_TESTB    (0x84)
98
#define OPC_TESTL    (0x85)
99
#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
100
#define OPC_UD2 (0x0b | P_EXT)
101
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
102
#define OPC_GRP3_Ev (0xf7)
103
#define OPC_GRP5 (0xff)
104
#define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
105
+#define OPC_GRPBT (0xba | P_EXT)
106
+
107
+#define OPC_GRPBT_BT 4
108
+#define OPC_GRPBT_BTS 5
109
+#define OPC_GRPBT_BTR 6
110
+#define OPC_GRPBT_BTC 7
111
112
/* Group 1 opcode extensions for 0x80-0x83.
113
These are also used as modifiers for OPC_ARITH. */
114
@@ -XXX,XX +XXX,XX @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
115
static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
116
TCGArg arg2, int const_arg2, int rexw)
117
{
118
- int jz;
119
+ int jz, js;
120
121
if (!is_tst_cond(cond)) {
122
if (!const_arg2) {
123
@@ -XXX,XX +XXX,XX @@ static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
124
}
125
126
jz = tcg_cond_to_jcc[cond];
127
+ js = (cond == TCG_COND_TSTNE ? JCC_JS : JCC_JNS);
128
129
if (!const_arg2) {
130
tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg2);
131
@@ -XXX,XX +XXX,XX @@ static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
132
}
133
134
if (arg2 <= 0xff && (TCG_TARGET_REG_BITS == 64 || arg1 < 4)) {
135
+ if (arg2 == 0x80) {
136
+ tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1);
137
+ return js;
138
+ }
139
tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, arg1);
140
tcg_out8(s, arg2);
141
return jz;
142
}
143
144
if ((arg2 & ~0xff00) == 0 && arg1 < 4) {
145
+ if (arg2 == 0x8000) {
146
+ tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4);
147
+ return js;
148
+ }
149
tcg_out_modrm(s, OPC_GRP3_Eb, EXT3_TESTi, arg1 + 4);
150
tcg_out8(s, arg2 >> 8);
151
return jz;
152
}
153
154
+ if (is_power_of_2(rexw ? arg2 : (uint32_t)arg2)) {
155
+ int jc = (cond == TCG_COND_TSTNE ? JCC_JB : JCC_JAE);
156
+ int sh = ctz64(arg2);
157
+
158
+ rexw = (sh & 32 ? P_REXW : 0);
159
+ if ((sh & 31) == 31) {
160
+ tcg_out_modrm(s, OPC_TESTL | rexw, arg1, arg1);
161
+ return js;
162
+ } else {
163
+ tcg_out_modrm(s, OPC_GRPBT | rexw, OPC_GRPBT_BT, arg1);
164
+ tcg_out8(s, sh);
165
+ return jc;
166
+ }
167
+ }
168
+
169
if (rexw) {
170
if (arg2 == (uint32_t)arg2) {
171
rexw = 0;
172
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
173
174
case INDEX_op_brcond_i32:
175
case INDEX_op_brcond_i64:
176
- return C_O0_I2(r, re);
177
+ return C_O0_I2(r, reT);
178
179
case INDEX_op_bswap16_i32:
180
case INDEX_op_bswap16_i64:
181
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
182
case INDEX_op_setcond_i64:
183
case INDEX_op_negsetcond_i32:
184
case INDEX_op_negsetcond_i64:
185
- return C_O1_I2(q, r, re);
186
+ return C_O1_I2(q, r, reT);
187
188
case INDEX_op_movcond_i32:
189
case INDEX_op_movcond_i64:
190
- return C_O1_I4(r, r, re, r, 0);
191
+ return C_O1_I4(r, r, reT, r, 0);
192
193
case INDEX_op_div2_i32:
194
case INDEX_op_div2_i64:
195
--
61
--
196
2.34.1
62
2.43.0
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.h | 1 -
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/fma_emu.h
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
30
}
31
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
33
-{
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
38
-}
39
-
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
48
{
49
float32 RdV;
50
arch_fpop_start(env);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
56
--
57
2.43.0
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
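A sketch of why the two forms agree (identifiers are illustrative): negating the product is numerically the same as negating one of the inputs; the only observable difference, the sign carried by a propagated NaN, cannot matter while default-NaN mode is in effect.

    /* Both compute RxV - RsV * RtV; with default-NaN they are
     * indistinguishable. */
    float32 r1 = float32_muladd(float32_chs(RsV), RtV, RxV, 0,
                                &env->fp_status);
    float32 r2 = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
                                &env->fp_status);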
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
New patch
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
1
5
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/hexagon/op_helper.c | 11 +++--------
10
1 file changed, 3 insertions(+), 8 deletions(-)
11
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/op_helper.c
15
+++ b/target/hexagon/op_helper.c
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
float32 RsV, float32 RtV, float32 PuV)
19
{
20
- size4s_t tmp;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
35
--
36
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
There are multiple special cases for this instruction.
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
8
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
11
---
4
tcg/aarch64/tcg-target-con-set.h | 5 +--
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
5
tcg/aarch64/tcg-target-con-str.h | 1 +
13
1 file changed, 26 insertions(+), 79 deletions(-)
6
tcg/aarch64/tcg-target.h | 2 +-
7
tcg/aarch64/tcg-target.c.inc | 56 ++++++++++++++++++++++----------
8
4 files changed, 44 insertions(+), 20 deletions(-)
9
14
10
diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
11
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/aarch64/tcg-target-con-set.h
17
--- a/target/hexagon/op_helper.c
13
+++ b/tcg/aarch64/tcg-target-con-set.h
18
+++ b/target/hexagon/op_helper.c
14
@@ -XXX,XX +XXX,XX @@
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
15
* tcg-target-con-str.h; the constraint combination is inclusive or.
20
return RxV;
16
*/
21
}
17
C_O0_I1(r)
22
18
-C_O0_I2(r, rA)
23
-static bool is_zero_prod(float32 a, float32 b)
19
+C_O0_I2(r, rC)
24
-{
20
C_O0_I2(rZ, r)
25
- return ((float32_is_zero(a) && is_finite(b)) ||
21
C_O0_I2(w, r)
26
- (float32_is_zero(b) && is_finite(a)));
22
C_O0_I3(rZ, rZ, r)
27
-}
23
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
28
-
24
C_O1_I2(r, r, r)
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
25
C_O1_I2(r, r, rA)
30
-{
26
C_O1_I2(r, r, rAL)
31
- float32 ret = dst;
27
+C_O1_I2(r, r, rC)
32
- if (float32_is_any_nan(x)) {
28
C_O1_I2(r, r, ri)
33
- if (extract32(x, 22, 1) == 0) {
29
C_O1_I2(r, r, rL)
34
- float_raise(float_flag_invalid, fp_status);
30
C_O1_I2(r, rZ, rZ)
35
- }
31
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, wN)
36
- ret = make_float32(0xffffffff); /* nan */
32
C_O1_I2(w, w, wO)
37
- }
33
C_O1_I2(w, w, wZ)
38
- return ret;
34
C_O1_I3(w, w, w, w)
39
-}
35
-C_O1_I4(r, r, rA, rZ, rZ)
40
-
36
+C_O1_I4(r, r, rC, rZ, rZ)
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
37
C_O2_I1(r, r, r)
42
float32 RsV, float32 RtV, float32 PuV)
38
C_O2_I4(r, r, rZ, rZ, rA, rMZ)
43
{
39
diff --git a/tcg/aarch64/tcg-target-con-str.h b/tcg/aarch64/tcg-target-con-str.h
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
40
index XXXXXXX..XXXXXXX 100644
45
return RxV;
41
--- a/tcg/aarch64/tcg-target-con-str.h
46
}
42
+++ b/tcg/aarch64/tcg-target-con-str.h
47
43
@@ -XXX,XX +XXX,XX @@ REGS('w', ALL_VECTOR_REGS)
48
-static bool is_inf_prod(int32_t a, int32_t b)
44
* CONST(letter, TCG_CT_CONST_* bit set)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
45
*/
50
+ float32 RsV, float32 RtV, int negate)
46
CONST('A', TCG_CT_CONST_AIMM)
51
{
47
+CONST('C', TCG_CT_CONST_CMP)
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
48
CONST('L', TCG_CT_CONST_LIMM)
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
49
CONST('M', TCG_CT_CONST_MONE)
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
50
CONST('O', TCG_CT_CONST_ORRI)
55
+ int flags;
51
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tcg/aarch64/tcg-target.h
54
+++ b/tcg/aarch64/tcg-target.h
55
@@ -XXX,XX +XXX,XX @@ typedef enum {
56
#define TCG_TARGET_HAS_qemu_ldst_i128 1
57
#endif
58
59
-#define TCG_TARGET_HAS_tst 0
60
+#define TCG_TARGET_HAS_tst 1
61
62
#define TCG_TARGET_HAS_v64 1
63
#define TCG_TARGET_HAS_v128 1
64
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
65
index XXXXXXX..XXXXXXX 100644
66
--- a/tcg/aarch64/tcg-target.c.inc
67
+++ b/tcg/aarch64/tcg-target.c.inc
68
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
69
#define TCG_CT_CONST_MONE 0x800
70
#define TCG_CT_CONST_ORRI 0x1000
71
#define TCG_CT_CONST_ANDI 0x2000
72
+#define TCG_CT_CONST_CMP 0x4000
73
74
#define ALL_GENERAL_REGS 0xffffffffu
75
#define ALL_VECTOR_REGS 0xffffffff00000000ull
76
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, int ct,
77
if (type == TCG_TYPE_I32) {
78
val = (int32_t)val;
79
}
80
+
56
+
81
+ if (ct & TCG_CT_CONST_CMP) {
57
+ arch_fpop_start(env);
82
+ if (is_tst_cond(cond)) {
58
+
83
+ ct |= TCG_CT_CONST_LIMM;
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
84
+ } else {
60
+ RxV = float32_muladd(RsV, RtV, RxV,
85
+ ct |= TCG_CT_CONST_AIMM;
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
63
+
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
86
+ }
72
+ }
87
+ }
73
+ }
88
+
74
+
89
if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
75
+ arch_fpop_end(env);
90
return 1;
76
+ return RxV;
91
}
92
@@ -XXX,XX +XXX,XX @@ static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
93
[TCG_COND_GTU] = COND_HI,
94
[TCG_COND_GEU] = COND_HS,
95
[TCG_COND_LEU] = COND_LS,
96
+ /* bit test */
97
+ [TCG_COND_TSTEQ] = COND_EQ,
98
+ [TCG_COND_TSTNE] = COND_NE,
99
};
100
101
typedef enum {
102
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
103
tcg_out_bfm(s, ext, rd, rn, a, b);
104
}
77
}
105
78
106
-static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
107
+static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a,
80
float32 RsV, float32 RtV)
108
tcg_target_long b, bool const_b)
109
{
81
{
110
- if (const_b) {
82
- bool infinp;
111
- /* Using CMP or CMN aliases. */
83
- bool infminusinf;
112
- if (b >= 0) {
84
- float32 tmp;
113
- tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
85
-
114
+ if (is_tst_cond(cond)) {
86
- arch_fpop_start(env);
115
+ if (!const_b) {
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
116
+ tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
88
- infminusinf = float32_is_infinity(RxV) &&
117
} else {
89
- is_inf_prod(RsV, RtV) &&
118
- tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
119
+ tcg_debug_assert(is_limm(b));
91
- infinp = float32_is_infinity(RxV) ||
120
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, a, b);
92
- float32_is_infinity(RtV) ||
121
}
93
- float32_is_infinity(RsV);
122
} else {
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
123
- /* Using CMP alias SUBS wzr, Wn, Wm */
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
124
- tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
125
+ if (!const_b) {
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
126
+ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
127
+ } else if (b >= 0) {
99
- RxV = tmp;
128
+ tcg_debug_assert(is_aimm(b));
100
- }
129
+ tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
101
- set_float_exception_flags(0, &env->fp_status);
130
+ } else {
102
- if (float32_is_infinity(RxV) && !infinp) {
131
+ tcg_debug_assert(is_aimm(-b));
103
- RxV = RxV - 1;
132
+ tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
104
- }
133
+ }
105
- if (infminusinf) {
134
}
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
135
}
111
}
136
112
137
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
138
need_cmp = false;
114
float32 RsV, float32 RtV)
139
} else {
115
{
140
need_cmp = true;
116
- bool infinp;
141
- tcg_out_cmp(s, ext, a, b, b_const);
117
- bool infminusinf;
142
+ tcg_out_cmp(s, ext, c, a, b, b_const);
118
- float32 tmp;
143
}
119
-
144
120
- arch_fpop_start(env);
145
if (!l->has_value) {
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
146
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
122
- infminusinf = float32_is_infinity(RxV) &&
147
} else {
123
- is_inf_prod(RsV, RtV) &&
148
AArch64Insn sel = I3506_CSEL;
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
149
125
- infinp = float32_is_infinity(RxV) ||
150
- tcg_out_cmp(s, ext, a0, 0, 1);
126
- float32_is_infinity(RtV) ||
151
+ tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1);
127
- float32_is_infinity(RsV);
152
tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
153
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
154
if (const_b) {
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
155
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
156
addr_adj, compare_mask);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
157
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
158
/* Perform the address comparison. */
134
- RxV = tmp;
159
- tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
135
- }
160
+ tcg_out_cmp(s, addr_type, TCG_COND_NE, TCG_REG_TMP0, TCG_REG_TMP2, 0);
136
- set_float_exception_flags(0, &env->fp_status);
161
137
- if (float32_is_infinity(RxV) && !infinp) {
162
/* If not equal, we jump to the slow path. */
138
- RxV = RxV - 1;
163
ldst->label_ptr[0] = s->code_ptr;
139
- }
164
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
140
- if (infminusinf) {
165
a2 = (int32_t)a2;
141
- RxV = 0;
166
/* FALLTHRU */
142
- }
167
case INDEX_op_setcond_i64:
143
- arch_fpop_end(env);
168
- tcg_out_cmp(s, ext, a1, a2, c2);
144
- return RxV;
169
+ tcg_out_cmp(s, ext, args[3], a1, a2, c2);
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
170
/* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
146
}
171
tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
147
172
TCG_REG_XZR, tcg_invert_cond(args[3]));
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
173
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
174
a2 = (int32_t)a2;
175
/* FALLTHRU */
176
case INDEX_op_negsetcond_i64:
177
- tcg_out_cmp(s, ext, a1, a2, c2);
178
+ tcg_out_cmp(s, ext, args[3], a1, a2, c2);
179
/* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
180
tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
181
TCG_REG_XZR, tcg_invert_cond(args[3]));
182
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
183
a2 = (int32_t)a2;
184
/* FALLTHRU */
185
case INDEX_op_movcond_i64:
186
- tcg_out_cmp(s, ext, a1, a2, c2);
187
+ tcg_out_cmp(s, ext, args[5], a1, a2, c2);
188
tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
189
break;
190
191
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
192
case INDEX_op_add_i64:
193
case INDEX_op_sub_i32:
194
case INDEX_op_sub_i64:
195
+ return C_O1_I2(r, r, rA);
196
+
197
case INDEX_op_setcond_i32:
198
case INDEX_op_setcond_i64:
199
case INDEX_op_negsetcond_i32:
200
case INDEX_op_negsetcond_i64:
201
- return C_O1_I2(r, r, rA);
202
+ return C_O1_I2(r, r, rC);
203
204
case INDEX_op_mul_i32:
205
case INDEX_op_mul_i64:
206
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
207
208
case INDEX_op_brcond_i32:
209
case INDEX_op_brcond_i64:
210
- return C_O0_I2(r, rA);
211
+ return C_O0_I2(r, rC);
212
213
case INDEX_op_movcond_i32:
214
case INDEX_op_movcond_i64:
215
- return C_O1_I4(r, r, rA, rZ, rZ);
216
+ return C_O1_I4(r, r, rC, rZ, rZ);
217
218
case INDEX_op_qemu_ld_a32_i32:
219
case INDEX_op_qemu_ld_a64_i32:
220
--
149
--
221
2.34.1
150
2.43.0
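
A condensed sketch of how the three special cases above fit together in the
new helper; the rounding mode and flags are the ones added earlier in this
series, while the control flow here is simplified from the actual hunk:

    int flags;

    set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
    RxV = float32_muladd(RsV, RtV, RxV,
                         negate | float_muladd_suppress_add_product_zero,
                         &env->fp_status);
    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_isi) {
        RxV = 0;                                  /* Inf - Inf collapses to zero */
    }
    set_float_exception_flags(0, &env->fp_status);   /* insn raises no flags */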
222
223
1
The function is now unused.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/fma_emu.h | 2 -
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.h
13
+++ b/target/hexagon/fma_emu.h
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
15
}
16
int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
19
- int scale, float_status *fp_status);
20
float64 internal_mpyhh(float64 a, float64 b,
21
unsigned long long int accumulated,
22
float_status *fp_status);
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/hexagon/fma_emu.c
26
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
28
return -1;
29
}
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
219
--
220
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
This massive macro is now only used once.
2
Expand it for use only by float64.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/sparc64/tcg-target.h | 2 +-
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
5
tcg/sparc64/tcg-target.c.inc | 16 ++++++++++++++--
8
1 file changed, 127 insertions(+), 128 deletions(-)
6
2 files changed, 15 insertions(+), 3 deletions(-)
7
9
8
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/sparc64/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/sparc64/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
13
14
#define TCG_TARGET_HAS_qemu_ldst_i128 0
15
16
-#define TCG_TARGET_HAS_tst 0
17
+#define TCG_TARGET_HAS_tst 1
18
19
#define TCG_AREG0 TCG_REG_I0
20
21
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
22
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/sparc64/tcg-target.c.inc
24
+++ b/tcg/sparc64/tcg-target.c.inc
25
@@ -XXX,XX +XXX,XX @@ static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
26
uns ? ARITH_UDIV : ARITH_SDIV);
27
}
15
}
28
16
29
-static const uint8_t tcg_cond_to_bcond[] = {
17
/* Return a maximum finite value with the requested sign */
30
+static const uint8_t tcg_cond_to_bcond[16] = {
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
31
[TCG_COND_EQ] = COND_E,
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
32
[TCG_COND_NE] = COND_NE,
20
-{ \
33
+ [TCG_COND_TSTEQ] = COND_E,
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
34
+ [TCG_COND_TSTNE] = COND_NE,
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
35
[TCG_COND_LT] = COND_L,
23
- /* result zero */ \
36
[TCG_COND_GE] = COND_GE,
24
- switch (fp_status->float_rounding_mode) { \
37
[TCG_COND_LE] = COND_LE,
25
- case float_round_down: \
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
26
- return zero_##SUFFIX(1); \
39
static void tcg_out_cmp(TCGContext *s, TCGCond cond,
27
- default: \
40
TCGReg c1, int32_t c2, int c2const)
28
- return zero_##SUFFIX(0); \
41
{
29
- } \
42
- tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
30
- } \
43
+ tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
31
- /* Normalize right */ \
44
+ is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
45
}
271
}
46
272
47
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
274
-
49
cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
275
float64 internal_mpyhh(float64 a, float64 b,
50
    break;
276
unsigned long long int accumulated,
51
277
float_status *fp_status)
52
+ case TCG_COND_TSTEQ:
53
+ case TCG_COND_TSTNE:
54
+ /* Transform to inequality vs zero. */
55
+ tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
56
+ c1 = TCG_REG_G0;
57
+ c2 = TCG_REG_T1, c2const = 0;
58
+ cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
59
+    break;
60
+
61
case TCG_COND_GTU:
62
case TCG_COND_LEU:
63
/* If we don't need to load a constant into a register, we can
64
--
278
--
65
2.34.1
279
2.43.0
66
67
1
... and the inverse, CBZ for TSTEQ.
1
This structure, with bitfields, is incorrect for big-endian.
2
Use the existing float32_getexp_raw which uses extract32.
2
3
3
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/aarch64/tcg-target.c.inc | 6 ++++++
7
target/hexagon/fma_emu.c | 16 +++-------------
8
1 file changed, 6 insertions(+)
8
1 file changed, 3 insertions(+), 13 deletions(-)
9
9
10
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/aarch64/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
13
+++ b/tcg/aarch64/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
14
@@ -XXX,XX +XXX,XX @@ typedef union {
15
break;
15
};
16
case TCG_COND_TSTEQ:
16
} Double;
17
case TCG_COND_TSTNE:
17
18
+ /* tst xN,0xffffffff; b.ne L -> cbnz wN,L */
18
-typedef union {
19
+ if (b_const && b == UINT32_MAX) {
19
- float f;
20
+ ext = TCG_TYPE_I32;
20
- uint32_t i;
21
+ need_cmp = false;
21
- struct {
22
+ break;
22
- uint32_t mant:23;
23
+ }
23
- uint32_t exp:8;
24
/* tst xN,1<<B; b.ne L -> tbnz xN,B,L */
24
- uint32_t sign:1;
25
if (b_const && is_power_of_2(b)) {
25
- };
26
tbit = ctz64(b);
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
32
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
27
--
47
--
28
2.34.1
48
2.43.0
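
The endianness hazard above is worth spelling out: the old union relied on
the compiler allocating bitfields from the least significant bit, which is a
host-dependent layout choice, while extract32 reads the fields the same way
on every host. A small editorial example, using the bit pattern of 1.0f:

    uint32_t bits = 0x3f800000;               /* 1.0f */
    uint32_t mant = extract32(bits, 0, 23);   /* 0     */
    uint32_t exp  = extract32(bits, 23, 8);   /* 127   */
    uint32_t sign = extract32(bits, 31, 1);   /* 0     */

The 64-bit Double structure handled by the following patch is the same idea,
using extract64 and deposit64 over the 52-bit mantissa and 11-bit exponent.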
29
30
1
Simplify gen_bcond() by passing an immediate value.
1
This structure, with bitfields, is incorrect for big-endian.
2
Use extract64 and deposit64 instead.
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20231028194522.245170-33-richard.henderson@linaro.org>
6
[PMD: Split from bigger patch, part 1/2]
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Message-Id: <20231108205247.83234-1-philmd@linaro.org>
9
---
6
---
10
target/alpha/translate.c | 21 +++++++--------------
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
11
1 file changed, 7 insertions(+), 14 deletions(-)
8
1 file changed, 16 insertions(+), 30 deletions(-)
12
9
13
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/target/alpha/translate.c
12
--- a/target/hexagon/fma_emu.c
16
+++ b/target/alpha/translate.c
13
+++ b/target/hexagon/fma_emu.c
17
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
14
@@ -XXX,XX +XXX,XX @@
15
16
#define WAY_BIG_EXP 4096
17
18
-typedef union {
19
- double f;
20
- uint64_t i;
21
- struct {
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
18
}
44
}
19
45
20
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
46
int32_t float64_getexp(float64 f64)
21
- TCGv cmp, int32_t disp)
22
+ TCGv cmp, uint64_t imm, int32_t disp)
23
{
47
{
24
uint64_t dest = ctx->base.pc_next + (disp << 2);
48
- Double a = { .i = f64 };
25
TCGLabel *lab_true = gen_new_label();
49
+ int exp = extract64(f64, 52, 11);
26
50
if (float64_is_normal(f64)) {
27
if (use_goto_tb(ctx, dest)) {
51
- return a.exp;
28
- tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
52
+ return exp;
29
+ tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);
30
31
tcg_gen_goto_tb(0);
32
tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
33
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
34
35
return DISAS_NORETURN;
36
} else {
37
- TCGv_i64 z = load_zero(ctx);
38
+ TCGv_i64 i = tcg_constant_i64(imm);
39
TCGv_i64 d = tcg_constant_i64(dest);
40
TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);
41
42
- tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
43
+ tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p);
44
return DISAS_PC_UPDATED;
45
}
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
46
}
59
}
47
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
48
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
61
/* Return a maximum finite value with the requested sign */
49
int32_t disp, int mask)
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
50
{
63
{
51
- if (mask) {
64
+ uint64_t ret;
52
- TCGv tmp = tcg_temp_new();
65
+
53
- DisasJumpType ret;
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
54
-
67
&& ((a.guard | a.round | a.sticky) == 0)) {
55
- tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
68
/* result zero */
56
- ret = gen_bcond_internal(ctx, cond, tmp, disp);
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
57
- return ret;
70
}
58
- }
71
}
59
- return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
72
/* Underflow? */
60
+ return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
61
+ mask, disp);
74
+ ret = int128_getlo(a.mant);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
62
}
97
}
63
98
64
/* Fold -0.0 for comparison with COND. */
99
float64 internal_mpyhh(float64 a, float64 b,
65
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
66
DisasJumpType ret;
67
68
gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
69
- ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
70
+ ret = gen_bcond_internal(ctx, cond, cmp_tmp, 0, disp);
71
return ret;
72
}
73
74
--
100
--
75
2.34.1
101
2.43.0
76
77
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
No need to open-code 64x64->128-bit multiplication.
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Message-Id: <20231028194522.245170-33-richard.henderson@linaro.org>
4
[PMD: Split from bigger patch, part 2/2]
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-Id: <20231108205247.83234-2-philmd@linaro.org>
7
---
5
---
8
target/alpha/translate.c | 20 ++++++++++----------
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
9
1 file changed, 10 insertions(+), 10 deletions(-)
7
1 file changed, 3 insertions(+), 29 deletions(-)
10
8
11
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/target/alpha/translate.c
11
--- a/target/hexagon/fma_emu.c
14
+++ b/target/alpha/translate.c
12
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
14
return -1;
16
}
15
}
17
16
18
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
17
-static uint32_t int128_getw0(Int128 x)
19
- int32_t disp, int mask)
18
-{
20
+ int32_t disp)
19
- return int128_getlo(x);
20
-}
21
-
22
-static uint32_t int128_getw1(Int128 x)
23
-{
24
- return int128_getlo(x) >> 32;
25
-}
26
-
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
21
{
28
{
22
return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
29
- Int128 a, b;
23
- mask, disp);
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
24
+ is_tst_cond(cond), disp);
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
47
- }
48
-
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
50
+ mulu64(&l, &h, ai, bi);
51
+ return int128_make128(l, h);
25
}
52
}
26
53
27
/* Fold -0.0 for comparison with COND. */
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
28
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
29
break;
30
case 0x38:
31
/* BLBC */
32
- ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
33
+ ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
34
break;
35
case 0x39:
36
/* BEQ */
37
- ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
38
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
39
break;
40
case 0x3A:
41
/* BLT */
42
- ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
43
+ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
44
break;
45
case 0x3B:
46
/* BLE */
47
- ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
48
+ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
49
break;
50
case 0x3C:
51
/* BLBS */
52
- ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
53
+ ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
54
break;
55
case 0x3D:
56
/* BNE */
57
- ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
58
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
59
break;
60
case 0x3E:
61
/* BGE */
62
- ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
63
+ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
64
break;
65
case 0x3F:
66
/* BGT */
67
- ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
68
+ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
69
break;
70
invalid_opc:
71
ret = gen_invalid(ctx);
72
--
55
--
73
2.34.1
56
2.43.0
74
75
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Initialize x with accumulated via direct assignment,
2
rather than multiplying by 1.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
target/alpha/translate.c | 8 ++------
7
target/hexagon/fma_emu.c | 2 +-
5
1 file changed, 2 insertions(+), 6 deletions(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
6
9
7
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/target/alpha/translate.c
12
--- a/target/hexagon/fma_emu.c
10
+++ b/target/alpha/translate.c
13
+++ b/target/hexagon/fma_emu.c
11
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
12
break;
15
float64_is_infinity(b)) {
13
case 0x14:
16
return float64_mul(a, b, fp_status);
14
/* CMOVLBS */
17
}
15
- tmp = tcg_temp_new();
18
- x.mant = int128_mul_6464(accumulated, 1);
16
- tcg_gen_andi_i64(tmp, va, 1);
19
+ x.mant = int128_make64(accumulated);
17
- tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
20
x.sticky = sticky;
18
+ tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
19
vb, load_gpr(ctx, rc));
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
20
break;
21
case 0x16:
22
/* CMOVLBC */
23
- tmp = tcg_temp_new();
24
- tcg_gen_andi_i64(tmp, va, 1);
25
- tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
26
+ tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
27
vb, load_gpr(ctx, rc));
28
break;
29
case 0x20:
30
--
23
--
31
2.34.1
24
2.43.0
32
33
1
These are all test-and-compare type instructions.
1
Convert all targets simultaneously, as the gen_intermediate_code
2
function disappears from the target. While there are possible
3
workarounds, they're larger than simply performing the conversion.
2
4
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
7
---
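
[Editorial note, not part of the patch: every target follows the same
pattern, sketched here for a hypothetical target "foo"; the real hunks for
each target appear in the diff below.]

    /* target/foo/cpu.c */
    static const TCGCPUOps foo_tcg_ops = {
        .initialize     = foo_translate_init,
        .translate_code = foo_translate_code,   /* new mandatory hook */
        /* remaining hooks unchanged */
    };

    /* target/foo/translate.c: the old gen_intermediate_code() body, renamed */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc)
    {
        DisasContext dc;
        translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &dc.base);
    }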
6
target/s390x/tcg/translate.c | 18 +++++++-----------
8
include/exec/translator.h | 14 --------------
7
1 file changed, 7 insertions(+), 11 deletions(-)
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
10
target/alpha/cpu.h | 2 ++
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
8
71
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
73
index XXXXXXX..XXXXXXX 100644
74
--- a/include/exec/translator.h
75
+++ b/include/exec/translator.h
76
@@ -XXX,XX +XXX,XX @@
77
#include "qemu/bswap.h"
78
#include "exec/vaddr.h"
79
80
-/**
81
- * gen_intermediate_code
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
391
index XXXXXXX..XXXXXXX 100644
392
--- a/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
408
/*
409
* Overflow of code_gen_buffer, or the current slice of it.
410
*
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
413
* should we re-do the tcg optimization currently hidden
414
* inside tcg_gen_code. All that should be required is to
415
* flush the TBs, allocate a new TB, re-initialize it per
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
417
index XXXXXXX..XXXXXXX 100644
418
--- a/target/alpha/cpu.c
419
+++ b/target/alpha/cpu.c
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
421
422
static const TCGCPUOps alpha_tcg_ops = {
423
.initialize = alpha_translate_init,
424
+ .translate_code = alpha_translate_code,
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
426
.restore_state_to_opc = alpha_restore_state_to_opc,
427
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
433
.tb_stop = alpha_tr_tb_stop,
434
};
435
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
437
- vaddr pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
440
{
441
DisasContext dc;
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/arm/cpu.c
446
+++ b/target/arm/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
448
#ifdef CONFIG_TCG
449
static const TCGCPUOps arm_tcg_ops = {
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
472
.tb_stop = arm_tr_tb_stop,
473
};
474
475
-/* generate intermediate code for basic block 'tb'. */
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
477
- vaddr pc, void *host_pc)
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
480
{
481
DisasContext dc = { };
482
const TranslatorOps *ops = &arm_translator_ops;
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/avr/cpu.c
486
+++ b/target/avr/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
488
489
static const TCGCPUOps avr_tcg_ops = {
490
.initialize = avr_cpu_tcg_init,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
507
*/
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
509
.tb_stop = avr_tr_tb_stop,
510
};
511
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
513
- vaddr pc, void *host_pc)
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
515
+ int *max_insns, vaddr pc, void *host_pc)
516
{
517
DisasContext dc = { };
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/hexagon/cpu.c
522
+++ b/target/hexagon/cpu.c
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
524
525
static const TCGCPUOps hexagon_tcg_ops = {
526
.initialize = hexagon_translate_init,
527
+ .translate_code = hexagon_translate_code,
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
530
};
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
532
index XXXXXXX..XXXXXXX 100644
533
--- a/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
536
.tb_stop = hexagon_tr_tb_stop,
537
};
538
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
559
index XXXXXXX..XXXXXXX 100644
560
--- a/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
563
#endif
564
};
565
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
567
- vaddr pc, void *host_pc)
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
569
+ int *max_insns, vaddr pc, void *host_pc)
570
{
571
DisasContext ctx = { };
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
586
index XXXXXXX..XXXXXXX 100644
587
--- a/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
590
.tb_stop = i386_tr_tb_stop,
591
};
592
593
-/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
618
.tb_stop = loongarch_tr_tb_stop,
619
};
620
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
625
{
626
DisasContext ctx;
627
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
629
index XXXXXXX..XXXXXXX 100644
630
--- a/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
645
.tb_stop = m68k_tr_tb_stop,
646
};
647
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
649
- vaddr pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
652
{
653
DisasContext dc;
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
656
index XXXXXXX..XXXXXXX 100644
657
--- a/target/microblaze/cpu.c
658
+++ b/target/microblaze/cpu.c
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
660
661
static const TCGCPUOps mb_tcg_ops = {
662
.initialize = mb_tcg_init,
663
+ .translate_code = mb_translate_code,
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
668
index XXXXXXX..XXXXXXX 100644
669
--- a/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
672
.tb_stop = mb_tr_tb_stop,
673
};
674
675
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
676
- vaddr pc, void *host_pc)
677
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
678
+ int *max_insns, vaddr pc, void *host_pc)
679
{
680
DisasContext dc;
681
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
682
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
683
index XXXXXXX..XXXXXXX 100644
684
--- a/target/mips/cpu.c
685
+++ b/target/mips/cpu.c
686
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
687
#include "hw/core/tcg-cpu-ops.h"
688
static const TCGCPUOps mips_tcg_ops = {
689
.initialize = mips_tcg_init,
690
+ .translate_code = mips_translate_code,
691
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
692
.restore_state_to_opc = mips_restore_state_to_opc,
693
694
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
695
index XXXXXXX..XXXXXXX 100644
696
--- a/target/mips/tcg/translate.c
697
+++ b/target/mips/tcg/translate.c
698
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
699
.tb_stop = mips_tr_tb_stop,
700
};
701
702
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
703
- vaddr pc, void *host_pc)
704
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
705
+ int *max_insns, vaddr pc, void *host_pc)
706
{
707
DisasContext ctx;
708
709
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
710
index XXXXXXX..XXXXXXX 100644
711
--- a/target/openrisc/cpu.c
712
+++ b/target/openrisc/cpu.c
713
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
714
715
static const TCGCPUOps openrisc_tcg_ops = {
716
.initialize = openrisc_translate_init,
717
+ .translate_code = openrisc_translate_code,
718
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
719
.restore_state_to_opc = openrisc_restore_state_to_opc,
720
721
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
722
index XXXXXXX..XXXXXXX 100644
723
--- a/target/openrisc/translate.c
724
+++ b/target/openrisc/translate.c
725
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
726
.tb_stop = openrisc_tr_tb_stop,
727
};
728
729
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
730
- vaddr pc, void *host_pc)
731
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
732
+ int *max_insns, vaddr pc, void *host_pc)
733
{
734
DisasContext ctx;
735
736
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
737
index XXXXXXX..XXXXXXX 100644
738
--- a/target/ppc/cpu_init.c
739
+++ b/target/ppc/cpu_init.c
740
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
741
742
static const TCGCPUOps ppc_tcg_ops = {
743
.initialize = ppc_translate_init,
744
+ .translate_code = ppc_translate_code,
745
.restore_state_to_opc = ppc_restore_state_to_opc,
746
747
#ifdef CONFIG_USER_ONLY
748
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
749
index XXXXXXX..XXXXXXX 100644
750
--- a/target/ppc/translate.c
751
+++ b/target/ppc/translate.c
752
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
753
.tb_stop = ppc_tr_tb_stop,
754
};
755
756
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
757
- vaddr pc, void *host_pc)
758
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
759
+ int *max_insns, vaddr pc, void *host_pc)
760
{
761
DisasContext ctx;
762
763
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
764
index XXXXXXX..XXXXXXX 100644
765
--- a/target/riscv/tcg/tcg-cpu.c
766
+++ b/target/riscv/tcg/tcg-cpu.c
767
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
768
769
static const TCGCPUOps riscv_tcg_ops = {
770
.initialize = riscv_translate_init,
771
+ .translate_code = riscv_translate_code,
772
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
773
.restore_state_to_opc = riscv_restore_state_to_opc,
774
775
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
776
index XXXXXXX..XXXXXXX 100644
777
--- a/target/riscv/translate.c
778
+++ b/target/riscv/translate.c
779
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
780
.tb_stop = riscv_tr_tb_stop,
781
};
782
783
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
784
- vaddr pc, void *host_pc)
785
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
786
+ int *max_insns, vaddr pc, void *host_pc)
787
{
788
DisasContext ctx;
789
790
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
791
index XXXXXXX..XXXXXXX 100644
792
--- a/target/rx/cpu.c
793
+++ b/target/rx/cpu.c
794
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
795
796
static const TCGCPUOps rx_tcg_ops = {
797
.initialize = rx_translate_init,
798
+ .translate_code = rx_translate_code,
799
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
800
.restore_state_to_opc = rx_restore_state_to_opc,
801
.tlb_fill = rx_cpu_tlb_fill,
802
diff --git a/target/rx/translate.c b/target/rx/translate.c
803
index XXXXXXX..XXXXXXX 100644
804
--- a/target/rx/translate.c
805
+++ b/target/rx/translate.c
806
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
807
.tb_stop = rx_tr_tb_stop,
808
};
809
810
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
811
- vaddr pc, void *host_pc)
812
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
813
+ int *max_insns, vaddr pc, void *host_pc)
814
{
815
DisasContext dc;
816
817
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
818
index XXXXXXX..XXXXXXX 100644
819
--- a/target/s390x/cpu.c
820
+++ b/target/s390x/cpu.c
821
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
822
823
static const TCGCPUOps s390_tcg_ops = {
824
.initialize = s390x_translate_init,
825
+ .translate_code = s390x_translate_code,
826
.restore_state_to_opc = s390x_restore_state_to_opc,
827
828
#ifdef CONFIG_USER_ONLY
829
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
830
index XXXXXXX..XXXXXXX 100644
831
--- a/target/s390x/tcg/translate.c
832
+++ b/target/s390x/tcg/translate.c
833
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
834
.disas_log = s390x_tr_disas_log,
835
};
836
837
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
838
- vaddr pc, void *host_pc)
839
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
840
+ int *max_insns, vaddr pc, void *host_pc)
841
{
842
DisasContext dc;
843
844
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
845
index XXXXXXX..XXXXXXX 100644
846
--- a/target/sh4/cpu.c
847
+++ b/target/sh4/cpu.c
848
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
849
850
static const TCGCPUOps superh_tcg_ops = {
851
.initialize = sh4_translate_init,
852
+ .translate_code = sh4_translate_code,
853
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
854
.restore_state_to_opc = superh_restore_state_to_opc,
855
856
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
857
index XXXXXXX..XXXXXXX 100644
858
--- a/target/sh4/translate.c
859
+++ b/target/sh4/translate.c
860
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
861
.tb_stop = sh4_tr_tb_stop,
862
};
863
864
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
865
- vaddr pc, void *host_pc)
866
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
867
+ int *max_insns, vaddr pc, void *host_pc)
868
{
869
DisasContext ctx;
870
871
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
872
index XXXXXXX..XXXXXXX 100644
873
--- a/target/sparc/cpu.c
874
+++ b/target/sparc/cpu.c
875
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
876
877
static const TCGCPUOps sparc_tcg_ops = {
878
.initialize = sparc_tcg_init,
879
+ .translate_code = sparc_translate_code,
880
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
881
.restore_state_to_opc = sparc_restore_state_to_opc,
882
883
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
884
index XXXXXXX..XXXXXXX 100644
885
--- a/target/sparc/translate.c
886
+++ b/target/sparc/translate.c
887
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
888
.tb_stop = sparc_tr_tb_stop,
889
};
890
891
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
892
- vaddr pc, void *host_pc)
893
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
894
+ int *max_insns, vaddr pc, void *host_pc)
895
{
896
DisasContext dc = {};
897
898
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
899
index XXXXXXX..XXXXXXX 100644
900
--- a/target/tricore/cpu.c
901
+++ b/target/tricore/cpu.c
902
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
903
904
static const TCGCPUOps tricore_tcg_ops = {
905
.initialize = tricore_tcg_init,
906
+ .translate_code = tricore_translate_code,
907
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
908
.restore_state_to_opc = tricore_restore_state_to_opc,
909
.tlb_fill = tricore_cpu_tlb_fill,
910
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
911
index XXXXXXX..XXXXXXX 100644
912
--- a/target/tricore/translate.c
913
+++ b/target/tricore/translate.c
914
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
915
.tb_stop = tricore_tr_tb_stop,
916
};
917
918
-
919
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
920
- vaddr pc, void *host_pc)
921
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
922
+ int *max_insns, vaddr pc, void *host_pc)
923
{
924
DisasContext ctx;
925
translator_loop(cs, tb, max_insns, pc, host_pc,
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
927
index XXXXXXX..XXXXXXX 100644
928
--- a/target/xtensa/cpu.c
929
+++ b/target/xtensa/cpu.c
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
931
932
static const TCGCPUOps xtensa_tcg_ops = {
933
.initialize = xtensa_translate_init,
934
+ .translate_code = xtensa_translate_code,
935
.debug_excp_handler = xtensa_breakpoint_handler,
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
937
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
939
index XXXXXXX..XXXXXXX 100644
940
--- a/target/xtensa/translate.c
941
+++ b/target/xtensa/translate.c
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
943
.tb_stop = xtensa_tr_tb_stop,
944
};
945
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
947
- vaddr pc, void *host_pc)
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
949
+ int *max_insns, vaddr pc, void *host_pc)
950
{
951
DisasContext dc = {};
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
953
--
954
2.43.0
955
956
9
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/s390x/tcg/translate.c
12
+++ b/target/s390x/tcg/translate.c
13
@@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
14
case CC_OP_TM_64:
15
switch (mask) {
16
case 8:
17
- cond = TCG_COND_EQ;
18
+ cond = TCG_COND_TSTEQ;
19
break;
20
case 4 | 2 | 1:
21
- cond = TCG_COND_NE;
22
+ cond = TCG_COND_TSTNE;
23
break;
24
default:
25
goto do_dynamic;
26
@@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
27
case CC_OP_ICM:
28
switch (mask) {
29
case 8:
30
- cond = TCG_COND_EQ;
31
+ cond = TCG_COND_TSTEQ;
32
break;
33
case 4 | 2 | 1:
34
case 4 | 2:
35
- cond = TCG_COND_NE;
36
+ cond = TCG_COND_TSTNE;
37
break;
38
default:
39
goto do_dynamic;
40
@@ -XXX,XX +XXX,XX @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
41
c->u.s64.a = cc_dst;
42
c->u.s64.b = tcg_constant_i64(0);
43
break;
44
+
45
case CC_OP_LTGT_64:
46
case CC_OP_LTUGTU_64:
47
- c->u.s64.a = cc_src;
48
- c->u.s64.b = cc_dst;
49
- break;
50
-
51
case CC_OP_TM_32:
52
case CC_OP_TM_64:
53
case CC_OP_ICM:
54
- c->u.s64.a = tcg_temp_new_i64();
55
- c->u.s64.b = tcg_constant_i64(0);
56
- tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
57
+ c->u.s64.a = cc_src;
58
+ c->u.s64.b = cc_dst;
59
break;
60
61
case CC_OP_ADDU:
62
--
63
2.34.1
64
65