The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h           |  14 -
 include/fpu/softfloat-types.h       |   2 +
 include/fpu/softfloat.h             |  14 +-
 include/hw/core/tcg-cpu-ops.h       |  13 +
 target/alpha/cpu.h                  |   2 +
 target/arm/internals.h              |   2 +
 target/avr/cpu.h                    |   2 +
 target/hexagon/cpu.h                |   2 +
 target/hexagon/fma_emu.h            |   3 -
 target/hppa/cpu.h                   |   2 +
 target/i386/tcg/helper-tcg.h        |   2 +
 target/loongarch/internals.h        |   2 +
 target/m68k/cpu.h                   |   2 +
 target/microblaze/cpu.h             |   2 +
 target/mips/tcg/tcg-internal.h      |   2 +
 target/openrisc/cpu.h               |   2 +
 target/ppc/cpu.h                    |   2 +
 target/riscv/cpu.h                  |   3 +
 target/rx/cpu.h                     |   2 +
 target/s390x/s390x-internal.h       |   2 +
 target/sh4/cpu.h                    |   2 +
 target/sparc/cpu.h                  |   2 +
 target/sparc/helper.h               |   4 +-
 target/tricore/cpu.h                |   2 +
 target/xtensa/cpu.h                 |   2 +
 accel/tcg/cpu-exec.c                |   8 +-
 accel/tcg/plugin-gen.c              |   9 +
 accel/tcg/translate-all.c           |   8 +-
 fpu/softfloat.c                     |  63 +--
 target/alpha/cpu.c                  |   1 +
 target/alpha/translate.c            |   4 +-
 target/arm/cpu.c                    |   1 +
 target/arm/tcg/cpu-v7m.c            |   1 +
 target/arm/tcg/helper-a64.c         |   6 +-
 target/arm/tcg/translate.c          |   5 +-
 target/avr/cpu.c                    |   1 +
 target/avr/translate.c              |   6 +-
 target/hexagon/cpu.c                |   1 +
 target/hexagon/fma_emu.c            | 496 ++++++---------------
 target/hexagon/op_helper.c          | 125 ++----
 target/hexagon/translate.c          |   4 +-
 target/hppa/cpu.c                   |   1 +
 target/hppa/translate.c             |   4 +-
 target/i386/tcg/tcg-cpu.c           |   1 +
 target/i386/tcg/translate.c         |   5 +-
 target/loongarch/cpu.c              |   1 +
 target/loongarch/tcg/translate.c    |   4 +-
 target/m68k/cpu.c                   |   1 +
 target/m68k/translate.c             |   4 +-
 target/microblaze/cpu.c             |   1 +
 target/microblaze/translate.c       |   4 +-
 target/mips/cpu.c                   |   1 +
 target/mips/tcg/translate.c         |   4 +-
 target/openrisc/cpu.c               |   1 +
 target/openrisc/translate.c         |   4 +-
 target/ppc/cpu_init.c               |   1 +
 target/ppc/translate.c              |   4 +-
 target/riscv/tcg/tcg-cpu.c          |   1 +
 target/riscv/translate.c            |   4 +-
 target/rx/cpu.c                     |   1 +
 target/rx/translate.c               |   4 +-
 target/s390x/cpu.c                  |   1 +
 target/s390x/tcg/translate.c        |   4 +-
 target/sh4/cpu.c                    |   1 +
 target/sh4/translate.c              |   4 +-
 target/sparc/cpu.c                  |   1 +
 target/sparc/fop_helper.c           |   8 +-
 target/sparc/translate.c            |  84 ++--
 target/tricore/cpu.c                |   1 +
 target/tricore/translate.c          |   5 +-
 target/xtensa/cpu.c                 |   1 +
 target/xtensa/translate.c           |   4 +-
 tcg/optimize.c                      | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c |   9 +-
 fpu/softfloat-parts.c.inc           |  16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
   17 | #include <inttypes.h>
      |          ^~~~~~~~~~~~
compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
    for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0
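
A side note on the fix above, not part of the original patch: the general pattern for avoiding <inttypes.h> in freestanding test code is to cast to a fixed wider type and use that type's plain printf specifier. A minimal sketch under that assumption (report() is a hypothetical helper; ml_printf comes from the minilib used by these tests):

    #include <stdint.h>

    /* Print a pointer and a 32-bit counter without PRIxPTR/PRId32 by
     * casting to unsigned long, which is wide enough on the system-mode
     * targets this test builds for. */
    static void report(const void *p, uint32_t count)
    {
        ml_printf("addr: 0x%lx\n", (unsigned long)(uintptr_t)p);
        ml_printf("count: %lu\n", (unsigned long)count);
    }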

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
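
For context (an illustration, not from the patch): once gen_cpu_index() returns tcg_constant_i32(cpu_index), every expression derived from it can fold at translation time. A host-side analogy of what the optimizer can then do with a scoreboard access (the names here are hypothetical):

    #include <stddef.h>

    /* With cpu_index a compile-time constant, the index arithmetic below
     * reduces to a constant offset, so the generated code needs only one
     * add of an immediate instead of a load, multiply, and add. */
    static inline void *scoreboard_entry(void *base, size_t entry_size,
                                         unsigned cpu_index)
    {
        return (char *)base + (size_t)cpu_index * entry_size;
    }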

We currently have a flag, float_muladd_halve_result, to scale
the result by 2**-1.  Extend this to handle arbitrary scaling.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat.h   |  6 ++++
 fpu/softfloat.c           | 58 ++++++++++++++++++++++-----------------
 fpu/softfloat-parts.c.inc |  7 +++--
 3 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
 float16 float16_sub(float16, float16, float_status *status);
 float16 float16_mul(float16, float16, float_status *status);
 float16 float16_muladd(float16, float16, float16, int, float_status *status);
+float16 float16_muladd_scalbn(float16, float16, float16,
+                              int, int, float_status *status);
 float16 float16_div(float16, float16, float_status *status);
 float16 float16_scalbn(float16, int, float_status *status);
 float16 float16_min(float16, float16, float_status *status);
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
 float32 float32_div(float32, float32, float_status *status);
 float32 float32_rem(float32, float32, float_status *status);
 float32 float32_muladd(float32, float32, float32, int, float_status *status);
+float32 float32_muladd_scalbn(float32, float32, float32,
+                              int, int, float_status *status);
 float32 float32_sqrt(float32, float_status *status);
 float32 float32_exp2(float32, float_status *status);
 float32 float32_log2(float32, float_status *status);
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
 float64 float64_div(float64, float64, float_status *status);
 float64 float64_rem(float64, float64, float_status *status);
 float64 float64_muladd(float64, float64, float64, int, float_status *status);
+float64 float64_muladd_scalbn(float64, float64, float64,
+                              int, int, float_status *status);
 float64 float64_sqrt(float64, float_status *status);
 float64 float64_log2(float64, float_status *status);
 FloatRelation float64_compare(float64, float64, float_status *status);
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
 #define parts_mul(A, B, S) \
     PARTS_GENERIC_64_128(mul, A)(A, B, S)
 
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
-                                    FloatParts64 *c, int flags,
-                                    float_status *s);
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
-                                      FloatParts128 *c, int flags,
-                                      float_status *s);
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
+                                           FloatParts64 *c, int scale,
+                                           int flags, float_status *s);
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
+                                             FloatParts128 *c, int scale,
+                                             int flags, float_status *s);
 
-#define parts_muladd(A, B, C, Z, S) \
-    PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
+    PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
 
 static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
                                  float_status *s);
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
  * Fused multiply-add
  */
 
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
-                                    int flags, float_status *status)
+float16 QEMU_FLATTEN
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
+                      int scale, int flags, float_status *status)
 {
     FloatParts64 pa, pb, pc, *pr;
 
     float16_unpack_canonical(&pa, a, status);
     float16_unpack_canonical(&pb, b, status);
     float16_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
 
     return float16_round_pack_canonical(pr, status);
 }
 
-static float32 QEMU_SOFTFLOAT_ATTR
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
-                float_status *status)
+float16 float16_muladd(float16 a, float16 b, float16 c,
+                       int flags, float_status *status)
+{
+    return float16_muladd_scalbn(a, b, c, 0, flags, status);
+}
+
+float32 QEMU_SOFTFLOAT_ATTR
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
+                      int scale, int flags, float_status *status)
 {
     FloatParts64 pa, pb, pc, *pr;
 
     float32_unpack_canonical(&pa, a, status);
     float32_unpack_canonical(&pb, b, status);
     float32_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
 
     return float32_round_pack_canonical(pr, status);
 }
 
-static float64 QEMU_SOFTFLOAT_ATTR
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
-                float_status *status)
+float64 QEMU_SOFTFLOAT_ATTR
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
+                      int scale, int flags, float_status *status)
 {
     FloatParts64 pa, pb, pc, *pr;
 
     float64_unpack_canonical(&pa, a, status);
     float64_unpack_canonical(&pb, b, status);
     float64_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
 
     return float64_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
     return ur.s;
 
 soft:
-    return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
+    return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
 }
 
 float64 QEMU_FLATTEN
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
     return ur.s;
 
 soft:
-    return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
+    return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
 }
 
 float64 float64r32_muladd(float64 a, float64 b, float64 c,
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
     float64_unpack_canonical(&pa, a, status);
     float64_unpack_canonical(&pb, b, status);
     float64_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
 
     return float64r32_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
     bfloat16_unpack_canonical(&pa, a, status);
     bfloat16_unpack_canonical(&pb, b, status);
     bfloat16_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
 
     return bfloat16_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
     float128_unpack_canonical(&pa, a, status);
     float128_unpack_canonical(&pb, b, status);
     float128_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
 
     return float128_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
 
     float64_unpack_canonical(&rp, float64_one, status);
     for (i = 0 ; i < 15 ; i++) {
         float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
-        rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
+        rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
         xnp = *parts_mul(&xnp, &xp, status);
     }
 
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
  * Requires A and C extracted into a double-sized structure to provide the
  * extra space for the widening multiply.
  */
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
-                                   FloatPartsN *c, int flags, float_status *s)
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
+                                          FloatPartsN *c, int scale,
+                                          int flags, float_status *s)
 {
     int ab_mask, abc_mask;
     FloatPartsW p_widen, c_widen;
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
     a->exp = p_widen.exp;
 
 return_normal:
+    /* TODO: Replace all use of float_muladd_halve_result with scale. */
     if (flags & float_muladd_halve_result) {
         a->exp -= 1;
     }
+    a->exp += scale;
 finish_sign:
     if (flags & float_muladd_negate_result) {
         a->sign ^= 1;
--
2.43.0
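
Put differently (an illustrative equivalence, not code from the patch): float64_muladd_scalbn(a, b, c, scale, flags, s) computes (a * b) + c scaled by 2**scale with a single rounding, so the existing float_muladd_halve_result flag corresponds to scale == -1. A minimal sketch of that correspondence:

    /* Sketch: the halve-result flag expressed through the new scale
     * argument; both forms round exactly once. */
    static float64 fma_halved_old(float64 a, float64 b, float64 c,
                                  float_status *s)
    {
        return float64_muladd(a, b, c, float_muladd_halve_result, s);
    }

    static float64 fma_halved_new(float64 a, float64 b, float64 c,
                                  float_status *s)
    {
        return float64_muladd_scalbn(a, b, c, -1, 0, s); /* (a*b + c) * 2**-1 */
    }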

No need to open-code 64x64->128-bit multiplication.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 32 +++-----------------------------
 1 file changed, 3 insertions(+), 29 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
     return -1;
 }
 
-static uint32_t int128_getw0(Int128 x)
-{
-    return int128_getlo(x);
-}
-
-static uint32_t int128_getw1(Int128 x)
-{
-    return int128_getlo(x) >> 32;
-}
-
 static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
 {
-    Int128 a, b;
-    uint64_t pp0, pp1a, pp1b, pp1s, pp2;
+    uint64_t l, h;
 
-    a = int128_make64(ai);
-    b = int128_make64(bi);
-    pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
-    pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
-    pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
-    pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
-
-    pp1s = pp1a + pp1b;
-    if ((pp1s < pp1a) || (pp1s < pp1b)) {
-        pp2 += (1ULL << 32);
-    }
-    uint64_t ret_low = pp0 + (pp1s << 32);
-    if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
-        pp2 += 1;
-    }
-
-    return int128_make128(ret_low, pp2 + (pp1s >> 32));
+    mulu64(&l, &h, ai, bi);
+    return int128_make128(l, h);
 }
 
 static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
--
2.43.0
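
For reference, mulu64() is the existing host-utils helper the patch switches to; it yields the full 128-bit product of two 64-bit operands. A minimal usage sketch (the demo function name is hypothetical):

    #include "qemu/host-utils.h"

    /* hi:lo = a * b, the same call int128_mul_6464() now makes. */
    static void full_mul_demo(uint64_t a, uint64_t b,
                              uint64_t *lo, uint64_t *hi)
    {
        mulu64(lo, hi, a, b);
    }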

This massive macro is now only used once.
Expand it for use only by float64.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
 1 file changed, 127 insertions(+), 128 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
 }
 
 /* Return a maximum finite value with the requested sign */
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
-{ \
-    if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
-        && ((a.guard | a.round | a.sticky) == 0)) { \
-        /* result zero */ \
-        switch (fp_status->float_rounding_mode) { \
-        case float_round_down: \
-            return zero_##SUFFIX(1); \
-        default: \
-            return zero_##SUFFIX(0); \
-        } \
-    } \
-    /* Normalize right */ \
-    /* We want MANTBITS bits of mantissa plus the leading one. */ \
-    /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
-    /* So we need to normalize right while the high word is non-zero and \
-     * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
-    while ((int128_gethi(a.mant) != 0) || \
-           ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
-        a = accum_norm_right(a, 1); \
-    } \
-    /* \
-     * OK, now normalize left \
-     * We want to normalize left until we have a leading one in bit 24 \
-     * Theoretically, we only need to shift a maximum of one to the left if we \
-     * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
-     * should be 0 \
-     */ \
-    while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
-        a = accum_norm_left(a); \
-    } \
-    /* \
-     * OK, now we might need to denormalize because of potential underflow. \
-     * We need to do this before rounding, and rounding might make us normal \
-     * again \
-     */ \
-    while (a.exp <= 0) { \
-        a = accum_norm_right(a, 1 - a.exp); \
-        /* \
-         * Do we have underflow? \
-         * That's when we get an inexact answer because we ran out of bits \
-         * in a denormal. \
-         */ \
-        if (a.guard || a.round || a.sticky) { \
-            float_raise(float_flag_underflow, fp_status); \
-        } \
-    } \
-    /* OK, we're relatively canonical... now we need to round */ \
-    if (a.guard || a.round || a.sticky) { \
-        float_raise(float_flag_inexact, fp_status); \
-        switch (fp_status->float_rounding_mode) { \
-        case float_round_to_zero: \
-            /* Chop and we're done */ \
-            break; \
-        case float_round_up: \
-            if (a.sign == 0) { \
-                a.mant = int128_add(a.mant, int128_one()); \
-            } \
-            break; \
-        case float_round_down: \
-            if (a.sign != 0) { \
-                a.mant = int128_add(a.mant, int128_one()); \
-            } \
-            break; \
-        default: \
-            if (a.round || a.sticky) { \
-                /* round up if guard is 1, down if guard is zero */ \
-                a.mant = int128_add(a.mant, int128_make64(a.guard)); \
-            } else if (a.guard) { \
-                /* exactly .5, round up if odd */ \
-                a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
-            } \
-            break; \
-        } \
-    } \
-    /* \
-     * OK, now we might have carried all the way up. \
-     * So we might need to shr once \
-     * at least we know that the lsb should be zero if we rounded and \
-     * got a carry out... \
-     */ \
-    if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
-        a = accum_norm_right(a, 1); \
-    } \
-    /* Overflow? */ \
-    if (a.exp >= INF_EXP) { \
-        /* Yep, inf result */ \
-        float_raise(float_flag_overflow, fp_status); \
-        float_raise(float_flag_inexact, fp_status); \
-        switch (fp_status->float_rounding_mode) { \
-        case float_round_to_zero: \
-            return maxfinite_##SUFFIX(a.sign); \
-        case float_round_up: \
-            if (a.sign == 0) { \
-                return infinite_##SUFFIX(a.sign); \
-            } else { \
-                return maxfinite_##SUFFIX(a.sign); \
-            } \
-        case float_round_down: \
-            if (a.sign != 0) { \
-                return infinite_##SUFFIX(a.sign); \
-            } else { \
-                return maxfinite_##SUFFIX(a.sign); \
-            } \
-        default: \
-            return infinite_##SUFFIX(a.sign); \
-        } \
-    } \
-    /* Underflow? */ \
-    if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
-        /* Leading one means: No, we're normal. So, we should be done... */ \
-        INTERNAL_TYPE ret; \
-        ret.i = 0; \
-        ret.sign = a.sign; \
-        ret.exp = a.exp; \
-        ret.mant = int128_getlo(a.mant); \
-        return ret.i; \
-    } \
-    assert(a.exp == 1); \
-    INTERNAL_TYPE ret; \
-    ret.i = 0; \
-    ret.sign = a.sign; \
-    ret.exp = 0; \
-    ret.mant = int128_getlo(a.mant); \
-    return ret.i; \
+static float64 accum_round_float64(Accum a, float_status *fp_status)
+{
+    if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
+        && ((a.guard | a.round | a.sticky) == 0)) {
+        /* result zero */
+        switch (fp_status->float_rounding_mode) {
+        case float_round_down:
+            return zero_float64(1);
+        default:
+            return zero_float64(0);
+        }
+    }
+    /*
+     * Normalize right
+     * We want DF_MANTBITS bits of mantissa plus the leading one.
+     * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
+     * So we need to normalize right while the high word is non-zero and
+     * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
+     */
+    while ((int128_gethi(a.mant) != 0) ||
+           ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
+        a = accum_norm_right(a, 1);
+    }
+    /*
+     * OK, now normalize left
+     * We want to normalize left until we have a leading one in bit 24
+     * Theoretically, we only need to shift a maximum of one to the left if we
+     * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
+     * should be 0
+     */
+    while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
+        a = accum_norm_left(a);
+    }
+    /*
+     * OK, now we might need to denormalize because of potential underflow.
+     * We need to do this before rounding, and rounding might make us normal
+     * again
+     */
+    while (a.exp <= 0) {
+        a = accum_norm_right(a, 1 - a.exp);
+        /*
+         * Do we have underflow?
+         * That's when we get an inexact answer because we ran out of bits
+         * in a denormal.
+         */
+        if (a.guard || a.round || a.sticky) {
+            float_raise(float_flag_underflow, fp_status);
+        }
+    }
+    /* OK, we're relatively canonical... now we need to round */
+    if (a.guard || a.round || a.sticky) {
+        float_raise(float_flag_inexact, fp_status);
+        switch (fp_status->float_rounding_mode) {
+        case float_round_to_zero:
+            /* Chop and we're done */
+            break;
+        case float_round_up:
+            if (a.sign == 0) {
+                a.mant = int128_add(a.mant, int128_one());
+            }
+            break;
+        case float_round_down:
+            if (a.sign != 0) {
+                a.mant = int128_add(a.mant, int128_one());
+            }
+            break;
+        default:
+            if (a.round || a.sticky) {
+                /* round up if guard is 1, down if guard is zero */
+                a.mant = int128_add(a.mant, int128_make64(a.guard));
+            } else if (a.guard) {
+                /* exactly .5, round up if odd */
+                a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
+            }
+            break;
+        }
+    }
+    /*
+     * OK, now we might have carried all the way up.
+     * So we might need to shr once
+     * at least we know that the lsb should be zero if we rounded and
+     * got a carry out...
+     */
+    if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
+        a = accum_norm_right(a, 1);
+    }
+    /* Overflow? */
+    if (a.exp >= DF_INF_EXP) {
+        /* Yep, inf result */
+        float_raise(float_flag_overflow, fp_status);
+        float_raise(float_flag_inexact, fp_status);
+        switch (fp_status->float_rounding_mode) {
+        case float_round_to_zero:
+            return maxfinite_float64(a.sign);
+        case float_round_up:
+            if (a.sign == 0) {
+                return infinite_float64(a.sign);
+            } else {
+                return maxfinite_float64(a.sign);
+            }
+        case float_round_down:
+            if (a.sign != 0) {
+                return infinite_float64(a.sign);
+            } else {
+                return maxfinite_float64(a.sign);
+            }
+        default:
+            return infinite_float64(a.sign);
+        }
+    }
+    /* Underflow? */
+    if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
+        /* Leading one means: No, we're normal. So, we should be done... */
+        Double ret;
+        ret.i = 0;
+        ret.sign = a.sign;
+        ret.exp = a.exp;
+        ret.mant = int128_getlo(a.mant);
+        return ret.i;
+    }
+    assert(a.exp == 1);
+    Double ret;
+    ret.i = 0;
+    ret.sign = a.sign;
+    ret.exp = 0;
+    ret.mant = int128_getlo(a.mant);
+    return ret.i;
 }
 
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status)
--
2.43.0
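
As background for the guard/round/sticky logic carried over above, here is a scalar sketch of the default (nearest-even) rounding case, with hypothetical names standing in for the Int128 mantissa handling:

    #include <stdint.h>

    /* guard is the first bit shifted out, round the second, and sticky
     * the OR of everything below them. */
    static uint64_t round_nearest_even(uint64_t mant, int guard, int round,
                                       int sticky)
    {
        if (round || sticky) {
            return mant + guard;      /* not exactly halfway: guard decides */
        }
        if (guard) {
            return mant + (mant & 1); /* exactly .5: round up if odd */
        }
        return mant;                  /* exact: nothing to round */
    }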

There are multiple special cases for this instruction.
(1) The saturate to normal maximum instead of overflow to infinity is
    handled by the new float_round_nearest_even_max rounding mode.
(2) The 0 * n + c special case is handled by the new
    float_muladd_suppress_add_product_zero flag.
(3) The Inf - Inf -> 0 special case can be detected after the fact
    by examining float_flag_invalid_isi.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 105 +++++++++----------------------
 1 file changed, 26 insertions(+), 79 deletions(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
     return RxV;
 }
 
-static bool is_zero_prod(float32 a, float32 b)
-{
-    return ((float32_is_zero(a) && is_finite(b)) ||
-            (float32_is_zero(b) && is_finite(a)));
-}
-
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
-{
-    float32 ret = dst;
-    if (float32_is_any_nan(x)) {
-        if (extract32(x, 22, 1) == 0) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        ret = make_float32(0xffffffff);    /* nan */
-    }
-    return ret;
-}
-
 float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
                          float32 RsV, float32 RtV, float32 PuV)
 {
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
     return RxV;
 }
 
-static bool is_inf_prod(int32_t a, int32_t b)
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
+                            float32 RsV, float32 RtV, int negate)
 {
-    return (float32_is_infinity(a) && float32_is_infinity(b)) ||
-           (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
-           (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
+    int flags;
+
+    arch_fpop_start(env);
+
+    set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
+    RxV = float32_muladd(RsV, RtV, RxV,
+                         negate | float_muladd_suppress_add_product_zero,
+                         &env->fp_status);
+
+    flags = get_float_exception_flags(&env->fp_status);
+    if (flags) {
+        /* Flags are suppressed by this instruction. */
+        set_float_exception_flags(0, &env->fp_status);
+
+        /* Return 0 for Inf - Inf. */
+        if (flags & float_flag_invalid_isi) {
+            RxV = 0;
202
+ */
203
+ return rem >> sh;
204
}
205
}
206
207
/*
208
- * Signed 128-by-64 division. Returns quotient via plow and
209
- * remainder via phigh.
210
- * The result must fit in 64 bits (plow) - otherwise, the result
211
- * is undefined.
212
- * This function will cause a division by zero if passed a zero divisor.
213
+ * Signed 128-by-64 division.
214
+ * Returns quotient via plow and phigh.
215
+ * Also returns the remainder via the function return value.
216
*/
217
-void divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
218
+int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
219
{
220
- int sgn_dvdnd = *phigh < 0;
221
- int sgn_divsr = divisor < 0;
222
+ bool neg_quotient = false, neg_remainder = false;
223
+ uint64_t unsig_hi = *phigh, unsig_lo = *plow;
224
+ uint64_t rem;
225
226
- if (sgn_dvdnd) {
227
- *plow = ~(*plow);
228
- *phigh = ~(*phigh);
229
- if (*plow == (int64_t)-1) {
230
+ if (*phigh < 0) {
231
+ neg_quotient = !neg_quotient;
232
+ neg_remainder = !neg_remainder;
233
+
234
+ if (unsig_lo == 0) {
235
+ unsig_hi = -unsig_hi;
236
+ } else {
237
+ unsig_hi = ~unsig_hi;
238
+ unsig_lo = -unsig_lo;
72
+ }
239
+ }
73
+ }
240
+ }
74
+
241
+
75
+ arch_fpop_end(env);
242
+ if (divisor < 0) {
76
+ return RxV;
243
+ neg_quotient = !neg_quotient;
77
}
244
+
78
245
+ divisor = -divisor;
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
246
+ }
80
float32 RsV, float32 RtV)
247
+
81
{
248
+ rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor);
82
- bool infinp;
249
+
83
- bool infminusinf;
250
+ if (neg_quotient) {
84
- float32 tmp;
251
+ if (unsig_lo == 0) {
252
+ *phigh = -unsig_hi;
253
*plow = 0;
254
- (*phigh)++;
255
- } else {
256
- (*plow)++;
257
- }
258
+ } else {
259
+ *phigh = ~unsig_hi;
260
+ *plow = -unsig_lo;
261
+ }
262
+ } else {
263
+ *phigh = unsig_hi;
264
+ *plow = unsig_lo;
265
}
266
267
- if (sgn_divsr) {
268
- divisor = 0 - divisor;
269
- }
85
-
270
-
86
- arch_fpop_start(env);
271
- divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
111
}
112
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
114
float32 RsV, float32 RtV)
115
{
116
- bool infinp;
117
- bool infminusinf;
118
- float32 tmp;
119
-
272
-
120
- arch_fpop_start(env);
273
- if (sgn_dvdnd ^ sgn_divsr) {
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
274
- *plow = 0 - *plow;
122
- infminusinf = float32_is_infinity(RxV) &&
275
+ if (neg_remainder) {
123
- is_inf_prod(RsV, RtV) &&
276
+ return -rem;
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
277
+ } else {
125
- infinp = float32_is_infinity(RxV) ||
278
+ return rem;
126
- float32_is_infinity(RtV) ||
279
}
127
- float32_is_infinity(RsV);
280
}
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
281
#endif
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
146
}
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
149
--
282
--
150
2.43.0
283
2.25.1
284
285
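For reference, the Inf - Inf case handled above can be reproduced with the
host FPU.  The following is a standalone sketch only: it uses C99 fmaf()
and <fenv.h> as stand-ins for QEMU's float32_muladd() and fp_status, and
the host FE_INVALID flag as a stand-in for float_flag_invalid_isi (the
host flag does not distinguish the inf-minus-inf sub-case the way
softfloat does).

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        feclearexcept(FE_ALL_EXCEPT);
        /* inf * 1 + (-inf) is an IEEE invalid operation yielding NaN. */
        float r = fmaf(INFINITY, 1.0f, -INFINITY);
        int invalid = fetestexcept(FE_INVALID) != 0;

        /* do_sffma_lib() suppresses the flag and substitutes 0. */
        printf("raw=%f invalid=%d -> helper would return %f\n",
               r, invalid, invalid ? 0.0f : r);
        return 0;
    }
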
Initialize x with accumulated via direct assignment,
rather than multiplying by 1.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
         float64_is_infinity(b)) {
         return float64_mul(a, b, fp_status);
     }
-    x.mant = int128_mul_6464(accumulated, 1);
+    x.mant = int128_make64(accumulated);
     x.sticky = sticky;
     prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
     x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
--
2.43.0

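The change relies on the identity that widening a 64-bit value to 128 bits
gives the same result as multiplying it by 1; only the construction is
simpler.  A standalone sketch, using the compiler's unsigned __int128 as a
stand-in for QEMU's Int128 (which may be a struct on 32-bit hosts):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t accumulated = 0xdeadbeefcafef00dULL;

        /* old: int128_mul_6464(accumulated, 1) */
        unsigned __int128 via_mul = (unsigned __int128)accumulated * 1;
        /* new: int128_make64(accumulated) */
        unsigned __int128 via_make = (unsigned __int128)accumulated;

        assert(via_mul == via_make);
        return 0;
    }
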
Avoid the use of the OptContext slots.  Find TempOptInfo once.
When we fold to and, use fold_and.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
 {
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
+    int ofs = op->args[3];
+    int len = op->args[4];
     TCGOpcode and_opc;
+    uint64_t z_mask;
 
-    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-        uint64_t t1 = arg_info(op->args[1])->val;
-        uint64_t t2 = arg_info(op->args[2])->val;
-
-        t1 = deposit64(t1, op->args[3], op->args[4], t2);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    if (ti_is_const(t1) && ti_is_const(t2)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                deposit64(ti_const_val(t1), ofs, len,
+                                          ti_const_val(t2)));
     }
 
     switch (ctx->type) {
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     }
 
     /* Inserting a value into zero at offset 0. */
-    if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
-        uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
+    if (ti_is_const_val(t1, 0) && ofs == 0) {
+        uint64_t mask = MAKE_64BIT_MASK(0, len);
 
         op->opc = and_opc;
         op->args[1] = op->args[2];
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
     /* Inserting zero into a value. */
-    if (arg_is_const_val(op->args[2], 0)) {
-        uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
+    if (ti_is_const_val(t2, 0)) {
+        uint64_t mask = deposit64(-1, ofs, len, 0);
 
         op->opc = and_opc;
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
-    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                            op->args[3], op->args[4],
-                            arg_info(op->args[2])->z_mask);
-    return false;
+    z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
--
2.43.0

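The z_mask propagation above follows from deposit64() acting bitwise:
result bits come from t1 outside the deposited field and from t2 inside
it, so depositing the two known-nonzero masks bounds the result.  A
standalone sketch, with deposit64() re-implemented locally to mirror
qemu/bitops.h:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t deposit64(uint64_t value, int start, int length,
                              uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        /* 1-bits mark positions that may be nonzero. */
        uint64_t z1 = 0xffff, z2 = 0xff;
        /* Sample values respecting those masks. */
        uint64_t t1 = 0x1234, t2 = 0x56;

        uint64_t res = deposit64(t1, 8, 8, t2);
        uint64_t z_mask = deposit64(z1, 8, 8, z2);

        assert((res & ~z_mask) == 0);   /* result stays within the mask */
        return 0;
    }
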
All instances of s_mask have been converted to the new
representation.  We can now re-enable usage.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
 
-    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask = s_mask_old >> pos;
     s_mask |= -1ull << (len - 1);
 
-    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
--
2.43.0

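fold_affected_mask() removes an extension whose input is already
sign-extended; with the new s_mask representation in place everywhere,
the test can be turned back on.  A standalone sketch of the underlying
no-op condition for ext16s (an illustration of the idea only, not of the
s_mask bookkeeping itself):

    #include <assert.h>
    #include <stdint.h>

    static int ext16s_is_noop(int64_t v)
    {
        /* True iff bits 16..63 are already copies of bit 15. */
        return v == (int16_t)v;
    }

    int main(void)
    {
        assert(ext16s_is_noop(0x1234));
        assert(ext16s_is_noop((int64_t)0xffffffffffff8000ULL));
        assert(!ext16s_is_noop(0x8000));   /* bit 15 set, high bits clear */
        return 0;
    }
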
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
         return fold_setcond(ctx, op);
     }
 
-    ctx->z_mask = 1;
-    return false;
+    return fold_masks_z(ctx, op, 1);
 
 do_setcond_const:
     return tcg_opt_gen_movi(ctx, op, op->args[0], i);
--
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)

 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
+ uint64_t s_mask;
+
 if (fold_const2_commutative(ctx, op) ||
 fold_xi_to_not(ctx, op, -1)) {
 return true;
 }

- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return false;
+ s_mask = arg_info(op->args[1])->s_mask
+ & arg_info(op->args[2])->s_mask;
+ return fold_masks_s(ctx, op, s_mask);
 }

 static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
--
2.43.0

Adjust the interface to take the OptContext parameter instead
of TCGContext or both.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 67 +++++++++++++++++++++++++-------------------------
1 file changed, 34 insertions(+), 33 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
 } TempOptInfo;

 typedef struct OptContext {
+ TCGContext *tcg;
 TCGTempSet temps_used;
 } OptContext;

@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
 return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }

-static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
+static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
 TCGTemp *dst_ts = arg_temp(dst);
 TCGTemp *src_ts = arg_temp(src);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
 TCGOpcode new_op;

 if (ts_are_copies(dst_ts, src_ts)) {
- tcg_op_remove(s, op);
+ tcg_op_remove(ctx->tcg, op);
 return;
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src)
 }

-static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
- TCGOp *op, TCGArg dst, uint64_t val)
+static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+ TCGArg dst, uint64_t val)
 {
 const TCGOpDef *def = &tcg_op_defs[op->opc];
 TCGType type;
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(TCGContext *s, OptContext *ctx,
 /* Convert movi to mov with constant temp. */
 tv = tcg_constant_internal(type, val);
 init_ts_info(ctx, tv);
- tcg_opt_gen_mov(s, op, dst, temp_arg(tv));
+ tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }

 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 {
 int nb_temps, nb_globals, i;
 TCGOp *op, *op_next, *prev_mb = NULL;
- OptContext ctx = {};
+ OptContext ctx = { .tcg = s };

 /* Array VALS has an element for each temp.
 If this temp holds a constant then its value is kept in VALS' element.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 CASE_OP_32_64(rotr):
 if (arg_is_const(op->args[1])
 && arg_info(op->args[1])->val == 0) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (!arg_is_const(op->args[1])
 && arg_is_const(op->args[2])
 && arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (!arg_is_const(op->args[1])
 && arg_is_const(op->args[2])
 && arg_info(op->args[2])->val == -1) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

 if (partmask == 0) {
 tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
 continue;
 }
 if (affected == 0) {
 tcg_debug_assert(nb_oargs == 1);
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
 continue;
 }

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 CASE_OP_32_64(mulsh):
 if (arg_is_const(op->args[2])
 && arg_info(op->args[2])->val == 0) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 CASE_OP_32_64_VEC(or):
 CASE_OP_32_64_VEC(and):
 if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 CASE_OP_32_64_VEC(sub):
 CASE_OP_32_64_VEC(xor):
 if (args_are_copies(op->args[1], op->args[2])) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], 0);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 allocator where needed and possible. Also detect copies. */
 switch (opc) {
 CASE_OP_32_64_VEC(mov):
- tcg_opt_gen_mov(s, op, op->args[0], op->args[1]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
 continue;

 case INDEX_op_dup_vec:
 if (arg_is_const(op->args[1])) {
 tmp = arg_info(op->args[1])->val;
 tmp = dup_const(TCGOP_VECE(op), tmp);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 case INDEX_op_dup2_vec:
 assert(TCG_TARGET_REG_BITS == 32);
 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0],
+ tcg_opt_gen_movi(&ctx, op, op->args[0],
 deposit64(arg_info(op->args[1])->val, 32, 32,
 arg_info(op->args[2])->val));
 continue;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 case INDEX_op_extrh_i64_i32:
 if (arg_is_const(op->args[1])) {
 tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (arg_is_const(op->args[1])) {
 tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
 op->args[2]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
 tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
 arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 TCGArg v = arg_info(op->args[1])->val;
 if (v != 0) {
 tmp = do_constant_folding(opc, v, 0);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 } else {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[2]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
 }
 continue;
 }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 tmp = deposit64(arg_info(op->args[1])->val,
 op->args[3], op->args[4],
 arg_info(op->args[2])->val);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (arg_is_const(op->args[1])) {
 tmp = extract64(arg_info(op->args[1])->val,
 op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (arg_is_const(op->args[1])) {
 tmp = sextract64(arg_info(op->args[1])->val,
 op->args[2], op->args[3]);
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 tmp = (int32_t)(((uint32_t)v1 >> shr) |
 ((uint32_t)v2 << (32 - shr)));
 }
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 tmp = do_constant_folding_cond(opc, op->args[1],
 op->args[2], op->args[3]);
 if (tmp != 2) {
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 tmp = do_constant_folding_cond(opc, op->args[1],
 op->args[2], op->args[5]);
 if (tmp != 2) {
- tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]);
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
 continue;
 }
 if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

 rl = op->args[0];
 rh = op->args[1];
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)a);
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(a >> 32));
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)a);
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(a >> 32));
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

 rl = op->args[0];
 rh = op->args[1];
- tcg_opt_gen_movi(s, &ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(s, &ctx, op2, rh, (int32_t)(r >> 32));
+ tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
+ tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
 continue;
 }
 break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 op->args[5]);
 if (tmp != 2) {
 do_setcond_const:
- tcg_opt_gen_movi(s, &ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
 continue;
 }
 if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
--
2.25.1
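For illustration only — a minimal, self-contained sketch of the refactoring
pattern the patch above applies (stub types and hypothetical names, not the
actual QEMU code): hang the wider context off a single optimizer context so
helpers take one parameter instead of two.

    typedef struct TCGContext { int dummy; } TCGContext;   /* stub */

    typedef struct OptContext {
        TCGContext *tcg;   /* back-pointer replaces the extra parameter */
    } OptContext;

    /* Before: helper(TCGContext *s, OptContext *ctx, ...);
     * after: everything is reachable from one OptContext. */
    static void helper(OptContext *ctx)
    {
        TCGContext *s = ctx->tcg;   /* recover the TCGContext when needed */
        (void)s;
    }

    int main(void)
    {
        TCGContext tcg = { 0 };
        OptContext ctx = { .tcg = &tcg };
        helper(&ctx);
        return 0;
    }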
All non-default cases now finish folding within each function.
Do the same with the default case and assert it is done after.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 done = true;
 break;
 default:
+ done = finish_folding(&ctx, op);
 break;
 }
-
- if (!done) {
- finish_folding(&ctx, op);
- }
+ tcg_debug_assert(done);
 }
 }
--
2.43.0

This will expose the variable to subroutines that
will be broken out of tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {

 typedef struct OptContext {
 TCGContext *tcg;
+ TCGOp *prev_mb;
 TCGTempSet temps_used;
 } OptContext;

@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 void tcg_optimize(TCGContext *s)
 {
 int nb_temps, nb_globals, i;
- TCGOp *op, *op_next, *prev_mb = NULL;
+ TCGOp *op, *op_next;
 OptContext ctx = { .tcg = s };

 /* Array VALS has an element for each temp.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 }

 /* Eliminate duplicate and redundant fence instructions. */
- if (prev_mb) {
+ if (ctx.prev_mb) {
 switch (opc) {
 case INDEX_op_mb:
 /* Merge two barriers of the same type into one,
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 * barrier. This is stricter than specified but for
 * the purposes of TCG is better than not optimizing.
 */
- prev_mb->args[0] |= op->args[0];
+ ctx.prev_mb->args[0] |= op->args[0];
 tcg_op_remove(s, op);
 break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 case INDEX_op_qemu_st_i64:
 case INDEX_op_call:
 /* Opcodes that touch guest memory stop the optimization. */
- prev_mb = NULL;
+ ctx.prev_mb = NULL;
 break;
 }
 } else if (opc == INDEX_op_mb) {
- prev_mb = op;
+ ctx.prev_mb = op;
 }
 }
 }
--
2.25.1
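A standalone sketch, for illustration only, of the idea behind moving the
barrier-tracking local into the context struct (stand-in types, not QEMU
code): state held in the context survives being split across subroutines.

    typedef struct Op { int is_barrier; } Op;

    typedef struct OptContext {
        Op *prev_mb;   /* was a local in the main loop; now shared state */
    } OptContext;

    /* A subroutine broken out of the main loop can still see and
     * update the barrier-tracking state through the context. */
    static void track_barrier(OptContext *ctx, Op *op)
    {
        if (op->is_barrier) {
            ctx->prev_mb = op;
        } else {
            ctx->prev_mb = 0;   /* e.g. a memory-touching op ends the window */
        }
    }

    int main(void)
    {
        OptContext ctx = { 0 };
        Op op = { 1 };
        track_barrier(&ctx, &op);
        return ctx.prev_mb != 0 ? 0 : 1;
    }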
Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 24 +++++++++---------------
1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
 static bool fold_sextract(OptContext *ctx, TCGOp *op)
 {
 uint64_t z_mask, s_mask, s_mask_old;
+ TempOptInfo *t1 = arg_info(op->args[1]);
 int pos = op->args[2];
 int len = op->args[3];

- if (arg_is_const(op->args[1])) {
- uint64_t t;
-
- t = arg_info(op->args[1])->val;
- t = sextract64(t, pos, len);
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+ if (ti_is_const(t1)) {
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
+ sextract64(ti_const_val(t1), pos, len));
 }

- z_mask = arg_info(op->args[1])->z_mask;
- z_mask = sextract64(z_mask, pos, len);
- ctx->z_mask = z_mask;
-
- s_mask_old = arg_info(op->args[1])->s_mask;
- s_mask = sextract64(s_mask_old, pos, len);
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
- ctx->s_mask = s_mask;
+ s_mask_old = t1->s_mask;
+ s_mask = s_mask_old >> pos;
+ s_mask |= -1ull << (len - 1);

 if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
 return true;
 }

- return fold_masks(ctx, op);
+ z_mask = sextract64(t1->z_mask, pos, len);
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

 static bool fold_shift(OptContext *ctx, TCGOp *op)
--
2.43.0

There was no real reason for calls to have separate code here.
Unify init for calls vs non-calls using the call path, which
handles TCG_CALL_DUMMY_ARG.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
 }
 }

-static void init_arg_info(OptContext *ctx, TCGArg arg)
-{
- init_ts_info(ctx, arg_temp(arg));
-}
-
 static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
 {
 TCGTemp *i, *g, *l;
@@ -XXX,XX +XXX,XX @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
 return false;
 }

+static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
+{
+ for (int i = 0; i < nb_args; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (ts) {
+ init_ts_info(ctx, ts);
+ }
+ }
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (opc == INDEX_op_call) {
 nb_oargs = TCGOP_CALLO(op);
 nb_iargs = TCGOP_CALLI(op);
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
- TCGTemp *ts = arg_temp(op->args[i]);
- if (ts) {
- init_ts_info(&ctx, ts);
- }
- }
 } else {
 nb_oargs = def->nb_oargs;
 nb_iargs = def->nb_iargs;
- for (i = 0; i < nb_oargs + nb_iargs; i++) {
- init_arg_info(&ctx, op->args[i]);
- }
 }
+ init_arguments(&ctx, op, nb_oargs + nb_iargs);

 /* Do copy propagation */
 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
--
2.25.1
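For illustration, a minimal sketch of the unification idea above, with
stand-in types (not QEMU code): one NULL-tolerant loop serves both the call
path, which may carry dummy arguments, and the non-call path.

    #include <stddef.h>

    typedef struct Temp { int initialized; } Temp;

    /* The NULL check makes the single loop safe for dummy call
     * arguments that have no backing temp. */
    static void init_arguments(Temp **args, int nb_args)
    {
        for (int i = 0; i < nb_args; i++) {
            if (args[i]) {
                args[i]->initialized = 1;
            }
        }
    }

    int main(void)
    {
        Temp t = { 0 };
        Temp *args[3] = { &t, NULL, &t };   /* NULL ~ a dummy argument */
        init_arguments(args, 3);
        return t.initialized ? 0 : 1;
    }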
Change return from bool to int; distinguish between
complete folding, simplification, and no change.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 22 ++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
 return finish_folding(ctx, op);
 }

-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
 {
 uint64_t a_zmask, b_val;
 TCGCond cond;
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
 op->opc = xor_opc;
 op->args[2] = arg_new_constant(ctx, 1);
 }
- return false;
+ return -1;
 }
 }
-
- return false;
+ return 0;
 }

 static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
 }

- if (fold_setcond_zmask(ctx, op, false)) {
+ i = fold_setcond_zmask(ctx, op, false);
+ if (i > 0) {
 return true;
 }
- fold_setcond_tst_pow2(ctx, op, false);
+ if (i == 0) {
+ fold_setcond_tst_pow2(ctx, op, false);
+ }

 ctx->z_mask = 1;
 return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
 }

- if (fold_setcond_zmask(ctx, op, true)) {
+ i = fold_setcond_zmask(ctx, op, true);
+ if (i > 0) {
 return true;
 }
- fold_setcond_tst_pow2(ctx, op, true);
+ if (i == 0) {
+ fold_setcond_tst_pow2(ctx, op, true);
+ }

 /* Value is {0,-1} so all bits are repetitions of the sign. */
 ctx->s_mask = -1;
--
2.43.0

Continue splitting tcg_optimize.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 22 ++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
 }
 }

+static void copy_propagate(OptContext *ctx, TCGOp *op,
+ int nb_oargs, int nb_iargs)
+{
+ TCGContext *s = ctx->tcg;
+
+ for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (ts && ts_is_copy(ts)) {
+ op->args[i] = temp_arg(find_better_copy(s, ts));
+ }
+ }
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 nb_iargs = def->nb_iargs;
 }
 init_arguments(&ctx, op, nb_oargs + nb_iargs);
-
- /* Do copy propagation */
- for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
- TCGTemp *ts = arg_temp(op->args[i]);
- if (ts && ts_is_copy(ts)) {
- op->args[i] = temp_arg(find_better_copy(s, ts));
- }
- }
+ copy_propagate(&ctx, op, nb_oargs, nb_iargs);

 /* For commutative operations make constant second argument */
 switch (opc) {
--
2.25.1
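As an illustration of the tri-state convention introduced above — a small
self-contained example with hypothetical names, not the QEMU code itself:

    #include <stdio.h>

    /* Return 1 if finished, -1 if simplified, 0 if unchanged,
     * mirroring the convention the patch gives fold_setcond_zmask(). */
    static int try_fold(int value)
    {
        if (value == 0) {
            return 1;    /* fully folded; the caller is done */
        }
        if (value < 0) {
            return -1;   /* rewritten to a simpler op; stop here too */
        }
        return 0;        /* nothing matched; try other transformations */
    }

    int main(void)
    {
        int i = try_fold(42);
        if (i > 0) {
            return 0;    /* complete folding */
        }
        if (i == 0) {
            puts("try the next transformation");
        }
        return 0;
    }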
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 47 +++++++++++++++++++++++++++++++----------------
1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
 }
 }

+static void finish_bb(OptContext *ctx)
+{
+ /* We only optimize memory barriers across basic blocks. */
+ ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+ finish_bb(ctx);
+ /* We only optimize across extended basic blocks. */
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
 const TCGOpDef *def = &tcg_op_defs[op->opc];
 int i, nb_oargs;

- /*
- * We only optimize extended basic blocks. If the opcode ends a BB
- * and is not a conditional branch, reset all temp data.
- */
- if (def->flags & TCG_OPF_BB_END) {
- ctx->prev_mb = NULL;
- if (!(def->flags & TCG_OPF_COND_BRANCH)) {
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
- remove_mem_copy_all(ctx);
- }
- return;
- }
-
 nb_oargs = def->nb_oargs;
 for (i = 0; i < nb_oargs; i++) {
 TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
 if (i > 0) {
 op->opc = INDEX_op_br;
 op->args[0] = op->args[3];
+ finish_ebb(ctx);
+ } else {
+ finish_bb(ctx);
 }
- return false;
+ return true;
 }

 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 }
 op->opc = INDEX_op_br;
 op->args[0] = label;
- break;
+ finish_ebb(ctx);
+ return true;
 }
- return false;
+
+ finish_bb(ctx);
+ return true;
 }

 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 CASE_OP_32_64_VEC(xor):
 done = fold_xor(&ctx, op);
 break;
+ case INDEX_op_set_label:
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ case INDEX_op_goto_ptr:
+ finish_ebb(&ctx);
+ done = true;
+ break;
 default:
 break;
 }
--
2.43.0

Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 22 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
 }
 }

+static bool fold_call(OptContext *ctx, TCGOp *op)
+{
+ TCGContext *s = ctx->tcg;
+ int nb_oargs = TCGOP_CALLO(op);
+ int nb_iargs = TCGOP_CALLI(op);
+ int flags, i;
+
+ init_arguments(ctx, op, nb_oargs + nb_iargs);
+ copy_propagate(ctx, op, nb_oargs, nb_iargs);
+
+ /* If the function reads or writes globals, reset temp data. */
+ flags = tcg_call_flags(op);
+ if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
+ int nb_globals = s->nb_globals;
+
+ for (i = 0; i < nb_globals; i++) {
+ if (test_bit(i, ctx->temps_used.l)) {
+ reset_ts(&ctx->tcg->temps[i]);
+ }
+ }
+ }
+
+ /* Reset temp data for outputs. */
+ for (i = 0; i < nb_oargs; i++) {
+ reset_temp(op->args[i]);
+ }
+
+ /* Stop optimizing MB across calls. */
+ ctx->prev_mb = NULL;
+ return true;
+}
+
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
- int nb_temps, nb_globals, i;
+ int nb_temps, i;
 TCGOp *op, *op_next;
 OptContext ctx = { .tcg = s };

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 available through the doubly linked circular list. */

 nb_temps = s->nb_temps;
- nb_globals = s->nb_globals;
-
 for (i = 0; i < nb_temps; ++i) {
 s->temps[i].state_ptr = NULL;
 }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 uint64_t z_mask, partmask, affected, tmp;
 int nb_oargs, nb_iargs;
 TCGOpcode opc = op->opc;
- const TCGOpDef *def = &tcg_op_defs[opc];
+ const TCGOpDef *def;

- /* Count the arguments, and initialize the temps that are
- going to be used */
+ /* Calls are special. */
 if (opc == INDEX_op_call) {
- nb_oargs = TCGOP_CALLO(op);
- nb_iargs = TCGOP_CALLI(op);
- } else {
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
+ fold_call(&ctx, op);
+ continue;
 }
+
+ def = &tcg_op_defs[opc];
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
 init_arguments(&ctx, op, nb_oargs + nb_iargs);
 copy_propagate(&ctx, op, nb_oargs, nb_iargs);

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (def->flags & TCG_OPF_BB_END) {
 memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
 } else {
- if (opc == INDEX_op_call &&
- !(tcg_call_flags(op)
- & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
- for (i = 0; i < nb_globals; i++) {
- if (test_bit(i, ctx.temps_used.l)) {
- reset_ts(&s->temps[i]);
- }
- }
- }
-
 for (i = 0; i < nb_oargs; i++) {
 reset_temp(op->args[i]);
 /* Save the corresponding known-zero bits mask for the
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 case INDEX_op_qemu_st_i32:
 case INDEX_op_qemu_st8_i32:
 case INDEX_op_qemu_st_i64:
- case INDEX_op_call:
 /* Opcodes that touch guest memory stop the optimization. */
 ctx.prev_mb = NULL;
 break;
--
2.25.1
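For illustration, a minimal sketch of the two reset scopes the patch above
separates (stub fields, hypothetical shapes — not the QEMU code): a basic
block boundary only ends the barrier window, while an extended basic block
boundary additionally forgets all temp data.

    #include <string.h>

    typedef struct OptContext {
        void *prev_mb;
        unsigned char temps_used[32];
    } OptContext;

    /* A plain BB boundary only ends the memory-barrier window... */
    static void finish_bb(OptContext *ctx)
    {
        ctx->prev_mb = 0;
    }

    /* ...while an EBB boundary additionally drops all temp data. */
    static void finish_ebb(OptContext *ctx)
    {
        finish_bb(ctx);
        memset(ctx->temps_used, 0, sizeof(ctx->temps_used));
    }

    int main(void)
    {
        OptContext ctx = { 0 };
        finish_bb(&ctx);    /* e.g. after a conditional branch */
        finish_ebb(&ctx);   /* e.g. after br / exit_tb / goto_tb */
        return 0;
    }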
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
 TCGType type;

 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
- return false;
+ return finish_folding(ctx, op);
 }

 type = ctx->type;
--
2.43.0

Rather than try to keep these up-to-date across folding,
re-read nb_oargs at the end, after re-reading the opcode.

A couple of asserts need dropping, but that will take care
of itself as we split the function further.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
 uint64_t z_mask, partmask, affected, tmp;
- int nb_oargs, nb_iargs;
 TCGOpcode opc = op->opc;
 const TCGOpDef *def;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 }

 def = &tcg_op_defs[opc];
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
- init_arguments(&ctx, op, nb_oargs + nb_iargs);
- copy_propagate(&ctx, op, nb_oargs, nb_iargs);
+ init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
+ copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

 /* For commutative operations make constant second argument */
 switch (opc) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

 CASE_OP_32_64(qemu_ld):
 {
- MemOpIdx oi = op->args[nb_oargs + nb_iargs];
+ MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
 MemOp mop = get_memop(oi);
 if (!(mop & MO_SIGN)) {
 z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 }

 if (partmask == 0) {
- tcg_debug_assert(nb_oargs == 1);
 tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
 continue;
 }
 if (affected == 0) {
- tcg_debug_assert(nb_oargs == 1);
 tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
 continue;
 }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 } else if (args_are_copies(op->args[1], op->args[2])) {
 op->opc = INDEX_op_dup_vec;
 TCGOP_VECE(op) = MO_32;
- nb_iargs = 1;
 }
 break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 op->opc = opc = (opc == INDEX_op_movcond_i32
 ? INDEX_op_setcond_i32
 : INDEX_op_setcond_i64);
- nb_iargs = 2;
 }
 break;

@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (def->flags & TCG_OPF_BB_END) {
 memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
 } else {
+ int nb_oargs = def->nb_oargs;
 for (i = 0; i < nb_oargs; i++) {
 reset_temp(op->args[i]);
 /* Save the corresponding known-zero bits mask for the
--
2.25.1
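A tiny illustrative sketch of the maintenance pattern above (stand-in
types, not QEMU code): re-derive dependent data at the point of use rather
than patching cached copies at every rewrite site.

    typedef struct OpDef { int nb_oargs; } OpDef;
    typedef struct Op { int opc; } Op;

    static const OpDef defs[2] = { { 1 }, { 2 } };

    int main(void)
    {
        Op op = { 0 };
        /* ... folding may rewrite op.opc here ... */
        op.opc = 1;
        /* Re-read after the rewrites instead of keeping a stale copy. */
        const OpDef *def = &defs[op.opc];
        return def->nb_oargs == 2 ? 0 : 1;
    }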
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
 return fold_orc(ctx, op);
 }
 }
- return false;
+ return finish_folding(ctx, op);
 }

 /* Propagate constants and copies, fold constant expressions. */
--
2.43.0

Return -1 instead of 2 for failure, so that we can
use comparisons against 0 for all cases.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 145 +++++++++++++++++++++++++------------------------
1 file changed, 74 insertions(+), 71 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
 }
 }

-/* Return 2 if the condition can't be simplified, and the result
- of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
- TCGArg y, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
+ TCGArg y, TCGCond c)
 {
 uint64_t xv = arg_info(x)->val;
 uint64_t yv = arg_info(y)->val;
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
 case TCG_COND_GEU:
 return 1;
 default:
- return 2;
+ return -1;
 }
 }
 }
- return 2;
+ return -1;
 }

-/* Return 2 if the condition can't be simplified, and the result
- of the condition (0 or 1) if it can */
-static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
 {
 TCGArg al = p1[0], ah = p1[1];
 TCGArg bl = p2[0], bh = p2[1];
@@ -XXX,XX +XXX,XX @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
 if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
 return do_constant_folding_cond_eq(c);
 }
- return 2;
+ return -1;
 }

 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 break;

 CASE_OP_32_64(setcond):
- tmp = do_constant_folding_cond(opc, op->args[1],
- op->args[2], op->args[3]);
- if (tmp != 2) {
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+ i = do_constant_folding_cond(opc, op->args[1],
+ op->args[2], op->args[3]);
+ if (i >= 0) {
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
 continue;
 }
 break;

 CASE_OP_32_64(brcond):
- tmp = do_constant_folding_cond(opc, op->args[0],
- op->args[1], op->args[2]);
- switch (tmp) {
- case 0:
+ i = do_constant_folding_cond(opc, op->args[0],
+ op->args[1], op->args[2]);
+ if (i == 0) {
 tcg_op_remove(s, op);
 continue;
- case 1:
+ } else if (i > 0) {
 memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
 op->opc = opc = INDEX_op_br;
 op->args[0] = op->args[3];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 break;

 CASE_OP_32_64(movcond):
- tmp = do_constant_folding_cond(opc, op->args[1],
- op->args[2], op->args[5]);
- if (tmp != 2) {
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4-tmp]);
+ i = do_constant_folding_cond(opc, op->args[1],
+ op->args[2], op->args[5]);
+ if (i >= 0) {
+ tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[4 - i]);
 continue;
 }
 if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)

 case INDEX_op_brcond2_i32:
- tmp = do_constant_folding_cond2(&op->args[0], &op->args[2],
- op->args[4]);
- if (tmp == 0) {
+ i = do_constant_folding_cond2(&op->args[0], &op->args[2],
+ op->args[4]);
+ if (i == 0) {
 do_brcond_false:
 tcg_op_remove(s, op);
 continue;
 }
- if (tmp == 1) {
+ if (i > 0) {
 do_brcond_true:
 op->opc = opc = INDEX_op_br;
 op->args[0] = op->args[5];
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (op->args[4] == TCG_COND_EQ) {
 /* Simplify EQ comparisons where one of the pairs
 can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[0], op->args[2],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[0], op->args[2],
+ TCG_COND_EQ);
+ if (i == 0) {
 goto do_brcond_false;
- } else if (tmp == 1) {
+ } else if (i > 0) {
 goto do_brcond_high;
 }
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[1], op->args[3],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_EQ);
+ if (i == 0) {
 goto do_brcond_false;
- } else if (tmp != 1) {
+ } else if (i < 0) {
 break;
 }
 do_brcond_low:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (op->args[4] == TCG_COND_NE) {
 /* Simplify NE comparisons where one of the pairs
 can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[0], op->args[2],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[0], op->args[2],
+ TCG_COND_NE);
+ if (i == 0) {
 goto do_brcond_high;
- } else if (tmp == 1) {
+ } else if (i > 0) {
 goto do_brcond_true;
 }
- tmp = do_constant_folding_cond(INDEX_op_brcond_i32,
- op->args[1], op->args[3],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_brcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_NE);
+ if (i == 0) {
 goto do_brcond_low;
- } else if (tmp == 1) {
+ } else if (i > 0) {
 goto do_brcond_true;
 }
 }
 break;

 case INDEX_op_setcond2_i32:
- tmp = do_constant_folding_cond2(&op->args[1], &op->args[3],
- op->args[5]);
- if (tmp != 2) {
+ i = do_constant_folding_cond2(&op->args[1], &op->args[3],
+ op->args[5]);
+ if (i >= 0) {
 do_setcond_const:
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
+ tcg_opt_gen_movi(&ctx, op, op->args[0], i);
 continue;
 }
 if ((op->args[5] == TCG_COND_LT || op->args[5] == TCG_COND_GE)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (op->args[5] == TCG_COND_EQ) {
 /* Simplify EQ comparisons where one of the pairs
 can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[1], op->args[3],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_EQ);
+ if (i == 0) {
 goto do_setcond_const;
- } else if (tmp == 1) {
+ } else if (i > 0) {
 goto do_setcond_high;
 }
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[2], op->args[4],
- TCG_COND_EQ);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[2], op->args[4],
+ TCG_COND_EQ);
+ if (i == 0) {
 goto do_setcond_high;
- } else if (tmp != 1) {
+ } else if (i < 0) {
 break;
 }
 do_setcond_low:
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 if (op->args[5] == TCG_COND_NE) {
 /* Simplify NE comparisons where one of the pairs
 can be simplified. */
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[1], op->args[3],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[1], op->args[3],
+ TCG_COND_NE);
+ if (i == 0) {
 goto do_setcond_high;
- } else if (tmp == 1) {
+ } else if (i > 0) {
 goto do_setcond_const;
 }
- tmp = do_constant_folding_cond(INDEX_op_setcond_i32,
- op->args[2], op->args[4],
- TCG_COND_NE);
- if (tmp == 0) {
+ i = do_constant_folding_cond(INDEX_op_setcond_i32,
+ op->args[2], op->args[4],
+ TCG_COND_NE);
+ if (i == 0) {
 goto do_setcond_low;
- } else if (tmp == 1) {
+ } else if (i > 0) {
 goto do_setcond_const;
 }
 }
--
2.25.1
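A short illustrative example of why the sentinel change above helps
(hypothetical names, not the QEMU code): with 2 as the "unknown" value a
caller must compare against three explicit constants, while with -1 simple
sign tests cover every case.

    /* Return -1 if unknown, else the 0/1 result of the condition. */
    static int eval_cond(int known, int value)
    {
        if (!known) {
            return -1;          /* can't simplify */
        }
        return value != 0;      /* 0 or 1 */
    }

    int main(void)
    {
        int i = eval_cond(1, 5);
        if (i >= 0) {
            return 0;           /* fold to the constant i */
        }
        return 0;               /* keep the comparison */
    }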
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
 remove_mem_copy_all(ctx);
 }

-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
 {
 const TCGOpDef *def = &tcg_op_defs[op->opc];
 int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
 ts_info(ts)->z_mask = ctx->z_mask;
 }
 }
+ return true;
 }

 /*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
 fold_xi_to_x(ctx, op, 0)) {
 return true;
 }
- return false;
+ return finish_folding(ctx, op);
 }

 /* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
 fold_xi_to_x(ctx, op, 0)) {
 return true;
 }
- return false;
+ return finish_folding(ctx, op);
 }

 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
 op->args[4] = arg_new_constant(ctx, bl);
 op->args[5] = arg_new_constant(ctx, bh);
 }
- return false;
+ return finish_folding(ctx, op);
 }

 static bool fold_add2(OptContext *ctx, TCGOp *op)
--
2.43.0

This will allow callers to tail call to these functions
and return true indicating processing complete.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2)
 return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
 }

-static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
+static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 {
 TCGTemp *dst_ts = arg_temp(dst);
 TCGTemp *src_ts = arg_temp(src);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)

 if (ts_are_copies(dst_ts, src_ts)) {
 tcg_op_remove(ctx->tcg, op);
- return;
+ return true;
 }

 reset_ts(dst_ts);
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
 di->is_const = si->is_const;
 di->val = si->val;
 }
+ return true;
 }

-static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
+static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
 TCGArg dst, uint64_t val)
 {
 const TCGOpDef *def = &tcg_op_defs[op->opc];
@@ -XXX,XX +XXX,XX @@ static void tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
 /* Convert movi to mov with constant temp. */
 tv = tcg_constant_internal(type, val);
 init_ts_info(ctx, tv);
- tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
+ return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
 }

 static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
--
2.25.1
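For illustration, a minimal sketch of the tail-call idiom the patch above
enables (hypothetical names, not the QEMU code): helpers that return true
("processing complete") let fold routines end in a single tail call
instead of call-then-return-true.

    #include <stdbool.h>

    static bool gen_movi(long *dst, long val)
    {
        *dst = val;
        return true;   /* processing complete */
    }

    static bool fold_example(long *dst, long known_const)
    {
        return gen_movi(dst, known_const);   /* tail call, one line */
    }

    int main(void)
    {
        long r = 0;
        return fold_example(&r, 7) && r == 7 ? 0 : 1;
    }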
The big comment just above says functions should be sorted.
Add forward declarations as needed.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
1 file changed, 59 insertions(+), 55 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
 * 3) those that produce information about the result value.
 */

+static bool fold_or(OptContext *ctx, TCGOp *op);
+static bool fold_orc(OptContext *ctx, TCGOp *op);
+static bool fold_xor(OptContext *ctx, TCGOp *op);
+
 static bool fold_add(OptContext *ctx, TCGOp *op)
 {
 if (fold_const2_commutative(ctx, op) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
+{
+ /* If true and false values are the same, eliminate the cmp. */
+ if (args_are_copies(op->args[2], op->args[3])) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
+ }
+
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+ uint64_t tv = arg_info(op->args[2])->val;
+ uint64_t fv = arg_info(op->args[3])->val;
+
+ if (tv == -1 && fv == 0) {
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+ }
+ if (tv == 0 && fv == -1) {
+ if (TCG_TARGET_HAS_not_vec) {
+ op->opc = INDEX_op_not_vec;
+ return fold_not(ctx, op);
+ } else {
+ op->opc = INDEX_op_xor_vec;
+ op->args[2] = arg_new_constant(ctx, -1);
+ return fold_xor(ctx, op);
+ }
+ }
+ }
+ if (arg_is_const(op->args[2])) {
+ uint64_t tv = arg_info(op->args[2])->val;
+ if (tv == -1) {
+ op->opc = INDEX_op_or_vec;
+ op->args[2] = op->args[3];
+ return fold_or(ctx, op);
+ }
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
+ op->opc = INDEX_op_andc_vec;
+ op->args[2] = op->args[1];
+ op->args[1] = op->args[3];
+ return fold_andc(ctx, op);
+ }
+ }
+ if (arg_is_const(op->args[3])) {
+ uint64_t fv = arg_info(op->args[3])->val;
+ if (fv == 0) {
+ op->opc = INDEX_op_and_vec;
+ return fold_and(ctx, op);
+ }
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
+ op->opc = INDEX_op_orc_vec;
+ op->args[2] = op->args[1];
+ op->args[1] = op->args[3];
+ return fold_orc(ctx, op);
+ }
+ }
+ return finish_folding(ctx, op);
+}
+
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
 {
 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
 return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
-{
- /* If true and false values are the same, eliminate the cmp. */
- if (args_are_copies(op->args[2], op->args[3])) {
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
- }
-
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
- uint64_t tv = arg_info(op->args[2])->val;
- uint64_t fv = arg_info(op->args[3])->val;
-
- if (tv == -1 && fv == 0) {
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
- }
- if (tv == 0 && fv == -1) {
- if (TCG_TARGET_HAS_not_vec) {
- op->opc = INDEX_op_not_vec;
- return fold_not(ctx, op);
- } else {
- op->opc = INDEX_op_xor_vec;
- op->args[2] = arg_new_constant(ctx, -1);
- return fold_xor(ctx, op);
- }
- }
- }
- if (arg_is_const(op->args[2])) {
- uint64_t tv = arg_info(op->args[2])->val;
- if (tv == -1) {
- op->opc = INDEX_op_or_vec;
- op->args[2] = op->args[3];
- return fold_or(ctx, op);
- }
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
- op->opc = INDEX_op_andc_vec;
- op->args[2] = op->args[1];
- op->args[1] = op->args[3];
- return fold_andc(ctx, op);
- }
- }
- if (arg_is_const(op->args[3])) {
- uint64_t fv = arg_info(op->args[3])->val;
- if (fv == 0) {
- op->opc = INDEX_op_and_vec;
- return fold_and(ctx, op);
- }
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
- op->opc = INDEX_op_orc_vec;
- op->args[2] = op->args[1];
- op->args[1] = op->args[3];
- return fold_orc(ctx, op);
- }
- }
- return finish_folding(ctx, op);
-}
-
 /* Propagate constants and copies, fold constant expressions. */
 void tcg_optimize(TCGContext *s)
 {
--
2.43.0

Copy z_mask into OptContext, for writeback to the
first output within the new function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 49 +++++++++++++++++++++++++++++++++----------------
1 file changed, 33 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 TCGContext *tcg;
 TCGOp *prev_mb;
 TCGTempSet temps_used;
+
+ /* In flight values from optimization. */
+ uint64_t z_mask;
 } OptContext;

 static inline TempOptInfo *ts_info(TCGTemp *ts)
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
 }
 }

+static void finish_folding(OptContext *ctx, TCGOp *op)
+{
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ int i, nb_oargs;
+
+ /*
+ * For an opcode that ends a BB, reset all temp data.
+ * We do no cross-BB optimization.
+ */
+ if (def->flags & TCG_OPF_BB_END) {
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ ctx->prev_mb = NULL;
+ return;
+ }
+
+ nb_oargs = def->nb_oargs;
+ for (i = 0; i < nb_oargs; i++) {
+ reset_temp(op->args[i]);
+ /*
+ * Save the corresponding known-zero bits mask for the
+ * first output argument (only one supported so far).
+ */
+ if (i == 0) {
+ arg_info(op->args[i])->z_mask = ctx->z_mask;
+ }
+ }
+}
+
 static bool fold_call(OptContext *ctx, TCGOp *op)
 {
 TCGContext *s = ctx->tcg;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 partmask &= 0xffffffffu;
 affected &= 0xffffffffu;
 }
+ ctx.z_mask = z_mask;

 if (partmask == 0) {
 tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
 break;
 }

- /* Some of the folding above can change opc. */
- opc = op->opc;
- def = &tcg_op_defs[opc];
- if (def->flags & TCG_OPF_BB_END) {
- memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
- } else {
- int nb_oargs = def->nb_oargs;
- for (i = 0; i < nb_oargs; i++) {
- reset_temp(op->args[i]);
- /* Save the corresponding known-zero bits mask for the
- first output argument (only one supported so far). */
- if (i == 0) {
- arg_info(op->args[i])->z_mask = z_mask;
- }
- }
- }
+ finish_folding(&ctx, op);

 /* Eliminate duplicate and redundant fence instructions. */
 if (ctx.prev_mb) {
--
2.25.1
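A small illustration of the forward-declaration technique used above
(stand-in functions, not the QEMU code): declarations up front let the
definitions stay in sorted order even when an earlier function needs a
later one.

    /* Forward declarations keep the file sortable. */
    static int fold_or(int x, int y);
    static int fold_xor(int x, int y);

    static int fold_bitsel(int c, int t, int f)
    {
        /* may rewrite itself in terms of later-defined folds */
        return c ? fold_or(t, 0) : fold_xor(f, 0);
    }

    static int fold_or(int x, int y)  { return x | y; }
    static int fold_xor(int x, int y) { return x ^ y; }

    int main(void)
    {
        return fold_bitsel(1, 2, 3) == 2 ? 0 : 1;
    }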
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
now that fold_sub_vec always returns true.
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/optimize.c | 9 ++++++---
6
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
7
1 file changed, 6 insertions(+), 3 deletions(-)
9
8
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
11
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
13
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
fold_sub_to_neg(ctx, op)) {
14
uint64_t z_mask, partmask, affected, tmp;
16
return true;
15
TCGOpcode opc = op->opc;
17
}
16
const TCGOpDef *def;
18
- return false;
17
+ bool done = false;
19
+ return finish_folding(ctx, op);
18
20
}
19
/* Calls are special. */
21
20
if (opc == INDEX_op_call) {
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
21
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
23
{
22
allocator where needed and possible. Also detect copies. */
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
23
switch (opc) {
25
+ if (fold_const2(ctx, op) ||
24
CASE_OP_32_64_VEC(mov):
26
+ fold_xx_to_i(ctx, op, 0) ||
25
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
27
+ fold_xi_to_x(ctx, op, 0) ||
26
- continue;
28
+ fold_sub_to_neg(ctx, op)) {
27
+ done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
29
return true;
28
+ break;
30
}
29
31
30
case INDEX_op_dup_vec:
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
31
if (arg_is_const(op->args[1])) {
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
32
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
34
op->args[2] = arg_new_constant(ctx, -val);
33
break;
35
}
34
}
36
- return false;
35
37
+ return finish_folding(ctx, op);
36
- finish_folding(&ctx, op);
38
}
37
+ if (!done) {
39
38
+ finish_folding(&ctx, op);
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
39
+ }
40
41
/* Eliminate duplicate and redundant fence instructions. */
42
if (ctx.prev_mb) {
41
--
43
--
42
2.43.0
44
2.25.1
45
46
diff view generated by jsdifflib
The big comment just above says functions should be sorted.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
+{
+    /* Canonicalize the comparison to put immediate second. */
+    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+        op->args[3] = tcg_swap_cond(op->args[3]);
+    }
+    return finish_folding(ctx, op);
+}
+
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
+{
+    /* If true and false values are the same, eliminate the cmp. */
+    if (args_are_copies(op->args[3], op->args[4])) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
+    }
+
+    /* Canonicalize the comparison to put immediate second. */
+    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+        op->args[5] = tcg_swap_cond(op->args[5]);
+    }
+    /*
+     * Canonicalize the "false" input reg to match the destination,
+     * so that the tcg backend can implement "move if true".
+     */
+    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
+        op->args[5] = tcg_invert_cond(op->args[5]);
+    }
+    return finish_folding(ctx, op);
+}
+
 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     return tcg_opt_gen_movi(ctx, op, op->args[0], i);
 }
 
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
-{
-    /* Canonicalize the comparison to put immediate second. */
-    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
-        op->args[3] = tcg_swap_cond(op->args[3]);
-    }
-    return finish_folding(ctx, op);
-}
-
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
-{
-    /* If true and false values are the same, eliminate the cmp. */
-    if (args_are_copies(op->args[3], op->args[4])) {
-        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
-    }
-
-    /* Canonicalize the comparison to put immediate second. */
-    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
-        op->args[5] = tcg_swap_cond(op->args[5]);
-    }
-    /*
-     * Canonicalize the "false" input reg to match the destination,
-     * so that the tcg backend can implement "move if true".
-     */
-    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
-        op->args[5] = tcg_invert_cond(op->args[5]);
-    }
-    return finish_folding(ctx, op);
-}
-
 static bool fold_sextract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask, s_mask_old;
--
2.43.0
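As an aside on the canonicalizations being moved here: swapping the two
operands of a comparison must swap the condition, while swapping the value
arms of a select must invert it. A standalone check of the two underlying
identities (an illustration only, independent of TCG's condition codes):

#include <assert.h>

int main(void)
{
    int a = 1, b = 2;
    assert((a < b) == (b > a));     /* operand swap: tcg_swap_cond */
    assert((a < b) == !(a >= b));   /* arm swap: tcg_invert_cond */
    return 0;
}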
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
     /* Set to 1 all bits to the left of the rightmost. */
     uint64_t z_mask = arg_info(op->args[1])->z_mask;
-    ctx->z_mask = -(z_mask & -z_mask);
+    z_mask = -(z_mask & -z_mask);
 
-    /*
-     * Because of fold_sub_to_neg, we want to always return true,
-     * via finish_folding.
-     */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
--
2.43.0
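The expression -(z_mask & -z_mask) above is a two's-complement idiom:
z & -z isolates the lowest set bit, and negating the result sets that bit
and every bit above it. A quick standalone check (illustration only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t z = 0x00f0;            /* lowest set bit is bit 4 */
    uint64_t lo = z & -z;
    assert(lo == 0x10);
    assert(-lo == ~(uint64_t)0xf);  /* bits 4..63 all set */
    return 0;
}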
Merge the two conditions, sign != 0 && !(z_mask & sign),
by testing ~z_mask & sign. If sign == 0, the logical and
will produce false.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
-    uint64_t s_mask, z_mask, sign;
+    uint64_t s_mask, z_mask;
     TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
          * If the sign bit is known zero, then logical right shift
          * will not reduce the number of input sign repetitions.
          */
-        sign = -s_mask;
-        if (sign && !(z_mask & sign)) {
+        if (~z_mask & -s_mask) {
             return fold_masks_s(ctx, op, s_mask);
         }
         break;
--
2.43.0
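The merged test is conservative in the safe direction: whenever the old
two-part condition held, the new single test also fires, and sign == 0 makes
it false, as the message argues. A brute-force check over 8-bit masks
(illustration only; sign here stands in for -s_mask):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (unsigned s = 0; s < 256; s++) {
        uint8_t sign = (uint8_t)-s;
        for (unsigned z = 0; z < 256; z++) {
            int old_test = sign != 0 && !(z & sign);
            int new_test = ((uint8_t)~z & sign) != 0;
            assert(!old_test || new_test);   /* old implies new */
            assert(sign != 0 || !new_test);  /* sign == 0 => false */
        }
    }
    return 0;
}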
The input which overlaps the sign bit of the output can
have its input s_mask propagated to the output s_mask.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     TempOptInfo *t2 = arg_info(op->args[2]);
     int ofs = op->args[3];
     int len = op->args[4];
+    int width;
     TCGOpcode and_opc;
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
 
     if (ti_is_const(t1) && ti_is_const(t2)) {
         return tcg_opt_gen_movi(ctx, op, op->args[0],
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         and_opc = INDEX_op_and_i32;
+        width = 32;
         break;
     case TCG_TYPE_I64:
         and_opc = INDEX_op_and_i64;
+        width = 64;
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         return fold_and(ctx, op);
     }
 
+    /* The s_mask from the top portion of the deposit is still valid. */
+    if (ofs + len == width) {
+        s_mask = t2->s_mask << ofs;
+    } else {
+        s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
+    }
+
     z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
-    return fold_masks_z(ctx, op, z_mask);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
--
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
     if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
         op->args[5] = tcg_invert_cond(op->args[5]);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_sextract(OptContext *ctx, TCGOp *op)
--
2.43.0
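For readers unfamiliar with it, deposit64(a, ofs, len, b) replaces the
len-bit field of a at bit ofs with the low len bits of b, which is why
fold_deposit above computes the result's known-zeros mask by depositing
t2->z_mask into t1->z_mask. A minimal re-implementation for illustration
(QEMU's real helper lives in include/qemu/bitops.h):

#include <assert.h>
#include <stdint.h>

static uint64_t my_deposit64(uint64_t a, int ofs, int len, uint64_t b)
{
    /* Assumes 0 < len and ofs + len < 64. */
    uint64_t field = (((uint64_t)1 << len) - 1) << ofs;
    return (a & ~field) | ((b << ofs) & field);
}

int main(void)
{
    assert(my_deposit64(0xffffffff00000000ull, 8, 16, 0xabcd)
           == 0xffffffff00abcd00ull);
    return 0;
}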
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
         fold_xx_to_i(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
--
2.43.0
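The fold_xx_to_i(ctx, op, 0) visible in the context above rests on the
remainder identity: x % x is 0 for any nonzero x, so rem with equal
arguments folds to the constant 0. Trivially checkable:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t x = 12345;
    assert(x % x == 0);
    return 0;
}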
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
     if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
         op->args[3] = tcg_swap_cond(op->args[3]);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
--
2.43.0
Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
     return ts_info(arg_temp(arg));
 }
 
+static inline bool ti_is_const(TempOptInfo *ti)
+{
+    return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+    return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+    return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
 static inline bool ts_is_const(TCGTemp *ts)
 {
-    return ts_info(ts)->is_const;
+    return ti_is_const(ts_info(ts));
 }
 
 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
 {
-    TempOptInfo *ti = ts_info(ts);
-    return ti->is_const && ti->val == val;
+    return ti_is_const_val(ts_info(ts), val);
 }
 
 static inline bool arg_is_const(TCGArg arg)
--
2.43.0
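The three accessors are a pure refactoring of a test-then-read pattern; a
self-contained sketch of the shape (the struct below is a stand-in carrying
only the two fields the helpers touch, not the real TempOptInfo):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    bool is_const;
    uint64_t val;
} Info;                             /* stand-in for TempOptInfo */

static inline bool ti_is_const(const Info *ti) { return ti->is_const; }
static inline uint64_t ti_const_val(const Info *ti) { return ti->val; }
static inline bool ti_is_const_val(const Info *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

int main(void)
{
    Info i = { .is_const = true, .val = 42 };
    assert(ti_is_const_val(&i, 42));
    assert(!ti_is_const_val(&i, 0));
    return 0;
}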
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
     }
 
     /* Value is {0,-1} so all bits are repetitions of the sign. */
-    ctx->s_mask = -1;
-    return false;
+    return fold_masks_s(ctx, op, -1);
 }
 
 static bool fold_setcond2(OptContext *ctx, TCGOp *op)
--
2.43.0
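A negsetcond result is always 0 or -1, and for those two values every bit
equals the sign bit, which is exactly what an s_mask of -1 asserts. A quick
standalone check (illustration only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t vals[2] = { 0, (uint64_t)-1 };
    for (int k = 0; k < 2; k++) {
        uint64_t sign = vals[k] >> 63;
        for (int i = 0; i < 64; i++) {
            assert(((vals[k] >> i) & 1) == sign);
        }
    }
    return 0;
}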
Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask_old, z_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
     int pos = op->args[2];
     int len = op->args[3];
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t;
-
-        t = arg_info(op->args[1])->val;
-        t = extract64(t, pos, len);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                extract64(ti_const_val(t1), pos, len));
     }
 
-    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask_old = t1->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
     if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    ctx->z_mask = z_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_extract2(OptContext *ctx, TCGOp *op)
--
2.43.0
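extract64(v, pos, len) returns the len-bit field of v starting at bit pos,
zero-extended; the patch applies it both to the constant value and to the
known-zeros mask. A minimal re-implementation for illustration (the real
helper is in include/qemu/bitops.h):

#include <assert.h>
#include <stdint.h>

static uint64_t my_extract64(uint64_t v, int pos, int len)
{
    return (v >> pos) & (((uint64_t)1 << len) - 1);  /* assumes len < 64 */
}

int main(void)
{
    assert(my_extract64(0xdeadbeef, 8, 16) == 0xadbe);
    return 0;
}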
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
         t = dup_const(TCGOP_VECE(op), t);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_dup_vec;
         TCGOP_VECE(op) = MO_32;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
--
2.43.0
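dup_const(vece, t) broadcasts one element across a 64-bit word, which is why
a dup of a constant input folds to a constant. A sketch of the byte case
(the helper name below is hypothetical; QEMU's macro handles all element
sizes):

#include <assert.h>
#include <stdint.h>

static uint64_t dup_const_8(uint64_t t)    /* like dup_const(MO_8, t) */
{
    return (t & 0xff) * 0x0101010101010101ull;
}

int main(void)
{
    assert(dup_const_8(0x5a) == 0x5a5a5a5a5a5a5a5aull);
    return 0;
}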
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
         }
         return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
--
2.43.0
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Avoid the use of the OptContext slots.
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
tcg/optimize.c | 13 ++++++++++---
5
tcg/optimize.c | 32 ++++++++++++++++++--------------
8
1 file changed, 10 insertions(+), 3 deletions(-)
6
1 file changed, 18 insertions(+), 14 deletions(-)
9
7
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
10
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
11
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
12
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
15
return true;
13
return true;
16
}
14
}
17
15
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
16
+static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
19
+{
17
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ if (t != 0) {
22
+ t = do_constant_folding(op->opc, t, 0);
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
24
+ }
25
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
26
+ }
27
+ return false;
21
+}
28
+}
22
+
29
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
30
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
31
{
30
+ uint64_t z_mask;
32
return fold_const1(ctx, op);
31
+
33
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
32
if (fold_const1(ctx, op)) {
34
}
33
return true;
35
break;
34
}
36
35
37
- CASE_OP_32_64(clz):
36
switch (ctx->type) {
38
- CASE_OP_32_64(ctz):
37
case TCG_TYPE_I32:
39
- if (arg_is_const(op->args[1])) {
38
- ctx->z_mask = 32 | 31;
40
- TCGArg v = arg_info(op->args[1])->val;
39
+ z_mask = 32 | 31;
41
- if (v != 0) {
40
break;
42
- tmp = do_constant_folding(opc, v, 0);
41
case TCG_TYPE_I64:
43
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
42
- ctx->z_mask = 64 | 63;
44
- } else {
43
+ z_mask = 64 | 63;
45
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[2]);
44
break;
46
- }
45
default:
47
- continue;
46
g_assert_not_reached();
48
- }
47
}
49
- break;
48
- return false;
50
-
49
+ return fold_masks_z(ctx, op, z_mask);
51
default:
50
}
52
break;
51
53
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
54
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
55
case INDEX_op_brcond2_i32:
56
done = fold_brcond2(&ctx, op);
57
break;
58
+ CASE_OP_32_64(clz):
59
+ CASE_OP_32_64(ctz):
60
+ done = fold_count_zeros(&ctx, op);
61
+ break;
62
CASE_OP_32_64(ctpop):
63
done = fold_ctpop(&ctx, op);
64
break;
53
--
65
--
54
2.43.0
66
2.25.1
67
68
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
Always set s_mask along the BSWAP_OS path, since the result is
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
being explicitly sign-extended.
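To make the s_mask change concrete: for bswap16 with BSWAP_OS, `sign` is INT16_MIN widened to 64 bits, so s_mask = sign claims that bits 15..63 all repeat the sign bit, where the previous sign << 1 only claimed bits 16 and up. A standalone sketch of that claim, not the patch itself:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sign = (uint64_t)(int64_t)INT16_MIN;   /* 0xffff...8000 */
        int16_t r = (int16_t)0x8034;                    /* some bswap16 result */
        uint64_t v = (uint64_t)(int64_t)r;              /* explicit sign extension */

        /* Every bit covered by s_mask is a copy of the sign bit,
         * so the masked value is either all of sign or none of it. */
        assert((v & sign) == sign || (v & sign) == 0);
        return 0;
    }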
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
4
---
8
tcg/optimize.c | 21 ++++++++++-----------
5
tcg/optimize.c | 27 ++++++++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
6
1 file changed, 16 insertions(+), 11 deletions(-)
10
7
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
10
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
12
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
13
return false;
14
}
15
16
+static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
+{
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+
21
+ t = do_constant_folding(op->opc, t, op->args[2]);
22
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
23
+ }
24
+ return false;
25
+}
26
+
27
static bool fold_call(OptContext *ctx, TCGOp *op)
17
{
28
{
18
uint64_t z_mask, s_mask, sign;
29
TCGContext *s = ctx->tcg;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
30
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
20
31
}
21
- if (arg_is_const(op->args[1])) {
32
break;
22
- uint64_t t = arg_info(op->args[1])->val;
33
34
- CASE_OP_32_64(bswap16):
35
- CASE_OP_32_64(bswap32):
36
- case INDEX_op_bswap64_i64:
37
- if (arg_is_const(op->args[1])) {
38
- tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
39
- op->args[2]);
40
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
41
- continue;
42
- }
43
- break;
23
-
44
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
45
default:
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
46
break;
26
+ if (ti_is_const(t1)) {
47
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
48
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
28
+ do_constant_folding(op->opc, ctx->type,
49
case INDEX_op_brcond2_i32:
29
+ ti_const_val(t1),
50
done = fold_brcond2(&ctx, op);
30
+ op->args[2]));
51
break;
31
}
52
+ CASE_OP_32_64(bswap16):
32
53
+ CASE_OP_32_64(bswap32):
33
- z_mask = arg_info(op->args[1])->z_mask;
54
+ case INDEX_op_bswap64_i64:
34
-
55
+ done = fold_bswap(&ctx, op);
35
+ z_mask = t1->z_mask;
56
+ break;
36
switch (op->opc) {
57
CASE_OP_32_64(clz):
37
case INDEX_op_bswap16_i32:
58
CASE_OP_32_64(ctz):
38
case INDEX_op_bswap16_i64:
59
done = fold_count_zeros(&ctx, op);
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
60
--
62
2.43.0
61
2.25.1
62
63
1
Add a routine to which masks can be passed directly, rather than
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
storing them into OptContext. To be used in upcoming patches.
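As a reminder of the mask contract the new routine documents (a z_mask bit of 0 means that value bit is known to be 0, so z_mask == 0 implies the value is the constant 0), here is a standalone restatement in plain C, illustrative only:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t val = 0x3400;
        uint64_t z_mask = 0xff00;       /* only bits 8..15 may be nonzero */

        assert((val & ~z_mask) == 0);   /* value consistent with its z_mask */
        /* With z_mask == 0 every bit is known zero, so the output can be
         * folded to the constant 0 - the early-out mentioned above. */
        assert((val & 0) == 0);
        return 0;
    }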
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
tcg/optimize.c | 15 ++++++++++++---
5
tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
8
1 file changed, 12 insertions(+), 3 deletions(-)
6
1 file changed, 31 insertions(+), 22 deletions(-)
9
7
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
10
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
11
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
12
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
15
return fold_const2(ctx, op);
13
return fold_const2(ctx, op);
16
}
14
}
17
15
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
16
+static bool fold_dup(OptContext *ctx, TCGOp *op)
19
+/*
20
+ * Record "zero" and "sign" masks for the single output of @op.
21
+ * See TempOptInfo definition of z_mask and s_mask.
22
+ * If z_mask allows, fold the output to constant zero.
23
+ */
24
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
25
+ uint64_t z_mask, uint64_t s_mask)
26
{
27
- uint64_t z_mask = ctx->z_mask;
28
- uint64_t s_mask = ctx->s_mask;
29
const TCGOpDef *def = &tcg_op_defs[op->opc];
30
TCGTemp *ts;
31
TempOptInfo *ti;
32
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
33
return true;
34
}
35
36
+static bool fold_masks(OptContext *ctx, TCGOp *op)
37
+{
17
+{
38
+ return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
18
+ if (arg_is_const(op->args[1])) {
19
+ uint64_t t = arg_info(op->args[1])->val;
20
+ t = dup_const(TCGOP_VECE(op), t);
21
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
22
+ }
23
+ return false;
39
+}
24
+}
40
+
25
+
41
/*
26
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
42
* An "affected" mask bit is 0 if and only if the result is identical
27
+{
43
* to the first input. Thus if the entire mask is 0, the operation
28
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
29
+ uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
30
+ arg_info(op->args[2])->val);
31
+ return tcg_opt_gen_movi(ctx, op, op->args[0], t);
32
+ }
33
+
34
+ if (args_are_copies(op->args[1], op->args[2])) {
35
+ op->opc = INDEX_op_dup_vec;
36
+ TCGOP_VECE(op) = MO_32;
37
+ }
38
+ return false;
39
+}
40
+
41
static bool fold_eqv(OptContext *ctx, TCGOp *op)
42
{
43
return fold_const2(ctx, op);
44
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
45
done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
46
break;
47
48
- case INDEX_op_dup_vec:
49
- if (arg_is_const(op->args[1])) {
50
- tmp = arg_info(op->args[1])->val;
51
- tmp = dup_const(TCGOP_VECE(op), tmp);
52
- tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
53
- continue;
54
- }
55
- break;
56
-
57
- case INDEX_op_dup2_vec:
58
- assert(TCG_TARGET_REG_BITS == 32);
59
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
60
- tcg_opt_gen_movi(&ctx, op, op->args[0],
61
- deposit64(arg_info(op->args[1])->val, 32, 32,
62
- arg_info(op->args[2])->val));
63
- continue;
64
- } else if (args_are_copies(op->args[1], op->args[2])) {
65
- op->opc = INDEX_op_dup_vec;
66
- TCGOP_VECE(op) = MO_32;
67
- }
68
- break;
69
-
70
default:
71
break;
72
73
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
CASE_OP_32_64(divu):
75
done = fold_divide(&ctx, op);
76
break;
77
+ case INDEX_op_dup_vec:
78
+ done = fold_dup(&ctx, op);
79
+ break;
80
+ case INDEX_op_dup2_vec:
81
+ done = fold_dup2(&ctx, op);
82
+ break;
83
CASE_OP_32_64(eqv):
84
done = fold_eqv(&ctx, op);
85
break;
44
--
86
--
45
2.43.0
87
2.25.1
88
89
1
Avoid the use of the OptContext slots.
1
This is the final entry in the main switch that was in a
2
different form. After this, we have the option to convert
3
the switch into a function dispatch table.
2
4
3
Be careful not to call fold_masks_zs when the memory operation
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
is wide enough to require multiple outputs, so split into two
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
9
tcg/optimize.c | 27 ++++++++++++++-------------
11
1 file changed, 21 insertions(+), 5 deletions(-)
10
1 file changed, 14 insertions(+), 13 deletions(-)
12
11
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
14
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
15
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
16
@@ -XXX,XX +XXX,XX @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
17
return true;
19
}
18
}
20
19
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
20
+static bool fold_mov(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
21
+{
23
{
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
23
+}
46
+
24
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
25
static bool fold_movcond(OptContext *ctx, TCGOp *op)
48
+{
26
{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
27
TCGOpcode opc = op->opc;
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
28
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
29
break;
57
case INDEX_op_qemu_ld_a32_i32:
30
}
58
case INDEX_op_qemu_ld_a64_i32:
31
59
+ done = fold_qemu_ld_1reg(&ctx, op);
32
- /* Propagate constants through copy operations and do constant
33
- folding. Constants will be substituted to arguments by register
34
- allocator where needed and possible. Also detect copies. */
35
+ /*
36
+ * Process each opcode.
37
+ * Sorted alphabetically by opcode as much as possible.
38
+ */
39
switch (opc) {
40
- CASE_OP_32_64_VEC(mov):
41
- done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
42
- break;
43
-
44
- default:
45
- break;
46
-
47
- /* ---------------------------------------------------------- */
48
- /* Sorted alphabetically by opcode as much as possible. */
49
-
50
CASE_OP_32_64_VEC(add):
51
done = fold_add(&ctx, op);
52
break;
53
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
54
case INDEX_op_mb:
55
done = fold_mb(&ctx, op);
56
break;
57
+ CASE_OP_32_64_VEC(mov):
58
+ done = fold_mov(&ctx, op);
60
+ break;
59
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
60
CASE_OP_32_64(movcond):
62
case INDEX_op_qemu_ld_a64_i64:
61
done = fold_movcond(&ctx, op);
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
62
break;
73
case INDEX_op_qemu_st8_a32_i32:
63
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
case INDEX_op_qemu_st8_a64_i32:
64
CASE_OP_32_64_VEC(xor):
65
done = fold_xor(&ctx, op);
66
break;
67
+ default:
68
+ break;
69
}
70
71
if (!done) {
75
--
72
--
76
2.43.0
73
2.25.1
74
75
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
Pull the "op r, a, a => movi r, 0" optimization into a function,
2
Remove fold_masks as the function becomes unused.
2
and use it in the outer opcode fold functions.
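The identities behind the new fold_xx_to_i(ctx, op, 0) callers, checked standalone in plain C rather than TCG:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t vals[] = { 0, 1, 0x80000000u, UINT64_MAX };
        for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
            uint64_t a = vals[i];
            assert((a - a) == 0);    /* sub  r, a, a -> movi r, 0 */
            assert((a ^ a) == 0);    /* xor  r, a, a -> movi r, 0 */
            assert((a & ~a) == 0);   /* andc r, a, a -> movi r, 0 */
        }
        return 0;
    }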
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/optimize.c | 18 ++++++++----------
8
tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
1 file changed, 24 insertions(+), 17 deletions(-)
9
10
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
15
return fold_masks_zs(ctx, op, -1, s_mask);
16
return false;
16
}
17
}
17
18
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
+/* If the binary operation has both arguments equal, fold to @i. */
19
-{
20
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
+{
21
-}
22
+ if (args_are_copies(op->args[1], op->args[2])) {
22
-
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
24
+ }
25
+ return false;
26
+}
27
+
23
/*
28
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
29
* These outermost fold_<op> functions are sorted alphabetically.
25
* to the first input. Thus if the entire mask is 0, the operation
30
*/
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
31
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
32
33
static bool fold_andc(OptContext *ctx, TCGOp *op)
34
{
35
- return fold_const2(ctx, op);
36
+ if (fold_const2(ctx, op) ||
37
+ fold_xx_to_i(ctx, op, 0)) {
38
+ return true;
39
+ }
40
+ return false;
41
}
42
43
static bool fold_brcond(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
45
46
static bool fold_sub(OptContext *ctx, TCGOp *op)
47
{
48
- return fold_const2(ctx, op);
49
+ if (fold_const2(ctx, op) ||
50
+ fold_xx_to_i(ctx, op, 0)) {
51
+ return true;
52
+ }
53
+ return false;
54
}
55
56
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
57
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
27
58
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
59
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
60
{
30
+ uint64_t z_mask, s_mask;
61
- return fold_const2(ctx, op);
31
+ TempOptInfo *t1, *t2;
62
+ if (fold_const2(ctx, op) ||
32
+
63
+ fold_xx_to_i(ctx, op, 0)) {
33
if (fold_const2_commutative(ctx, op) ||
64
+ return true;
34
fold_xx_to_i(ctx, op, 0) ||
65
+ }
35
fold_xi_to_x(ctx, op, 0) ||
66
+ return false;
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
38
}
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
67
}
51
68
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
69
/* Propagate constants and copies, fold constant expressions. */
70
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
71
break;
72
}
73
74
- /* Simplify expression for "op r, a, a => movi r, 0" cases */
75
- switch (opc) {
76
- CASE_OP_32_64_VEC(andc):
77
- CASE_OP_32_64_VEC(sub):
78
- CASE_OP_32_64_VEC(xor):
79
- if (args_are_copies(op->args[1], op->args[2])) {
80
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
81
- continue;
82
- }
83
- break;
84
- default:
85
- break;
86
- }
87
-
88
/*
89
* Process each opcode.
90
* Sorted alphabetically by opcode as much as possible.
53
--
91
--
54
2.43.0
92
2.25.1
93
94
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
Pull the "op r, a, a => mov r, a" optimization into a function,
2
and use it in the outer opcode fold functions.
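Likewise, the idempotence facts behind fold_xx_to_x, as a standalone check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x123456789abcdef0ull;
        assert((a & a) == a);   /* and r, a, a -> mov r, a */
        assert((a | a) == a);   /* or  r, a, a -> mov r, a */
        return 0;
    }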
2
3
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
7
---
6
tcg/optimize.c | 13 ++++++++-----
8
tcg/optimize.c | 39 ++++++++++++++++++++++++---------------
7
1 file changed, 8 insertions(+), 5 deletions(-)
9
1 file changed, 24 insertions(+), 15 deletions(-)
8
10
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
13
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
16
return false;
17
}
18
19
+/* If the binary operation has both arguments equal, fold to identity. */
20
+static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
+{
22
+ if (args_are_copies(op->args[1], op->args[2])) {
23
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
24
+ }
25
+ return false;
26
+}
27
+
28
/*
29
* These outermost fold_<op> functions are sorted alphabetically.
30
+ *
31
+ * The ordering of the transformations should be:
32
+ * 1) those that produce a constant
33
+ * 2) those that produce a copy
34
+ * 3) those that produce information about the result value.
35
*/
36
37
static bool fold_add(OptContext *ctx, TCGOp *op)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
39
40
static bool fold_and(OptContext *ctx, TCGOp *op)
41
{
42
- return fold_const2(ctx, op);
43
+ if (fold_const2(ctx, op) ||
44
+ fold_xx_to_x(ctx, op)) {
45
+ return true;
46
+ }
47
+ return false;
48
}
49
50
static bool fold_andc(OptContext *ctx, TCGOp *op)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
51
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
52
15
static bool fold_or(OptContext *ctx, TCGOp *op)
53
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
54
{
17
+ uint64_t z_mask, s_mask;
55
- return fold_const2(ctx, op);
18
+ TempOptInfo *t1, *t2;
56
+ if (fold_const2(ctx, op) ||
19
+
57
+ fold_xx_to_x(ctx, op)) {
20
if (fold_const2_commutative(ctx, op) ||
58
+ return true;
21
fold_xi_to_x(ctx, op, 0) ||
59
+ }
22
fold_xx_to_x(ctx, op)) {
60
+ return false;
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
61
}
37
62
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
63
static bool fold_orc(OptContext *ctx, TCGOp *op)
64
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
65
break;
66
}
67
68
- /* Simplify expression for "op r, a, a => mov r, a" cases */
69
- switch (opc) {
70
- CASE_OP_32_64_VEC(or):
71
- CASE_OP_32_64_VEC(and):
72
- if (args_are_copies(op->args[1], op->args[2])) {
73
- tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
74
- continue;
75
- }
76
- break;
77
- default:
78
- break;
79
- }
80
-
81
/*
82
* Process each opcode.
83
* Sorted alphabetically by opcode as much as possible.
39
--
84
--
40
2.43.0
85
2.25.1
86
87
1
All mask setting is now done with parameters via fold_masks_*.
1
Pull the "op r, a, 0 => movi r, 0" optimization into a function,
2
and use it in the outer opcode fold functions.
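And the zero-absorbing identities behind fold_xi_to_i(ctx, op, 0), checked standalone (the muluh stand-in assumes a compiler with unsigned __int128, e.g. GCC or Clang):

    #include <assert.h>
    #include <stdint.h>

    /* Portable 64x64 -> high-64 multiply, standing in for muluh. */
    static uint64_t muluh64(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
        uint64_t a = 0xfedcba9876543210ull;
        assert((a & 0) == 0);        /* and   r, a, 0 -> movi r, 0 */
        assert((a * 0) == 0);        /* mul   r, a, 0 -> movi r, 0 */
        assert(muluh64(a, 0) == 0);  /* muluh r, a, 0 -> movi r, 0 */
        return 0;
    }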
2
3
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
7
---
6
tcg/optimize.c | 13 -------------
8
tcg/optimize.c | 38 ++++++++++++++++++++------------------
7
1 file changed, 13 deletions(-)
9
1 file changed, 20 insertions(+), 18 deletions(-)
8
10
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
13
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
14
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
16
return false;
15
17
}
16
/* In flight values from optimization. */
18
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
19
+/* If the binary operation has second argument @i, fold to @i. */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
20
+static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
19
TCGType type;
21
+{
20
} OptContext;
22
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
21
23
+ return tcg_opt_gen_movi(ctx, op, op->args[0], i);
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
24
+ }
23
for (i = 0; i < nb_oargs; i++) {
25
+ return false;
24
TCGTemp *ts = arg_temp(op->args[i]);
26
+}
25
reset_ts(ctx, ts);
27
+
26
- /*
28
/* If the binary operation has both arguments equal, fold to @i. */
27
- * Save the corresponding known-zero/sign bits mask for the
29
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
28
- * first output argument (only one supported so far).
30
{
29
- */
31
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
30
- if (i == 0) {
32
static bool fold_and(OptContext *ctx, TCGOp *op)
31
- ts_info(ts)->z_mask = ctx->z_mask;
33
{
34
if (fold_const2(ctx, op) ||
35
+ fold_xi_to_i(ctx, op, 0) ||
36
fold_xx_to_x(ctx, op)) {
37
return true;
38
}
39
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
40
41
static bool fold_mul(OptContext *ctx, TCGOp *op)
42
{
43
- return fold_const2(ctx, op);
44
+ if (fold_const2(ctx, op) ||
45
+ fold_xi_to_i(ctx, op, 0)) {
46
+ return true;
47
+ }
48
+ return false;
49
}
50
51
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
52
{
53
- return fold_const2(ctx, op);
54
+ if (fold_const2(ctx, op) ||
55
+ fold_xi_to_i(ctx, op, 0)) {
56
+ return true;
57
+ }
58
+ return false;
59
}
60
61
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
62
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
63
continue;
64
}
65
66
- /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
67
- switch (opc) {
68
- CASE_OP_32_64_VEC(and):
69
- CASE_OP_32_64_VEC(mul):
70
- CASE_OP_32_64(muluh):
71
- CASE_OP_32_64(mulsh):
72
- if (arg_is_const(op->args[2])
73
- && arg_info(op->args[2])->val == 0) {
74
- tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
75
- continue;
76
- }
77
- break;
78
- default:
79
- break;
32
- }
80
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
81
-
44
/*
82
/*
45
* Process each opcode.
83
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
84
* Sorted alphabetically by opcode as much as possible.
47
--
85
--
48
2.43.0
86
2.25.1
87
88
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
Compute the type of the operation early.
2
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
There are at least 4 places that used a def->flags ladder
4
to determine the type of the operation being optimized.
5
6
There were two places that assumed !TCG_OPF_64BIT means
7
TCG_TYPE_I32, and so could potentially compute incorrect
8
results for vector operations.
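A minimal standalone sketch of that hazard, with invented flag and type names rather than QEMU's: testing only the 64-bit flag sends vector ops down the 32-bit path, while classifying vectors first does not:

    #include <assert.h>

    enum { OPF_64BIT = 1, OPF_VECTOR = 2 };
    enum { TYPE_I32, TYPE_I64, TYPE_VEC };

    /* The buggy pattern: only checks the 64-bit flag. */
    static int type_from_64bit_only(int flags)
    {
        return (flags & OPF_64BIT) ? TYPE_I64 : TYPE_I32;
    }

    /* The fixed pattern: classify vectors before scalars. */
    static int type_precomputed(int flags)
    {
        if (flags & OPF_VECTOR) {
            return TYPE_VEC;
        }
        return (flags & OPF_64BIT) ? TYPE_I64 : TYPE_I32;
    }

    int main(void)
    {
        assert(type_from_64bit_only(OPF_VECTOR) == TYPE_I32);  /* misclassified */
        assert(type_precomputed(OPF_VECTOR) == TYPE_VEC);      /* correct */
        return 0;
    }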
9
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
12
---
6
tcg/optimize.c | 19 +++++++++++--------
13
tcg/optimize.c | 149 +++++++++++++++++++++++++++++--------------------
7
1 file changed, 11 insertions(+), 8 deletions(-)
14
1 file changed, 89 insertions(+), 60 deletions(-)
8
15
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
18
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
19
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
21
22
/* In flight values from optimization. */
23
uint64_t z_mask;
24
+ TCGType type;
25
} OptContext;
26
27
static inline TempOptInfo *ts_info(TCGTemp *ts)
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
29
{
30
TCGTemp *dst_ts = arg_temp(dst);
31
TCGTemp *src_ts = arg_temp(src);
32
- const TCGOpDef *def;
33
TempOptInfo *di;
34
TempOptInfo *si;
35
uint64_t z_mask;
36
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
37
reset_ts(dst_ts);
38
di = ts_info(dst_ts);
39
si = ts_info(src_ts);
40
- def = &tcg_op_defs[op->opc];
41
- if (def->flags & TCG_OPF_VECTOR) {
42
- new_op = INDEX_op_mov_vec;
43
- } else if (def->flags & TCG_OPF_64BIT) {
44
- new_op = INDEX_op_mov_i64;
45
- } else {
46
+
47
+ switch (ctx->type) {
48
+ case TCG_TYPE_I32:
49
new_op = INDEX_op_mov_i32;
50
+ break;
51
+ case TCG_TYPE_I64:
52
+ new_op = INDEX_op_mov_i64;
53
+ break;
54
+ case TCG_TYPE_V64:
55
+ case TCG_TYPE_V128:
56
+ case TCG_TYPE_V256:
57
+ /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
58
+ new_op = INDEX_op_mov_vec;
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
}
63
op->opc = new_op;
64
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
65
op->args[0] = dst;
66
op->args[1] = src;
67
68
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
69
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
70
TCGArg dst, uint64_t val)
71
{
72
- const TCGOpDef *def = &tcg_op_defs[op->opc];
73
- TCGType type;
74
- TCGTemp *tv;
75
-
76
- if (def->flags & TCG_OPF_VECTOR) {
77
- type = TCGOP_VECL(op) + TCG_TYPE_V64;
78
- } else if (def->flags & TCG_OPF_64BIT) {
79
- type = TCG_TYPE_I64;
80
- } else {
81
- type = TCG_TYPE_I32;
82
- }
83
-
84
/* Convert movi to mov with constant temp. */
85
- tv = tcg_constant_internal(type, val);
86
+ TCGTemp *tv = tcg_constant_internal(ctx->type, val);
87
+
88
init_ts_info(ctx, tv);
89
return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
90
}
91
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
92
}
93
}
94
95
-static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
96
+static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
97
+ uint64_t x, uint64_t y)
98
{
99
- const TCGOpDef *def = &tcg_op_defs[op];
100
uint64_t res = do_constant_folding_2(op, x, y);
101
- if (!(def->flags & TCG_OPF_64BIT)) {
102
+ if (type == TCG_TYPE_I32) {
103
res = (int32_t)res;
104
}
105
return res;
106
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
107
* Return -1 if the condition can't be simplified,
108
* and the result of the condition (0 or 1) if it can.
109
*/
110
-static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
111
+static int do_constant_folding_cond(TCGType type, TCGArg x,
112
TCGArg y, TCGCond c)
113
{
114
uint64_t xv = arg_info(x)->val;
115
uint64_t yv = arg_info(y)->val;
116
117
if (arg_is_const(x) && arg_is_const(y)) {
118
- const TCGOpDef *def = &tcg_op_defs[op];
119
- tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
120
- if (def->flags & TCG_OPF_64BIT) {
121
- return do_constant_folding_cond_64(xv, yv, c);
122
- } else {
123
+ switch (type) {
124
+ case TCG_TYPE_I32:
125
return do_constant_folding_cond_32(xv, yv, c);
126
+ case TCG_TYPE_I64:
127
+ return do_constant_folding_cond_64(xv, yv, c);
128
+ default:
129
+ /* Only scalar comparisons are optimizable */
130
+ return -1;
131
}
132
} else if (args_are_copies(x, y)) {
133
return do_constant_folding_cond_eq(c);
134
@@ -XXX,XX +XXX,XX @@ static bool fold_const1(OptContext *ctx, TCGOp *op)
135
uint64_t t;
136
137
t = arg_info(op->args[1])->val;
138
- t = do_constant_folding(op->opc, t, 0);
139
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
140
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
141
}
142
return false;
143
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
144
uint64_t t1 = arg_info(op->args[1])->val;
145
uint64_t t2 = arg_info(op->args[2])->val;
146
147
- t1 = do_constant_folding(op->opc, t1, t2);
148
+ t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
149
return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
150
}
151
return false;
152
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
153
static bool fold_brcond(OptContext *ctx, TCGOp *op)
154
{
155
TCGCond cond = op->args[2];
156
- int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);
157
+ int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
158
159
if (i == 0) {
160
tcg_op_remove(ctx->tcg, op);
161
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
162
* Simplify EQ/NE comparisons where one of the pairs
163
* can be simplified.
164
*/
165
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
166
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
167
op->args[2], cond);
168
switch (i ^ inv) {
169
case 0:
170
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
171
goto do_brcond_high;
172
}
173
174
- i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
175
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
176
op->args[3], cond);
177
switch (i ^ inv) {
178
case 0:
179
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
180
if (arg_is_const(op->args[1])) {
181
uint64_t t = arg_info(op->args[1])->val;
182
183
- t = do_constant_folding(op->opc, t, op->args[2]);
184
+ t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
185
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
186
}
187
return false;
188
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
189
uint64_t t = arg_info(op->args[1])->val;
190
191
if (t != 0) {
192
- t = do_constant_folding(op->opc, t, 0);
193
+ t = do_constant_folding(op->opc, ctx->type, t, 0);
194
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
195
}
196
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
197
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
198
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
199
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
200
{
17
+ uint64_t z_mask, s_mask;
201
- TCGOpcode opc = op->opc;
18
+ TempOptInfo *tt, *ft;
202
TCGCond cond = op->args[5];
19
int i;
203
- int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);
20
204
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
21
/* If true and false values are the same, eliminate the cmp. */
205
206
if (i >= 0) {
207
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
208
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
209
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
24
}
210
uint64_t tv = arg_info(op->args[3])->val;
25
211
uint64_t fv = arg_info(op->args[4])->val;
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
212
+ TCGOpcode opc;
27
- | arg_info(op->args[4])->z_mask;
213
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
214
- opc = (opc == INDEX_op_movcond_i32
29
- & arg_info(op->args[4])->s_mask;
215
- ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);
30
+ tt = arg_info(op->args[3]);
216
+ switch (ctx->type) {
31
+ ft = arg_info(op->args[4]);
217
+ case TCG_TYPE_I32:
32
+ z_mask = tt->z_mask | ft->z_mask;
218
+ opc = INDEX_op_setcond_i32;
33
+ s_mask = tt->s_mask & ft->s_mask;
219
+ break;
34
220
+ case TCG_TYPE_I64:
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
221
+ opc = INDEX_op_setcond_i64;
36
- uint64_t tv = arg_info(op->args[3])->val;
222
+ break;
37
- uint64_t fv = arg_info(op->args[4])->val;
223
+ default:
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
224
+ g_assert_not_reached();
39
+ uint64_t tv = ti_const_val(tt);
225
+ }
40
+ uint64_t fv = ti_const_val(ft);
226
41
TCGOpcode opc, negopc = 0;
227
if (tv == 1 && fv == 0) {
42
TCGCond cond = op->args[5];
228
op->opc = opc;
43
229
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
230
static bool fold_setcond(OptContext *ctx, TCGOp *op)
45
}
231
{
232
TCGCond cond = op->args[3];
233
- int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);
234
+ int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
235
236
if (i >= 0) {
237
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
238
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
239
* Simplify EQ/NE comparisons where one of the pairs
240
* can be simplified.
241
*/
242
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
243
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
244
op->args[3], cond);
245
switch (i ^ inv) {
246
case 0:
247
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
248
goto do_setcond_high;
46
}
249
}
47
}
250
48
- return false;
251
- i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
252
+ i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
253
op->args[4], cond);
254
switch (i ^ inv) {
255
case 0:
256
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
257
init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
258
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
259
260
+ /* Pre-compute the type of the operation. */
261
+ if (def->flags & TCG_OPF_VECTOR) {
262
+ ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
263
+ } else if (def->flags & TCG_OPF_64BIT) {
264
+ ctx.type = TCG_TYPE_I64;
265
+ } else {
266
+ ctx.type = TCG_TYPE_I32;
267
+ }
49
+
268
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
269
/* For commutative operations make constant second argument */
51
}
270
switch (opc) {
52
271
CASE_OP_32_64_VEC(add):
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
272
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
273
/* Proceed with possible constant folding. */
274
break;
275
}
276
- if (opc == INDEX_op_sub_i32) {
277
+ switch (ctx.type) {
278
+ case TCG_TYPE_I32:
279
neg_op = INDEX_op_neg_i32;
280
have_neg = TCG_TARGET_HAS_neg_i32;
281
- } else if (opc == INDEX_op_sub_i64) {
282
+ break;
283
+ case TCG_TYPE_I64:
284
neg_op = INDEX_op_neg_i64;
285
have_neg = TCG_TARGET_HAS_neg_i64;
286
- } else if (TCG_TARGET_HAS_neg_vec) {
287
- TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
288
- unsigned vece = TCGOP_VECE(op);
289
- neg_op = INDEX_op_neg_vec;
290
- have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
291
- } else {
292
break;
293
+ case TCG_TYPE_V64:
294
+ case TCG_TYPE_V128:
295
+ case TCG_TYPE_V256:
296
+ neg_op = INDEX_op_neg_vec;
297
+ have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
298
+ TCGOP_VECE(op)) > 0;
299
+ break;
300
+ default:
301
+ g_assert_not_reached();
302
}
303
if (!have_neg) {
304
break;
305
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
306
TCGOpcode not_op;
307
bool have_not;
308
309
- if (def->flags & TCG_OPF_VECTOR) {
310
- not_op = INDEX_op_not_vec;
311
- have_not = TCG_TARGET_HAS_not_vec;
312
- } else if (def->flags & TCG_OPF_64BIT) {
313
- not_op = INDEX_op_not_i64;
314
- have_not = TCG_TARGET_HAS_not_i64;
315
- } else {
316
+ switch (ctx.type) {
317
+ case TCG_TYPE_I32:
318
not_op = INDEX_op_not_i32;
319
have_not = TCG_TARGET_HAS_not_i32;
320
+ break;
321
+ case TCG_TYPE_I64:
322
+ not_op = INDEX_op_not_i64;
323
+ have_not = TCG_TARGET_HAS_not_i64;
324
+ break;
325
+ case TCG_TYPE_V64:
326
+ case TCG_TYPE_V128:
327
+ case TCG_TYPE_V256:
328
+ not_op = INDEX_op_not_vec;
329
+ have_not = TCG_TARGET_HAS_not_vec;
330
+ break;
331
+ default:
332
+ g_assert_not_reached();
333
}
334
if (!have_not) {
335
break;
336
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
337
below, we can ignore high bits, but for further optimizations we
338
need to record that the high bits contain garbage. */
339
partmask = z_mask;
340
- if (!(def->flags & TCG_OPF_64BIT)) {
341
+ if (ctx.type == TCG_TYPE_I32) {
342
z_mask |= ~(tcg_target_ulong)0xffffffffu;
343
partmask &= 0xffffffffu;
344
affected &= 0xffffffffu;
54
--
345
--
55
2.43.0
346
2.25.1
347
348
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
Split out the conditional conversion from a more complex logical
2
Avoid double inversion of the value of the second const operand.
2
operation to a simple NOT. Create a couple more helpers to make
3
3
this easy for the outer-most logical operations.
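The algebra these helpers rely on, with nand/nor/eqv spelled out using plain C operators in a standalone check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x0123456789abcdefull, m1 = ~(uint64_t)0;

        assert((a ^ m1) == ~a);     /* xor  r, a, -1 -> not r, a */
        assert(~(a & m1) == ~a);    /* nand r, a, -1 -> not r, a */
        assert(~(a | 0) == ~a);     /* nor  r, a, 0  -> not r, a */
        assert((m1 & ~a) == ~a);    /* andc r, -1, a -> not r, a */
        assert((0 | ~a) == ~a);     /* orc  r, 0, a  -> not r, a */
        assert(~(a ^ 0) == ~a);     /* eqv  r, a, 0  -> not r, a */
        return 0;
    }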
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/optimize.c | 21 +++++++++++----------
8
tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
1 file changed, 86 insertions(+), 72 deletions(-)
9
10
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
16
return false;
17
}
18
19
+/*
20
+ * Convert @op to NOT, if NOT is supported by the host.
21
+ * Return true f the conversion is successful, which will still
22
+ * indicate that the processing is complete.
23
+ */
24
+static bool fold_not(OptContext *ctx, TCGOp *op);
25
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
26
+{
27
+ TCGOpcode not_op;
28
+ bool have_not;
29
+
30
+ switch (ctx->type) {
31
+ case TCG_TYPE_I32:
32
+ not_op = INDEX_op_not_i32;
33
+ have_not = TCG_TARGET_HAS_not_i32;
34
+ break;
35
+ case TCG_TYPE_I64:
36
+ not_op = INDEX_op_not_i64;
37
+ have_not = TCG_TARGET_HAS_not_i64;
38
+ break;
39
+ case TCG_TYPE_V64:
40
+ case TCG_TYPE_V128:
41
+ case TCG_TYPE_V256:
42
+ not_op = INDEX_op_not_vec;
43
+ have_not = TCG_TARGET_HAS_not_vec;
44
+ break;
45
+ default:
46
+ g_assert_not_reached();
47
+ }
48
+ if (have_not) {
49
+ op->opc = not_op;
50
+ op->args[1] = op->args[idx];
51
+ return fold_not(ctx, op);
52
+ }
53
+ return false;
54
+}
55
+
56
+/* If the binary operation has first argument @i, fold to NOT. */
57
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
58
+{
59
+ if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
60
+ return fold_to_not(ctx, op, 2);
61
+ }
62
+ return false;
63
+}
64
+
65
/* If the binary operation has second argument @i, fold to @i. */
66
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
67
{
68
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
69
return false;
70
}
71
72
+/* If the binary operation has second argument @i, fold to NOT. */
73
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
74
+{
75
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
76
+ return fold_to_not(ctx, op, 1);
77
+ }
78
+ return false;
79
+}
80
+
81
/* If the binary operation has both arguments equal, fold to @i. */
82
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
83
{
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
84
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
85
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
86
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
87
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
88
- fold_xx_to_i(ctx, op, 0)) {
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
89
+ fold_xx_to_i(ctx, op, 0) ||
90
+ fold_ix_to_not(ctx, op, -1)) {
25
return true;
91
return true;
26
}
92
}
27
93
return false;
28
- z1 = arg_info(op->args[1])->z_mask;
94
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
29
+ t1 = arg_info(op->args[1]);
95
30
+ t2 = arg_info(op->args[2]);
96
static bool fold_eqv(OptContext *ctx, TCGOp *op)
31
+ z_mask = t1->z_mask;
97
{
32
98
- return fold_const2(ctx, op);
33
/*
99
+ if (fold_const2(ctx, op) ||
34
* Known-zeros does not imply known-ones. Therefore unless
100
+ fold_xi_to_not(ctx, op, 0)) {
35
* arg2 is constant, we can't infer anything from it.
101
+ return true;
36
*/
102
+ }
37
- if (arg_is_const(op->args[2])) {
103
+ return false;
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
104
}
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
105
40
+ if (ti_is_const(t2)) {
106
static bool fold_extract(OptContext *ctx, TCGOp *op)
41
+ uint64_t v2 = ti_const_val(t2);
107
@@ -XXX,XX +XXX,XX @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
108
43
return true;
109
static bool fold_nand(OptContext *ctx, TCGOp *op)
110
{
111
- return fold_const2(ctx, op);
112
+ if (fold_const2(ctx, op) ||
113
+ fold_xi_to_not(ctx, op, -1)) {
114
+ return true;
115
+ }
116
+ return false;
117
}
118
119
static bool fold_neg(OptContext *ctx, TCGOp *op)
120
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
121
122
static bool fold_nor(OptContext *ctx, TCGOp *op)
123
{
124
- return fold_const2(ctx, op);
125
+ if (fold_const2(ctx, op) ||
126
+ fold_xi_to_not(ctx, op, 0)) {
127
+ return true;
128
+ }
129
+ return false;
130
}
131
132
static bool fold_not(OptContext *ctx, TCGOp *op)
133
{
134
- return fold_const1(ctx, op);
135
+ if (fold_const1(ctx, op)) {
136
+ return true;
137
+ }
138
+
139
+ /* Because of fold_to_not, we want to always return true, via finish. */
140
+ finish_folding(ctx, op);
141
+ return true;
142
}
143
144
static bool fold_or(OptContext *ctx, TCGOp *op)
145
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
146
147
static bool fold_orc(OptContext *ctx, TCGOp *op)
148
{
149
- return fold_const2(ctx, op);
150
+ if (fold_const2(ctx, op) ||
151
+ fold_ix_to_not(ctx, op, 0)) {
152
+ return true;
153
+ }
154
+ return false;
155
}
156
157
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
158
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
159
static bool fold_xor(OptContext *ctx, TCGOp *op)
160
{
161
if (fold_const2(ctx, op) ||
162
- fold_xx_to_i(ctx, op, 0)) {
163
+ fold_xx_to_i(ctx, op, 0) ||
164
+ fold_xi_to_not(ctx, op, -1)) {
165
return true;
166
}
167
return false;
168
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
169
}
170
}
171
break;
172
- CASE_OP_32_64_VEC(xor):
173
- CASE_OP_32_64(nand):
174
- if (!arg_is_const(op->args[1])
175
- && arg_is_const(op->args[2])
176
- && arg_info(op->args[2])->val == -1) {
177
- i = 1;
178
- goto try_not;
179
- }
180
- break;
181
- CASE_OP_32_64(nor):
182
- if (!arg_is_const(op->args[1])
183
- && arg_is_const(op->args[2])
184
- && arg_info(op->args[2])->val == 0) {
185
- i = 1;
186
- goto try_not;
187
- }
188
- break;
189
- CASE_OP_32_64_VEC(andc):
190
- if (!arg_is_const(op->args[2])
191
- && arg_is_const(op->args[1])
192
- && arg_info(op->args[1])->val == -1) {
193
- i = 2;
194
- goto try_not;
195
- }
196
- break;
197
- CASE_OP_32_64_VEC(orc):
198
- CASE_OP_32_64(eqv):
199
- if (!arg_is_const(op->args[2])
200
- && arg_is_const(op->args[1])
201
- && arg_info(op->args[1])->val == 0) {
202
- i = 2;
203
- goto try_not;
204
- }
205
- break;
206
- try_not:
207
- {
208
- TCGOpcode not_op;
209
- bool have_not;
210
-
211
- switch (ctx.type) {
212
- case TCG_TYPE_I32:
213
- not_op = INDEX_op_not_i32;
214
- have_not = TCG_TARGET_HAS_not_i32;
215
- break;
216
- case TCG_TYPE_I64:
217
- not_op = INDEX_op_not_i64;
218
- have_not = TCG_TARGET_HAS_not_i64;
219
- break;
220
- case TCG_TYPE_V64:
221
- case TCG_TYPE_V128:
222
- case TCG_TYPE_V256:
223
- not_op = INDEX_op_not_vec;
224
- have_not = TCG_TARGET_HAS_not_vec;
225
- break;
226
- default:
227
- g_assert_not_reached();
228
- }
229
- if (!have_not) {
230
- break;
231
- }
232
- op->opc = not_op;
233
- reset_temp(op->args[0]);
234
- op->args[1] = op->args[i];
235
- continue;
236
- }
237
default:
238
break;
44
}
239
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
240
--
59
2.43.0
241
2.25.1
242
243
1
Avoid the use of the OptContext slots.
1
Even though there is only one user, place this more complex
2
conversion into its own helper.
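The identity being converted, as a standalone check (unsigned negation in C wraps modulo 2^64, matching the two's-complement neg op):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t b = 0x123456789abcdef0ull;
        assert((uint64_t)0 - b == ~b + 1);   /* sub r, 0, b -> neg r, b */
        return 0;
    }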
2
3
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/optimize.c | 8 +++++---
7
tcg/optimize.c | 89 ++++++++++++++++++++++++++------------------------
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
1 file changed, 47 insertions(+), 42 deletions(-)
8
9
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
15
16
static bool fold_neg(OptContext *ctx, TCGOp *op)
17
{
18
- return fold_const1(ctx, op);
19
+ if (fold_const1(ctx, op)) {
20
+ return true;
21
+ }
22
+ /*
23
+ * Because of fold_sub_to_neg, we want to always return true,
24
+ * via finish_folding.
25
+ */
26
+ finish_folding(ctx, op);
27
+ return true;
28
}
14
29
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
30
static bool fold_nor(OptContext *ctx, TCGOp *op)
31
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
32
return fold_const2(ctx, op);
33
}
34
35
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
36
+{
37
+ TCGOpcode neg_op;
38
+ bool have_neg;
39
+
40
+ if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
41
+ return false;
42
+ }
43
+
44
+ switch (ctx->type) {
45
+ case TCG_TYPE_I32:
46
+ neg_op = INDEX_op_neg_i32;
47
+ have_neg = TCG_TARGET_HAS_neg_i32;
48
+ break;
49
+ case TCG_TYPE_I64:
50
+ neg_op = INDEX_op_neg_i64;
51
+ have_neg = TCG_TARGET_HAS_neg_i64;
52
+ break;
53
+ case TCG_TYPE_V64:
54
+ case TCG_TYPE_V128:
55
+ case TCG_TYPE_V256:
56
+ neg_op = INDEX_op_neg_vec;
57
+ have_neg = (TCG_TARGET_HAS_neg_vec &&
58
+ tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
59
+ break;
60
+ default:
61
+ g_assert_not_reached();
62
+ }
63
+ if (have_neg) {
64
+ op->opc = neg_op;
65
+ op->args[1] = op->args[2];
66
+ return fold_neg(ctx, op);
67
+ }
68
+ return false;
69
+}
70
+
71
static bool fold_sub(OptContext *ctx, TCGOp *op)
16
{
72
{
17
+ uint64_t s_mask;
73
if (fold_const2(ctx, op) ||
18
+
74
- fold_xx_to_i(ctx, op, 0)) {
19
if (fold_const2_commutative(ctx, op) ||
75
+ fold_xx_to_i(ctx, op, 0) ||
20
fold_xi_to_not(ctx, op, 0)) {
76
+ fold_sub_to_neg(ctx, op)) {
21
return true;
77
return true;
22
}
78
}
23
79
return false;
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
80
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
25
- & arg_info(op->args[2])->s_mask;
81
continue;
26
- return false;
82
}
27
+ s_mask = arg_info(op->args[1])->s_mask
83
break;
28
+ & arg_info(op->args[2])->s_mask;
84
- CASE_OP_32_64_VEC(sub):
29
+ return fold_masks_s(ctx, op, s_mask);
85
- {
30
}
86
- TCGOpcode neg_op;
31
87
- bool have_neg;
32
static bool fold_not(OptContext *ctx, TCGOp *op)
88
-
89
- if (arg_is_const(op->args[2])) {
90
- /* Proceed with possible constant folding. */
91
- break;
92
- }
93
- switch (ctx.type) {
94
- case TCG_TYPE_I32:
95
- neg_op = INDEX_op_neg_i32;
96
- have_neg = TCG_TARGET_HAS_neg_i32;
97
- break;
98
- case TCG_TYPE_I64:
99
- neg_op = INDEX_op_neg_i64;
100
- have_neg = TCG_TARGET_HAS_neg_i64;
101
- break;
102
- case TCG_TYPE_V64:
103
- case TCG_TYPE_V128:
104
- case TCG_TYPE_V256:
105
- neg_op = INDEX_op_neg_vec;
106
- have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
107
- TCGOP_VECE(op)) > 0;
108
- break;
109
- default:
110
- g_assert_not_reached();
111
- }
112
- if (!have_neg) {
113
- break;
114
- }
115
- if (arg_is_const(op->args[1])
116
- && arg_info(op->args[1])->val == 0) {
117
- op->opc = neg_op;
118
- reset_temp(op->args[0]);
119
- op->args[1] = op->args[2];
120
- continue;
121
- }
122
- }
123
- break;
124
default:
125
break;
126
}
33
--
127
--
34
2.43.0
128
2.25.1
129
130
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
Pull the "op r, a, i => mov r, a" optimization into a function,
2
Sink mask computation below fold_affected_mask early exit.
2
and use it in the outer-most logical operations.
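The identity elements involved, gathered into one standalone check (shift counts of zero included, since fold_shift gains the same treatment):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x0f0f0f0f12345678ull, m1 = ~(uint64_t)0;

        assert((a + 0) == a);               /* add  r, a, 0  -> mov r, a */
        assert((a - 0) == a);               /* sub  r, a, 0  -> mov r, a */
        assert((a | 0) == a);               /* or   r, a, 0  -> mov r, a */
        assert((a ^ 0) == a);               /* xor  r, a, 0  -> mov r, a */
        assert((a & m1) == a);              /* and  r, a, -1 -> mov r, a */
        assert((a & ~(uint64_t)0) == a);    /* andc r, a, 0  -> mov r, a */
        assert((a | ~m1) == a);             /* orc  r, a, -1 -> mov r, a */
        assert(~(a ^ m1) == a);             /* eqv  r, a, -1 -> mov r, a */
        assert((a << 0) == a && (a >> 0) == a);  /* shifts by 0 */
        return 0;
    }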
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
7
tcg/optimize.c | 61 +++++++++++++++++++++-----------------------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
8
1 file changed, 26 insertions(+), 35 deletions(-)
9
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
15
15
return false;
16
static bool fold_and(OptContext *ctx, TCGOp *op)
16
}
17
18
+/* If the binary operation has second argument @i, fold to identity. */
19
+static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
20
+{
21
+ if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
22
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
23
+ }
24
+ return false;
25
+}
26
+
27
/* If the binary operation has second argument @i, fold to NOT. */
28
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
17
{
29
{
18
- uint64_t z1, z2;
30
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
19
+ uint64_t z1, z2, z_mask, s_mask;
31
20
+ TempOptInfo *t1, *t2;
32
static bool fold_add(OptContext *ctx, TCGOp *op)
21
33
{
22
if (fold_const2_commutative(ctx, op) ||
34
- return fold_const2(ctx, op);
35
+ if (fold_const2(ctx, op) ||
36
+ fold_xi_to_x(ctx, op, 0)) {
37
+ return true;
38
+ }
39
+ return false;
40
         fold_xi_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
-    z2 = arg_info(op->args[2])->z_mask;
-    ctx->z_mask = z1 & z2;
-
-    /*
-     * Sign repetitions are perforce all identical, whether they are 1 or 0.
-     * Bitwise operations preserve the relative quantity of the repetitions.
-     */
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z1 = t1->z_mask;
+    z2 = t2->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2]) &&
-        fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    z_mask = z1 & z2;
+
+    /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    s_mask = t1->s_mask & t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
--
2.43.0

 }
 
 static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, -1) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, -1) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
 static bool fold_or(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, -1) ||
         fold_ix_to_not(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_sub_to_neg(ctx, op)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_x(ctx, op, 0) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         }
 
-        /* Simplify expression for "op r, a, const => mov r, a" cases */
-        switch (opc) {
-        CASE_OP_32_64_VEC(add):
-        CASE_OP_32_64_VEC(sub):
-        CASE_OP_32_64_VEC(or):
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64_VEC(andc):
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
-                continue;
-            }
-            break;
-        CASE_OP_32_64_VEC(and):
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
-                continue;
-            }
-            break;
-        default:
-            break;
-        }
-
         /* Simplify using known-zero bits. Currently only ops with a single
            output argument is supported. */
         z_mask = -1;
--
2.25.1
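
For readers skimming the identity folds above, the whole "op r, a, const => mov r, a" family rests on a handful of algebraic identities. A minimal sketch in plain C (illustrative only; the helper names here are made up and are not QEMU's API):

    #include <assert.h>
    #include <stdint.h>

    /* Each identity lets "op r, a, C" be replaced by "mov r, a". */
    static uint64_t and_id(uint64_t a) { return a & UINT64_MAX; } /* x & -1 == x */
    static uint64_t or_id (uint64_t a) { return a | 0;  }         /* x | 0  == x */
    static uint64_t xor_id(uint64_t a) { return a ^ 0;  }         /* x ^ 0  == x */
    static uint64_t shl_id(uint64_t a) { return a << 0; }         /* x << 0 == x */

    int main(void)
    {
        uint64_t x = 0x1234;
        assert(and_id(x) == x && or_id(x) == x);
        assert(xor_id(x) == x && shl_id(x) == x);
        return 0;
    }
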
There are only a few logical operations which can compute
an "affected" mask.  Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
--
2.43.0

Pull the "op r, 0, b => movi r, 0" optimization into a function,
and use it in fold_shift.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 28 ++++++++++------------------
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
     return false;
 }
 
+/* If the binary operation has first argument @i, fold to @i. */
+static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+    }
+    return false;
+}
+
 /* If the binary operation has first argument @i, fold to NOT. */
 static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
+        fold_ix_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         }
 
-        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
-           and "sub r, 0, a => neg r, a" case.  */
-        switch (opc) {
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-            if (arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
-                continue;
-            }
-            break;
-        default:
-            break;
-        }
-
         /* Simplify using known-zero bits. Currently only ops with a single
            output argument is supported. */
         z_mask = -1;
--
2.25.1
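
The "affected mask" test that fold_affected_mask centralizes reduces to a few lines of plain C. A minimal sketch under assumed names (and_is_copy is invented here; only the a_mask computation mirrors the patch):

    #include <assert.h>
    #include <stdint.h>

    /*
     * z1 is the known-possibly-one mask of x.  For x & m, the only bits
     * that can change are those possibly 1 in x but cleared by m.  If no
     * bit can change, the AND is equivalent to a copy of x.
     */
    static int and_is_copy(uint64_t z1, uint64_t m)
    {
        uint64_t a_mask = z1 & ~m;   /* 0 iff result identical to x */
        return a_mask == 0;
    }

    int main(void)
    {
        /* x fits in 8 bits (z1 = 0xff): masking with 0xff changes nothing... */
        assert(and_is_copy(0xff, 0xff));
        /* ...but masking with 0x0f may clear the top nibble. */
        assert(!and_is_copy(0xff, 0x0f));
        return 0;
    }
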
Avoid the use of the OptContext slots.  Find TempOptInfo once.
Explicitly sign-extend z_mask instead of doing that manually.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    uint64_t s_mask_old, s_mask, z_mask, sign;
+    uint64_t s_mask_old, s_mask, z_mask;
     bool type_change = false;
+    TempOptInfo *t1;
 
     if (fold_const1(ctx, op)) {
         return true;
     }
 
-    z_mask = arg_info(op->args[1])->z_mask;
-    s_mask = arg_info(op->args[1])->s_mask;
+    t1 = arg_info(op->args[1]);
+    z_mask = t1->z_mask;
+    s_mask = t1->s_mask;
     s_mask_old = s_mask;
 
     switch (op->opc) {
     CASE_OP_32_64(ext8s):
-        sign = INT8_MIN;
-        z_mask = (uint8_t)z_mask;
+        s_mask |= INT8_MIN;
+        z_mask = (int8_t)z_mask;
         break;
     CASE_OP_32_64(ext16s):
-        sign = INT16_MIN;
-        z_mask = (uint16_t)z_mask;
+        s_mask |= INT16_MIN;
+        z_mask = (int16_t)z_mask;
         break;
     case INDEX_op_ext_i32_i64:
         type_change = true;
         QEMU_FALLTHROUGH;
     case INDEX_op_ext32s_i64:
-        sign = INT32_MIN;
-        z_mask = (uint32_t)z_mask;
+        s_mask |= INT32_MIN;
+        z_mask = (int32_t)z_mask;
         break;
     default:
         g_assert_not_reached();
     }
 
-    if (z_mask & sign) {
-        z_mask |= sign;
-    }
-    s_mask |= sign << 1;
-
-    ctx->z_mask = z_mask;
-    ctx->s_mask = s_mask;
     if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_extu(OptContext *ctx, TCGOp *op)
--
2.43.0

Move all of the known-zero optimizations into the per-opcode
functions.  Use fold_masks when there is a possibility of the
result being determined, and simply set ctx->z_mask otherwise.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 545 ++++++++++++++++++++++++++-----------------------
 1 file changed, 294 insertions(+), 251 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     TCGTempSet temps_used;
 
     /* In flight values from optimization. */
-    uint64_t z_mask;
+    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
+    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     TCGType type;
 } OptContext;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    uint64_t a_mask = ctx->a_mask;
+    uint64_t z_mask = ctx->z_mask;
+
+    /*
+     * 32-bit ops generate 32-bit results.  For the result is zero test
+     * below, we can ignore high bits, but for further optimizations we
+     * need to record that the high bits contain garbage.
+     */
+    if (ctx->type == TCG_TYPE_I32) {
+        ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
+        a_mask &= MAKE_64BIT_MASK(0, 32);
+        z_mask &= MAKE_64BIT_MASK(0, 32);
+    }
+
+    if (z_mask == 0) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
+    }
+    if (a_mask == 0) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
+    }
+    return false;
+}
+
 /*
  * Convert @op to NOT, if NOT is supported by the host.
  * Return true f the conversion is successful, which will still
@@ -XXX,XX +XXX,XX @@ static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z1, z2;
+
     if (fold_const2(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
-    return false;
+
+    z1 = arg_info(op->args[1])->z_mask;
+    z2 = arg_info(op->args[2])->z_mask;
+    ctx->z_mask = z1 & z2;
+
+    /*
+     * Known-zeros does not imply known-ones.  Therefore unless
+     * arg2 is constant, we can't infer affected bits from it.
+     */
+    if (arg_is_const(op->args[2])) {
+        ctx->a_mask = z1 & ~z2;
+    }
+
+    return fold_masks(ctx, op);
 }
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z1;
+
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
-    return false;
+
+    z1 = arg_info(op->args[1])->z_mask;
+
+    /*
+     * Known-zeros does not imply known-ones.  Therefore unless
+     * arg2 is constant, we can't infer anything from it.
+     */
+    if (arg_is_const(op->args[2])) {
+        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
+        ctx->a_mask = z1 & ~z2;
+        z1 &= z2;
+    }
+    ctx->z_mask = z1;
+
+    return fold_masks(ctx, op);
 }
 
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, sign;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;
 
         t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+
+    z_mask = arg_info(op->args[1])->z_mask;
+    switch (op->opc) {
+    case INDEX_op_bswap16_i32:
+    case INDEX_op_bswap16_i64:
+        z_mask = bswap16(z_mask);
+        sign = INT16_MIN;
+        break;
+    case INDEX_op_bswap32_i32:
+    case INDEX_op_bswap32_i64:
+        z_mask = bswap32(z_mask);
+        sign = INT32_MIN;
+        break;
+    case INDEX_op_bswap64_i64:
+        z_mask = bswap64(z_mask);
+        sign = INT64_MIN;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
+    case TCG_BSWAP_OZ:
+        break;
+    case TCG_BSWAP_OS:
+        /* If the sign bit may be 1, force all the bits above to 1. */
+        if (z_mask & sign) {
+            z_mask |= sign;
+        }
+        break;
+    default:
+        /* The high bits are undefined: force all bits above the sign to 1. */
+        z_mask |= sign << 1;
+        break;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }
 
 static bool fold_call(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
 
 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t = arg_info(op->args[1])->val;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         }
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
     }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        z_mask = 31;
+        break;
+    case TCG_TYPE_I64:
+        z_mask = 63;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
+
     return false;
 }
 
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        ctx->z_mask = 32 | 31;
+        break;
+    case TCG_TYPE_I64:
+        ctx->z_mask = 64 | 63;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return false;
 }
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         t1 = deposit64(t1, op->args[3], op->args[4], t2);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
     }
+
+    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
+                            op->args[3], op->args[4],
+                            arg_info(op->args[2])->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask_old, z_mask;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         t = extract64(t, op->args[2], op->args[3]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+
+    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
+    if (op->args[2] == 0) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }
 
 static bool fold_extract2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    uint64_t z_mask_old, z_mask, sign;
+    bool type_change = false;
+
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+
+    switch (op->opc) {
+    CASE_OP_32_64(ext8s):
+        sign = INT8_MIN;
+        z_mask = (uint8_t)z_mask;
+        break;
+    CASE_OP_32_64(ext16s):
+        sign = INT16_MIN;
+        z_mask = (uint16_t)z_mask;
+        break;
+    case INDEX_op_ext_i32_i64:
+        type_change = true;
+        QEMU_FALLTHROUGH;
+    case INDEX_op_ext32s_i64:
+        sign = INT32_MIN;
+        z_mask = (uint32_t)z_mask;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (z_mask & sign) {
+        z_mask |= sign;
+    } else if (!type_change) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }
 
 static bool fold_extu(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    uint64_t z_mask_old, z_mask;
+    bool type_change = false;
+
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
+
+    switch (op->opc) {
+    CASE_OP_32_64(ext8u):
+        z_mask = (uint8_t)z_mask;
+        break;
+    CASE_OP_32_64(ext16u):
+        z_mask = (uint16_t)z_mask;
+        break;
+    case INDEX_op_extrl_i64_i32:
+    case INDEX_op_extu_i32_i64:
+        type_change = true;
+        QEMU_FALLTHROUGH;
+    case INDEX_op_ext32u_i64:
+        z_mask = (uint32_t)z_mask;
+        break;
+    case INDEX_op_extrh_i64_i32:
+        type_change = true;
+        z_mask >>= 32;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    ctx->z_mask = z_mask;
+    if (!type_change) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    return fold_masks(ctx, op);
 }
 
 static bool fold_mb(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
     }
 
+    ctx->z_mask = arg_info(op->args[3])->z_mask
+                | arg_info(op->args[4])->z_mask;
+
     if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
         uint64_t tv = arg_info(op->args[3])->val;
         uint64_t fv = arg_info(op->args[4])->val;
@@ -XXX,XX +XXX,XX @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (fold_const1(ctx, op)) {
         return true;
     }
+
+    /* Set to 1 all bits to the left of the rightmost.  */
+    z_mask = arg_info(op->args[1])->z_mask;
+    ctx->z_mask = -(z_mask & -z_mask);
+
     /*
      * Because of fold_sub_to_neg, we want to always return true,
      * via finish_folding.
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
         fold_xx_to_x(ctx, op)) {
         return true;
     }
-    return false;
+
+    ctx->z_mask = arg_info(op->args[1])->z_mask
+                | arg_info(op->args[2])->z_mask;
+    return fold_masks(ctx, op);
 }
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
 {
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
+    MemOp mop = get_memop(oi);
+    int width = 8 * memop_size(mop);
+
+    if (!(mop & MO_SIGN) && width < 64) {
+        ctx->z_mask = MAKE_64BIT_MASK(0, width);
+    }
+
     /* Opcodes that touch guest memory stop the mb optimization. */
     ctx->prev_mb = NULL;
     return false;
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     if (i >= 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], i);
     }
+
+    ctx->z_mask = 1;
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_setcond_i32;
         break;
     }
+
+    ctx->z_mask = 1;
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
 
 static bool fold_sextract(OptContext *ctx, TCGOp *op)
 {
+    int64_t z_mask_old, z_mask;
+
     if (arg_is_const(op->args[1])) {
         uint64_t t;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
         t = sextract64(t, op->args[2], op->args[3]);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+
+    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
+    if (op->args[2] == 0 && z_mask >= 0) {
+        ctx->a_mask = z_mask_old ^ z_mask;
+    }
+    ctx->z_mask = z_mask;
+
+    return fold_masks(ctx, op);
 }
 
 static bool fold_shift(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
+
+    if (arg_is_const(op->args[2])) {
+        ctx->z_mask = do_constant_folding(op->opc, ctx->type,
+                                          arg_info(op->args[1])->z_mask,
+                                          arg_info(op->args[2])->val);
+        return fold_masks(ctx, op);
+    }
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
     return fold_addsub2_i32(ctx, op, false);
 }
 
+static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
+{
+    /* We can't do any folding with a load, but we can record bits. */
+    switch (op->opc) {
+    CASE_OP_32_64(ld8u):
+        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+        break;
+    CASE_OP_32_64(ld16u):
+        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+        break;
+    case INDEX_op_ld32u_i64:
+        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return false;
+}
+
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
-    return false;
+
+    ctx->z_mask = arg_info(op->args[1])->z_mask
+                | arg_info(op->args[2])->z_mask;
+    return fold_masks(ctx, op);
 }
 
 /* Propagate constants and copies, fold constant expressions. */
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
-        uint64_t z_mask, partmask, affected, tmp;
         TCGOpcode opc = op->opc;
         const TCGOpDef *def;
         bool done = false;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             break;
         }
 
-        /* Simplify using known-zero bits. Currently only ops with a single
-           output argument is supported. */
-        z_mask = -1;
-        affected = -1;
-        switch (opc) {
-        CASE_OP_32_64(ext8s):
-            if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        CASE_OP_32_64(ext8u):
-            z_mask = 0xff;
-            goto and_const;
-        CASE_OP_32_64(ext16s):
-            if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        CASE_OP_32_64(ext16u):
-            z_mask = 0xffff;
-            goto and_const;
-        case INDEX_op_ext32s_i64:
-            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        case INDEX_op_ext32u_i64:
-            z_mask = 0xffffffffU;
-            goto and_const;
-
-        CASE_OP_32_64(and):
-            z_mask = arg_info(op->args[2])->z_mask;
-            if (arg_is_const(op->args[2])) {
-        and_const:
-                affected = arg_info(op->args[1])->z_mask & ~z_mask;
-            }
-            z_mask = arg_info(op->args[1])->z_mask & z_mask;
-            break;
-
-        case INDEX_op_ext_i32_i64:
-            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
-                break;
-            }
-            QEMU_FALLTHROUGH;
-        case INDEX_op_extu_i32_i64:
-            /* We do not compute affected as it is a size changing op. */
-            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
-            break;
-
-        CASE_OP_32_64(andc):
-            /* Known-zeros does not imply known-ones.  Therefore unless
-               op->args[2] is constant, we can't infer anything from it. */
-            if (arg_is_const(op->args[2])) {
-                z_mask = ~arg_info(op->args[2])->z_mask;
-                goto and_const;
-            }
-            /* But we certainly know nothing outside args[1] may be set. */
-            z_mask = arg_info(op->args[1])->z_mask;
-            break;
-
-        case INDEX_op_sar_i32:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 31;
-                z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-        case INDEX_op_sar_i64:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 63;
-                z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-
-        case INDEX_op_shr_i32:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 31;
-                z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-        case INDEX_op_shr_i64:
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & 63;
-                z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
-            }
-            break;
-
-        case INDEX_op_extrl_i64_i32:
-            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
-            break;
-        case INDEX_op_extrh_i64_i32:
-            z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
-            break;
-
-        CASE_OP_32_64(shl):
-            if (arg_is_const(op->args[2])) {
-                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
-                z_mask = arg_info(op->args[1])->z_mask << tmp;
-            }
-            break;
-
-        CASE_OP_32_64(neg):
-            /* Set to 1 all bits to the left of the rightmost.  */
-            z_mask = -(arg_info(op->args[1])->z_mask
-                       & -arg_info(op->args[1])->z_mask);
-            break;
-
-        CASE_OP_32_64(deposit):
-            z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                               op->args[3], op->args[4],
-                               arg_info(op->args[2])->z_mask);
-            break;
-
-        CASE_OP_32_64(extract):
-            z_mask = extract64(arg_info(op->args[1])->z_mask,
-                               op->args[2], op->args[3]);
-            if (op->args[2] == 0) {
-                affected = arg_info(op->args[1])->z_mask & ~z_mask;
-            }
-            break;
-        CASE_OP_32_64(sextract):
-            z_mask = sextract64(arg_info(op->args[1])->z_mask,
-                                op->args[2], op->args[3]);
-            if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
-                affected = arg_info(op->args[1])->z_mask & ~z_mask;
-            }
-            break;
-
-        CASE_OP_32_64(or):
-        CASE_OP_32_64(xor):
-            z_mask = arg_info(op->args[1])->z_mask
-                   | arg_info(op->args[2])->z_mask;
-            break;
-
-        case INDEX_op_clz_i32:
-        case INDEX_op_ctz_i32:
-            z_mask = arg_info(op->args[2])->z_mask | 31;
-            break;
-
-        case INDEX_op_clz_i64:
-        case INDEX_op_ctz_i64:
-            z_mask = arg_info(op->args[2])->z_mask | 63;
-            break;
-
-        case INDEX_op_ctpop_i32:
-            z_mask = 32 | 31;
-            break;
-        case INDEX_op_ctpop_i64:
-            z_mask = 64 | 63;
-            break;
-
-        CASE_OP_32_64(setcond):
-        case INDEX_op_setcond2_i32:
-            z_mask = 1;
-            break;
-
-        CASE_OP_32_64(movcond):
-            z_mask = arg_info(op->args[3])->z_mask
-                   | arg_info(op->args[4])->z_mask;
-            break;
-
-        CASE_OP_32_64(ld8u):
-            z_mask = 0xff;
-            break;
-        CASE_OP_32_64(ld16u):
-            z_mask = 0xffff;
-            break;
-        case INDEX_op_ld32u_i64:
-            z_mask = 0xffffffffu;
-            break;
-
-        CASE_OP_32_64(qemu_ld):
-            {
-                MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
-                MemOp mop = get_memop(oi);
-                if (!(mop & MO_SIGN)) {
-                    z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
-                }
-            }
-            break;
-
-        CASE_OP_32_64(bswap16):
-            z_mask = arg_info(op->args[1])->z_mask;
-            if (z_mask <= 0xffff) {
-                op->args[2] |= TCG_BSWAP_IZ;
-            }
-            z_mask = bswap16(z_mask);
-            switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
-            case TCG_BSWAP_OZ:
-                break;
-            case TCG_BSWAP_OS:
-                z_mask = (int16_t)z_mask;
-                break;
-            default: /* undefined high bits */
-                z_mask |= MAKE_64BIT_MASK(16, 48);
-                break;
-            }
-            break;
-
-        case INDEX_op_bswap32_i64:
-            z_mask = arg_info(op->args[1])->z_mask;
-            if (z_mask <= 0xffffffffu) {
-                op->args[2] |= TCG_BSWAP_IZ;
-            }
-            z_mask = bswap32(z_mask);
-            switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
-            case TCG_BSWAP_OZ:
-                break;
-            case TCG_BSWAP_OS:
-                z_mask = (int32_t)z_mask;
-                break;
-            default: /* undefined high bits */
-                z_mask |= MAKE_64BIT_MASK(32, 32);
-                break;
-            }
-            break;
-
-        default:
-            break;
-        }
-
-        /* 32-bit ops generate 32-bit results.  For the result is zero test
-           below, we can ignore high bits, but for further optimizations we
-           need to record that the high bits contain garbage.  */
-        partmask = z_mask;
-        if (ctx.type == TCG_TYPE_I32) {
-            z_mask |= ~(tcg_target_ulong)0xffffffffu;
-            partmask &= 0xffffffffu;
-            affected &= 0xffffffffu;
-        }
-        ctx.z_mask = z_mask;
-
-        if (partmask == 0) {
-            tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
-            continue;
-        }
-        if (affected == 0) {
-            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
-            continue;
-        }
+        /* Assume all bits affected, and no bits known zero. */
+        ctx.a_mask = -1;
+        ctx.z_mask = -1;
 
         /*
          * Process each opcode.
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         case INDEX_op_extrh_i64_i32:
             done = fold_extu(&ctx, op);
             break;
+        CASE_OP_32_64(ld8u):
+        CASE_OP_32_64(ld16u):
+        case INDEX_op_ld32u_i64:
+            done = fold_tcg_ld(&ctx, op);
+            break;
         case INDEX_op_mb:
             done = fold_mb(&ctx, op);
             break;
--
2.25.1
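
The z_mask bookkeeping moved into the per-opcode folders above follows simple rules: a clear bit in z_mask means "this result bit is provably 0". A minimal sketch in plain C (the z_* helper names are invented for illustration):

    #include <assert.h>
    #include <stdint.h>

    /* How a known-zeros mask propagates through a few operations. */
    static uint64_t z_and(uint64_t z1, uint64_t z2) { return z1 & z2; }
    static uint64_t z_or (uint64_t z1, uint64_t z2) { return z1 | z2; }
    static uint64_t z_ext8u(uint64_t z1)            { return (uint8_t)z1; }

    int main(void)
    {
        /* A value <= 0xff ANDed with 0x0f can only have the low nibble set... */
        assert(z_and(0xff, 0x0f) == 0x0f);
        /* ...zero-extension from 8 bits clears everything above bit 7... */
        assert(z_ext8u(~0ull) == 0xff);
        /* ...and OR can set any bit either input might set. */
        assert(z_or(0x0f, 0xf0) == 0xff);
        return 0;
    }
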
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
     if (fold_const1(ctx, op)) {
         return true;
     }
-
-    ctx->s_mask = arg_info(op->args[1])->s_mask;
-
-    /* Because of fold_to_not, we want to always return true, via finish. */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
--
2.43.0

Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
and muls2_i64.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 {
     if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
-        uint32_t a = arg_info(op->args[2])->val;
-        uint32_t b = arg_info(op->args[3])->val;
-        uint64_t r = (uint64_t)a * b;
+        uint64_t a = arg_info(op->args[2])->val;
+        uint64_t b = arg_info(op->args[3])->val;
+        uint64_t h, l;
         TCGArg rl, rh;
-        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+        TCGOp *op2;
+
+        switch (op->opc) {
+        case INDEX_op_mulu2_i32:
+            l = (uint64_t)(uint32_t)a * (uint32_t)b;
+            h = (int32_t)(l >> 32);
+            l = (int32_t)l;
+            break;
+        case INDEX_op_muls2_i32:
+            l = (int64_t)(int32_t)a * (int32_t)b;
+            h = l >> 32;
+            l = (int32_t)l;
+            break;
+        case INDEX_op_mulu2_i64:
+            mulu64(&l, &h, a, b);
+            break;
+        case INDEX_op_muls2_i64:
+            muls64(&l, &h, a, b);
+            break;
+        default:
+            g_assert_not_reached();
+        }
 
         rl = op->args[0];
         rh = op->args[1];
-        tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
-        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+
+        /* The proper opcode is supplied by tcg_opt_gen_mov. */
+        op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+        tcg_opt_gen_movi(ctx, op, rl, l);
+        tcg_opt_gen_movi(ctx, op2, rh, h);
         return true;
     }
     return false;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(muluh):
             done = fold_mul_highpart(&ctx, op);
             break;
-        case INDEX_op_mulu2_i32:
-            done = fold_mulu2_i32(&ctx, op);
+        CASE_OP_32_64(muls2):
+        CASE_OP_32_64(mulu2):
+            done = fold_multiply2(&ctx, op);
             break;
         CASE_OP_32_64(nand):
             done = fold_nand(&ctx, op);
--
2.25.1
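
The double-word multiply being constant-folded above splits one wide product into low/high result words. A minimal sketch of the 32x32->64 case in plain C (illustrative only; this is not the QEMU function):

    #include <assert.h>
    #include <stdint.h>

    /* mulu2_i32 semantics: r1:r0 = a * b, as a 64-bit product. */
    static void mulu2_i32(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
    {
        uint64_t r = (uint64_t)a * b;
        *lo = (uint32_t)r;
        *hi = (uint32_t)(r >> 32);
    }

    int main(void)
    {
        uint32_t lo, hi;
        mulu2_i32(0xffffffffu, 2, &lo, &hi);
        assert(lo == 0xfffffffeu && hi == 1);   /* 2 * (2^32 - 1) */
        return 0;
    }
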
Stores have no output operands, and so need no further work.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
 {
     /* Opcodes that touch guest memory stop the mb optimization. */
     ctx->prev_mb = NULL;
-    return false;
+    return true;
 }
 
 static bool fold_remainder(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
 
     if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
         remove_mem_copy_all(ctx);
-        return false;
+        return true;
     }
 
     switch (op->opc) {
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     remove_mem_copy_in(ctx, ofs, ofs + lm1);
-    return false;
+    return true;
 }
 
 static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
     TCGType type;
 
     if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
-        fold_tcg_st(ctx, op);
-        return false;
+        return fold_tcg_st(ctx, op);
     }
 
     src = arg_temp(op->args[0]);
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
     last = ofs + tcg_type_size(type) - 1;
     remove_mem_copy_in(ctx, ofs, last);
     record_mem_copy(ctx, type, src, ofs, last);
-    return false;
+    return true;
 }
 
 static bool fold_xor(OptContext *ctx, TCGOp *op)
--
2.43.0

Rename to fold_addsub2.
Use Int128 to implement the wider operation.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 65 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 21 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/int128.h"
 #include "tcg/tcg-op.h"
 #include "tcg-internal.h"
 
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
+static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
 {
     if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
         arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
-        uint32_t al = arg_info(op->args[2])->val;
-        uint32_t ah = arg_info(op->args[3])->val;
-        uint32_t bl = arg_info(op->args[4])->val;
-        uint32_t bh = arg_info(op->args[5])->val;
-        uint64_t a = ((uint64_t)ah << 32) | al;
-        uint64_t b = ((uint64_t)bh << 32) | bl;
+        uint64_t al = arg_info(op->args[2])->val;
+        uint64_t ah = arg_info(op->args[3])->val;
+        uint64_t bl = arg_info(op->args[4])->val;
+        uint64_t bh = arg_info(op->args[5])->val;
         TCGArg rl, rh;
-        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+        TCGOp *op2;
 
-        if (add) {
-            a += b;
+        if (ctx->type == TCG_TYPE_I32) {
+            uint64_t a = deposit64(al, 32, 32, ah);
+            uint64_t b = deposit64(bl, 32, 32, bh);
+
+            if (add) {
+                a += b;
+            } else {
+                a -= b;
+            }
+
+            al = sextract64(a, 0, 32);
+            ah = sextract64(a, 32, 32);
         } else {
-            a -= b;
+            Int128 a = int128_make128(al, ah);
+            Int128 b = int128_make128(bl, bh);
+
+            if (add) {
+                a = int128_add(a, b);
+            } else {
+                a = int128_sub(a, b);
+            }
+
+            al = int128_getlo(a);
+            ah = int128_gethi(a);
         }
 
         rl = op->args[0];
         rh = op->args[1];
-        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
-        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
+
+        /* The proper opcode is supplied by tcg_opt_gen_mov. */
+        op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+        tcg_opt_gen_movi(ctx, op, rl, al);
+        tcg_opt_gen_movi(ctx, op2, rh, ah);
         return true;
     }
     return false;
 }
 
-static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_add2(OptContext *ctx, TCGOp *op)
 {
-    return fold_addsub2_i32(ctx, op, true);
+    return fold_addsub2(ctx, op, true);
 }
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     return false;
 }
 
-static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_sub2(OptContext *ctx, TCGOp *op)
 {
-    return fold_addsub2_i32(ctx, op, false);
+    return fold_addsub2(ctx, op, false);
 }
 
 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(add):
             done = fold_add(&ctx, op);
             break;
-        case INDEX_op_add2_i32:
-            done = fold_add2_i32(&ctx, op);
+        CASE_OP_32_64(add2):
+            done = fold_add2(&ctx, op);
             break;
         CASE_OP_32_64_VEC(and):
             done = fold_and(&ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(sub):
             done = fold_sub(&ctx, op);
             break;
-        case INDEX_op_sub2_i32:
-            done = fold_sub2_i32(&ctx, op);
+        CASE_OP_32_64(sub2):
+            done = fold_sub2(&ctx, op);
             break;
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
--
2.25.1
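
What Int128 buys fold_addsub2 is exactly the carry/borrow between the two 64-bit halves. A minimal sketch of the add case, assuming plain C without a 128-bit type (add128 is an invented name, not QEMU's Int128 API):

    #include <assert.h>
    #include <stdint.h>

    /* 128-bit add as two 64-bit adds plus a carry out of the low half. */
    static void add128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                       uint64_t *rl, uint64_t *rh)
    {
        *rl = al + bl;
        *rh = ah + bh + (*rl < al);   /* carry iff the low half wrapped */
    }

    int main(void)
    {
        uint64_t rl, rh;
        add128(~0ull, 0, 1, 0, &rl, &rh);
        assert(rl == 0 && rh == 1);   /* (2^64 - 1) + 1 = 2^64 */
        return 0;
    }
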
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
     return fold_masks_zs(ctx, op, z_mask, 0);
 }
 
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
+{
+    return fold_masks_zs(ctx, op, -1, s_mask);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
--
2.43.0

Most of these are handled by creating a fold_const2_commutative
to handle all of the binary operators.  The rest were already
handled on a case-by-case basis in the switch, and have their
own fold function in which to place the call.

We now have only one major switch on TCGOpcode.

Introduce NO_DEST and a block comment for swap_commutative in
order to make the handling of brcond and movcond opcodes cleaner.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 142 ++++++++++++++++++++++++-------------------------
 1 file changed, 70 insertions(+), 72 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
     return -1;
 }
 
+/**
+ * swap_commutative:
+ * @dest: TCGArg of the destination argument, or NO_DEST.
+ * @p1: first paired argument
+ * @p2: second paired argument
+ *
+ * If *@p1 is a constant and *@p2 is not, swap.
+ * If *@p2 matches @dest, swap.
+ * Return true if a swap was performed.
+ */
+
+#define NO_DEST  temp_arg(NULL)
+
 static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
 {
     TCGArg a1 = *p1, a2 = *p2;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
+{
+    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+    return fold_const2(ctx, op);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t a_mask = ctx->a_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
 
 static bool fold_add(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
 
 static bool fold_add2(OptContext *ctx, TCGOp *op)
 {
+    /* Note that the high and low parts may be independently swapped. */
+    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
+    swap_commutative(op->args[1], &op->args[3], &op->args[5]);
+
     return fold_addsub2(ctx, op, true);
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 {
     uint64_t z1, z2;
 
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xx_to_x(ctx, op)) {
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[2];
-    int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
+    int i;
 
+    if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
+        op->args[2] = cond = tcg_swap_cond(cond);
+    }
+
+    i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
     if (i == 0) {
         tcg_op_remove(ctx->tcg, op);
         return true;
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[4];
-    int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
     TCGArg label = op->args[5];
-    int inv = 0;
+    int i, inv = 0;
 
+    if (swap_commutative2(&op->args[0], &op->args[2])) {
+        op->args[4] = cond = tcg_swap_cond(cond);
+    }
+
+    i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
     if (i >= 0) {
         goto do_brcond_const;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
 static bool fold_movcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[5];
-    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+    int i;
 
+    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+        op->args[5] = cond = tcg_swap_cond(cond);
+    }
+    /*
+     * Canonicalize the "false" input reg to match the destination reg so
+     * that the tcg backend can implement a "move if true" operation.
+     */
+    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
+        op->args[5] = cond = tcg_invert_cond(cond);
+    }
+
+    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
     if (i >= 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
 
 static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_i(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
 
 static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 {
+    swap_commutative(op->args[0], &op->args[2], &op->args[3]);
+
     if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
         uint64_t a = arg_info(op->args[2])->val;
         uint64_t b = arg_info(op->args[3])->val;
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
 static bool fold_setcond(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[3];
-    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+    int i;
 
+    if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
+        op->args[3] = cond = tcg_swap_cond(cond);
+    }
+
+    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
     if (i >= 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], i);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
 static bool fold_setcond2(OptContext *ctx, TCGOp *op)
 {
     TCGCond cond = op->args[5];
-    int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
-    int inv = 0;
+    int i, inv = 0;
 
+    if (swap_commutative2(&op->args[1], &op->args[3])) {
+        op->args[5] = cond = tcg_swap_cond(cond);
+    }
+
+    i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
     if (i >= 0) {
         goto do_setcond_const;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
+    if (fold_const2_commutative(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_xi_to_not(ctx, op, -1)) {
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             ctx.type = TCG_TYPE_I32;
         }
 
-        /* For commutative operations make constant second argument */
-        switch (opc) {
-        CASE_OP_32_64_VEC(add):
-        CASE_OP_32_64_VEC(mul):
-        CASE_OP_32_64_VEC(and):
-        CASE_OP_32_64_VEC(or):
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(eqv):
-        CASE_OP_32_64(nand):
-        CASE_OP_32_64(nor):
-        CASE_OP_32_64(muluh):
-        CASE_OP_32_64(mulsh):
-            swap_commutative(op->args[0], &op->args[1], &op->args[2]);
-            break;
-        CASE_OP_32_64(brcond):
-            if (swap_commutative(-1, &op->args[0], &op->args[1])) {
-                op->args[2] = tcg_swap_cond(op->args[2]);
-            }
-            break;
-        CASE_OP_32_64(setcond):
-            if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
-                op->args[3] = tcg_swap_cond(op->args[3]);
-            }
-            break;
-        CASE_OP_32_64(movcond):
-            if (swap_commutative(-1, &op->args[1], &op->args[2])) {
-                op->args[5] = tcg_swap_cond(op->args[5]);
-            }
-            /* For movcond, we canonicalize the "false" input reg to match
-               the destination reg so that the tcg backend can implement
-               a "move if true" operation.  */
-            if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
-                op->args[5] = tcg_invert_cond(op->args[5]);
-            }
-            break;
-        CASE_OP_32_64(add2):
-            swap_commutative(op->args[0], &op->args[2], &op->args[4]);
-            swap_commutative(op->args[1], &op->args[3], &op->args[5]);
-            break;
-        CASE_OP_32_64(mulu2):
-        CASE_OP_32_64(muls2):
-            swap_commutative(op->args[0], &op->args[2], &op->args[3]);
-            break;
-        case INDEX_op_brcond2_i32:
-            if (swap_commutative2(&op->args[0], &op->args[2])) {
-                op->args[4] = tcg_swap_cond(op->args[4]);
-            }
-            break;
-        case INDEX_op_setcond2_i32:
-            if (swap_commutative2(&op->args[1], &op->args[3])) {
-                op->args[5] = tcg_swap_cond(op->args[5]);
-            }
-            break;
-        default:
-            break;
-        }
-
         /* Assume all bits affected, and no bits known zero. */
         ctx.a_mask = -1;
         ctx.z_mask = -1;
--
2.25.1
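
The canonicalization rule that fold_const2_commutative relies on is simply "put the constant second", so later folds only ever test one operand position. A minimal sketch in plain C (this is not QEMU's swap_commutative, which additionally swaps when the second operand matches the destination):

    #include <assert.h>
    #include <stdint.h>

    struct arg { uint64_t val; int is_const; };

    /* If the first operand is constant and the second is not, swap them. */
    static int swap_commutative(struct arg *p1, struct arg *p2)
    {
        if (p1->is_const && !p2->is_const) {
            struct arg t = *p1;
            *p1 = *p2;
            *p2 = t;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct arg a = { 5, 1 }, b = { 0, 0 };
        assert(swap_commutative(&a, &b));   /* "add r, 5, x" -> "add r, x, 5" */
        assert(!a.is_const && b.is_const);
        return 0;
    }
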
Use the scalbn interface instead of float_muladd_halve_result.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/helper-a64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
         (float16_is_infinity(b) && float16_is_zero(a))) {
         return float16_one_point_five;
     }
-    return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
+    return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
 }
 
 float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
         (float32_is_infinity(b) && float32_is_zero(a))) {
         return float32_one_point_five;
     }
-    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
+    return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
 }
 
 float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
         (float64_is_infinity(b) && float64_is_zero(a))) {
         return float64_one_point_five;
     }
-    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
+    return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
 }
 
 /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
--
2.43.0

Pretending that the source is i64 when it is in fact i32 is
incorrect; we have type-changing opcodes that must be used.
This bug trips up the subsequent change to the optimizer.

Fixes: 4f2331e5b67a
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
 
         if (is_32bit) {
             TCGv_i64 temp = tcg_temp_new_i64();
-            TCGv_i64 orig = temp_tcgv_i64(args[i]);
+            TCGv_i32 orig = temp_tcgv_i32(args[i]);
             if (is_signed) {
-                tcg_gen_ext32s_i64(temp, orig);
+                tcg_gen_ext_i32_i64(temp, orig);
             } else {
-                tcg_gen_ext32u_i64(temp, orig);
+                tcg_gen_extu_i32_i64(temp, orig);
             }
             args[i] = tcgv_i64_temp(temp);
         }
--
2.25.1
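
The helper-a64 change above leans on the fact that "halve the result" is exactly a scale by 2^-1, which the scalbn-style muladd expresses directly. A minimal sketch of the equivalence using the C library's fma()/ldexp() rather than QEMU's softfloat (both operations are exact in binary floating point):

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        double a = 1.5, b = 2.0, c = 3.0;
        double halved = fma(a, b, c) / 2.0;
        double scaled = ldexp(fma(a, b, c), -1);   /* scalbn(x, -1) */
        assert(halved == scaled);
        return 0;
    }
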
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);

     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }

     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }

 /*
--
2.43.0

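A worked instance of the 32-bit canonicalization above (editorial sketch
with hypothetical mask values, not from the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical 32-bit result: bit 31 and bits 7..0 may be set. */
        uint64_t z_mask = 0x800000ffull;
        uint64_t s_mask = 0;

        /* The canonicalization fold_masks performs for TCG_TYPE_I32: */
        z_mask = (uint64_t)(int32_t)z_mask;  /* sign-extend the zero mask */
        s_mask |= 0xffffffff00000000ull;     /* MAKE_64BIT_MASK(32, 32) */

        /* Bits 63..32 now mirror bit 31, matching how 32-bit constants
         * are stored elsewhere in the optimizer. */
        assert(z_mask == 0xffffffff800000ffull);
        return 0;
    }
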
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)

 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, -1) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_ix_to_not(ctx, op, 0)) {
         return true;
     }

-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }

 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
--
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }

-    ctx->z_mask = z_mask;
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    return fold_masks(ctx, op);
+
+    return fold_masks_z(ctx, op, z_mask);
 }

 static bool fold_mb(OptContext *ctx, TCGOp *op)
--
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }

 static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)

 static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask = -1, s_mask = 0;
+
     /* We can't do any folding with a load, but we can record bits. */
     switch (op->opc) {
     CASE_OP_32_64(ld8s):
-        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
+        s_mask = INT8_MIN;
         break;
     CASE_OP_32_64(ld8u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
+        z_mask = MAKE_64BIT_MASK(0, 8);
         break;
     CASE_OP_32_64(ld16s):
-        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
+        s_mask = INT16_MIN;
         break;
     CASE_OP_32_64(ld16u):
-        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
+        z_mask = MAKE_64BIT_MASK(0, 16);
         break;
     case INDEX_op_ld32s_i64:
-        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
+        s_mask = INT32_MIN;
         break;
     case INDEX_op_ld32u_i64:
-        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
+        z_mask = MAKE_64BIT_MASK(0, 32);
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

 static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
--
2.43.0

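A note on the INTn_MIN constants used above (editorial illustration):
cast to uint64_t, each yields a left-aligned mask whose set bits are
exactly the positions guaranteed to equal the msb after an n-bit signed
load:

    #include <stdint.h>

    _Static_assert((uint64_t)INT8_MIN  == 0xffffffffffffff80ull,
                   "ld8s: bits 63..7 match the msb");
    _Static_assert((uint64_t)INT16_MIN == 0xffffffffffff8000ull,
                   "ld16s: bits 63..15 match the msb");
    _Static_assert((uint64_t)INT32_MIN == 0xffffffff80000000ull,
                   "ld32s: bits 63..31 match the msb");
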
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
leads to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, and
canonicalization is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so.  Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;

 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {

     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;

-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
          */
         if (i == 0) {
             ts_info(ts)->z_mask = ctx->z_mask;
-            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;

     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }

     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }

@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;

-    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }

@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

         s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
-        ctx->s_mask = smask_from_smask(s_mask);

         return fold_masks(ctx, op);
     }
--
2.43.0

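To make the off-by-one concrete (editorial sketch, hypothetical helper
names and values, not from the patch): for val = 0xffffffffffffff80,
clrsb64(val) == 56, and the two encodings differ in whether the sign bit
itself is covered. Arithmetic right shift of a negative value is assumed,
as QEMU's code does:

    #include <stdint.h>

    static uint64_t s_mask_old(int clrsb)   /* repetitions only */
    {
        return ~(~0ull >> clrsb);           /* 56 -> 0xffffffffffffff00 */
    }

    static uint64_t s_mask_new(int clrsb)   /* repetitions + sign bit */
    {
        return (uint64_t)(INT64_MIN >> clrsb);  /* 56 -> 0xffffffffffffff80 */
    }

    /*
     * Arithmetically shifting the value right by k shifts the new mask
     * the same way and keeps it valid; the old mask lands one bit short,
     * which is the problem described above.
     */
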
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
         fold_setcond_tst_pow2(ctx, op, false);
     }

-    ctx->z_mask = 1;
-    return false;
+    return fold_masks_z(ctx, op, 1);
 }

 static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
--
2.43.0

Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);

+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);

     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);

     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);

     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;

 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
--
2.43.0

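A worked example of the augmentation (editorial sketch, using the helper
as it stands at this point in the series and assuming a GCC-style builtin
in place of clz64): a setcond result has z_mask == 1, and fold_masks_zs
can now derive the sign information by itself:

    #include <stdint.h>

    static uint64_t smask_from_zmask_ref(uint64_t zmask) /* mirrors helper */
    {
        int rep = zmask ? __builtin_clzll(zmask) : 64;
        return rep ? ~(~0ull >> (rep - 1)) : 0;
    }

    int main(void)
    {
        /* clz64(1) == 63, giving 62 usable repetitions after the msb:
         * bits 63..2 are known sign copies of a 0/1 value. */
        return smask_from_zmask_ref(1) != 0xfffffffffffffffcull;
    }
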
Avoid the use of the OptContext slots.  Find TempOptInfo once.
Compute s_mask from the union of the maximum count and the
op2 fallback for op1 being zero.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)

 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);

-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
+    if (ti_is_const(t1)) {
+        uint64_t t = ti_const_val(t1);

         if (t != 0) {
             t = do_constant_folding(op->opc, ctx->type, t, 0);
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    return false;
+    s_mask = ~z_mask;
+    z_mask |= t2->z_mask;
+    s_mask &= t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }

 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
--
2.43.0

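A worked instance of that union (editorial sketch, hypothetical fallback
masks): a 64-bit clz produces at most 64, and the op2 fallback widens the
result masks:

    #include <stdint.h>

    int main(void)
    {
        uint64_t z_mask = 0x7f;     /* count 0..64 fits in 7 bits */
        uint64_t s_mask = ~z_mask;  /* bits 63..7 match the (zero) msb */

        /* If op1 may be zero the result may instead be op2: */
        uint64_t t2_z = 0xff, t2_s = ~(uint64_t)0xff;  /* hypothetical */
        z_mask |= t2_z;             /* 0xff */
        s_mask &= t2_s;             /* bits 63..8 remain sign copies */
        return !(z_mask == 0xff && s_mask == ~(uint64_t)0xff);
    }
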
Avoid the use of the OptContext slots.  Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)

 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
     uint64_t s_mask, z_mask, sign;
+    TempOptInfo *t1, *t2;

     if (fold_const2(ctx, op) ||
         fold_ix_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         return true;
     }

-    s_mask = arg_info(op->args[1])->s_mask;
-    z_mask = arg_info(op->args[1])->z_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    s_mask = t1->s_mask;
+    z_mask = t1->z_mask;

-    if (arg_is_const(op->args[2])) {
-        int sh = arg_info(op->args[2])->val;
-
-        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+    if (ti_is_const(t2)) {
+        int sh = ti_const_val(t2);

+        z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
         s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);

-        return fold_masks(ctx, op);
+        return fold_masks_zs(ctx, op, z_mask, s_mask);
     }

     switch (op->opc) {
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
          * Arithmetic right shift will not reduce the number of
          * input sign repetitions.
          */
-        ctx->s_mask = s_mask;
-        break;
+        return fold_masks_s(ctx, op, s_mask);
     CASE_OP_32_64(shr):
         /*
          * If the sign bit is known zero, then logical right shift
-         * will not reduced the number of input sign repetitions.
+         * will not reduce the number of input sign repetitions.
          */
-        sign = (s_mask & -s_mask) >> 1;
+        sign = -s_mask;
         if (sign && !(z_mask & sign)) {
-            ctx->s_mask = s_mask;
+            return fold_masks_s(ctx, op, s_mask);
         }
         break;
     default:
         break;
     }

-    return false;
+    return finish_folding(ctx, op);
 }

 static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
--
2.43.0

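Why the constant-shift case can reuse do_constant_folding on s_mask
(editorial sketch): in the current representation the mask transforms
exactly like the value under an arithmetic shift:

    #include <stdint.h>

    int main(void)
    {
        /* s_mask of an ext32s result: bits 63..31 match the msb. */
        int64_t s_mask = (int64_t)0xffffffff80000000ull;

        s_mask >>= 4;   /* the same sar applied to the value */

        /* Still left-aligned and still includes the sign bit: 63..27. */
        return s_mask != (int64_t)0xfffffffff8000000ull;
    }
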
Convert all targets simultaneously, as the gen_intermediate_code
function disappears from the target.  While there are possible
workarounds, they're larger than simply performing the conversion.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/translator.h        | 14 --------------
 include/hw/core/tcg-cpu-ops.h    | 13 +++++++++++++
 target/alpha/cpu.h               |  2 ++
 target/arm/internals.h           |  2 ++
 target/avr/cpu.h                 |  2 ++
 target/hexagon/cpu.h             |  2 ++
 target/hppa/cpu.h                |  2 ++
 target/i386/tcg/helper-tcg.h     |  2 ++
 target/loongarch/internals.h     |  2 ++
 target/m68k/cpu.h                |  2 ++
 target/microblaze/cpu.h          |  2 ++
 target/mips/tcg/tcg-internal.h   |  2 ++
 target/openrisc/cpu.h            |  2 ++
 target/ppc/cpu.h                 |  2 ++
 target/riscv/cpu.h               |  3 +++
 target/rx/cpu.h                  |  2 ++
 target/s390x/s390x-internal.h    |  2 ++
 target/sh4/cpu.h                 |  2 ++
 target/sparc/cpu.h               |  2 ++
 target/tricore/cpu.h             |  2 ++
 target/xtensa/cpu.h              |  2 ++
 accel/tcg/cpu-exec.c             |  8 +++++---
 accel/tcg/translate-all.c        |  8 +++++---
 target/alpha/cpu.c               |  1 +
 target/alpha/translate.c         |  4 ++--
 target/arm/cpu.c                 |  1 +
 target/arm/tcg/cpu-v7m.c         |  1 +
 target/arm/tcg/translate.c       |  5 ++---
 target/avr/cpu.c                 |  1 +
 target/avr/translate.c           |  6 +++---
 target/hexagon/cpu.c             |  1 +
 target/hexagon/translate.c       |  4 ++--
 target/hppa/cpu.c                |  1 +
 target/hppa/translate.c          |  4 ++--
 target/i386/tcg/tcg-cpu.c        |  1 +
 target/i386/tcg/translate.c      |  5 ++---
 target/loongarch/cpu.c           |  1 +
 target/loongarch/tcg/translate.c |  4 ++--
 target/m68k/cpu.c                |  1 +
 target/m68k/translate.c          |  4 ++--
 target/microblaze/cpu.c          |  1 +
 target/microblaze/translate.c    |  4 ++--
 target/mips/cpu.c                |  1 +
 target/mips/tcg/translate.c      |  4 ++--
 target/openrisc/cpu.c            |  1 +
 target/openrisc/translate.c      |  4 ++--
 target/ppc/cpu_init.c            |  1 +
 target/ppc/translate.c           |  4 ++--
 target/riscv/tcg/tcg-cpu.c       |  1 +
 target/riscv/translate.c         |  4 ++--
 target/rx/cpu.c                  |  1 +
 target/rx/translate.c            |  4 ++--
 target/s390x/cpu.c               |  1 +
 target/s390x/tcg/translate.c     |  4 ++--
 target/sh4/cpu.c                 |  1 +
 target/sh4/translate.c           |  4 ++--
 target/sparc/cpu.c               |  1 +
 target/sparc/translate.c         |  4 ++--
 target/tricore/cpu.c             |  1 +
 target/tricore/translate.c       |  5 ++---
 target/xtensa/cpu.c              |  1 +
 target/xtensa/translate.c        |  4 ++--
 62 files changed, 121 insertions(+), 62 deletions(-)

diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@
 #include "qemu/bswap.h"
 #include "exec/vaddr.h"

-/**
- * gen_intermediate_code
- * @cpu: cpu context
- * @tb: translation block
- * @max_insns: max number of instructions to translate
- * @pc: guest virtual program counter address
- * @host_pc: host physical program counter address
- *
- * This function must be provided by the target, which should create
- * the target-specific DisasContext, and then invoke translator_loop.
- */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc);
-
 /**
  * DisasJumpType:
  * @DISAS_NEXT: Next instruction in program order.
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
      * Called when the first CPU is realized.
      */
     void (*initialize)(void);
+    /**
+     * @translate_code: Translate guest instructions to TCGOps
+     * @cpu: cpu context
+     * @tb: translation block
+     * @max_insns: max number of instructions to translate
+     * @pc: guest virtual program counter address
+     * @host_pc: host physical program counter address
+     *
+     * This function must be provided by the target, which should create
+     * the target-specific DisasContext, and then invoke translator_loop.
+     */
+    void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
     /**
      * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
      *
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
 };

 void alpha_translate_init(void);
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU

diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);

 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 void arm_cpu_register_gdb_commands(ARMCPU *cpu);
 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
 }

 void avr_cpu_tcg_init(void);
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 int cpu_avr_exec(CPUState *cpu);

diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
 typedef HexagonCPU ArchCPU;

 void hexagon_translate_init(void);
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 #include "exec/cpu-all.h"

diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
 }

 void hppa_translate_init(void);
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU

diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)

 /* translate.c */
 void tcg_x86_init(void);
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 /* excp_helper.c */
 G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)

 void loongarch_translate_init(void);
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+                              int *max_insns, vaddr pc, void *host_pc);

 void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                    uint32_t exception,
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

 void m68k_tcg_init(void);
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);
 void m68k_cpu_init_gdb(M68kCPU *cpu);
 uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
 void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
 }

 void mb_tcg_init(void);
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU

diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@
 #include "cpu.h"

 void mips_tcg_init(void);
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);

 void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
 G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void openrisc_translate_init(void);
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc);
 int print_insn_or1k(bfd_vma addr, disassemble_info *info);

 #ifndef CONFIG_USER_ONLY
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;

 /*****************************************************************************/
 void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 #if !defined(CONFIG_USER_ONLY)
 void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

 void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
+
 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                       uint32_t exception, uintptr_t pc);

diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

 void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);
 void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);

 #include "exec/cpu-all.h"
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,

 /* translate.c */
 void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
 void s390x_restore_state_to_opc(CPUState *cs,
                                 const TranslationBlock *tb,
                                 const uint64_t *data);
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                                uintptr_t retaddr);

 void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 #if !defined(CONFIG_USER_ONLY)
 hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,

 /* translate.c */
 void sparc_tcg_init(void);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);

 /* fop_helper.c */
 target_ulong cpu_get_fsr(CPUSPARCState *);
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)

 void cpu_state_reset(CPUTriCoreState *s);
 void tricore_tcg_init(void);
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
                                         uint64_t *cs_base, uint32_t *flags)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,

 void xtensa_collect_sr_names(const XtensaConfig *config);
 void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
 void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
 void xtensa_breakpoint_handler(CPUState *cs);
 void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
391
index XXXXXXX..XXXXXXX 100644
392
--- a/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
408
/*
409
* Overflow of code_gen_buffer, or the current slice of it.
410
*
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
413
* should we re-do the tcg optimization currently hidden
414
* inside tcg_gen_code. All that should be required is to
415
* flush the TBs, allocate a new TB, re-initialize it per
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
417
index XXXXXXX..XXXXXXX 100644
418
--- a/target/alpha/cpu.c
419
+++ b/target/alpha/cpu.c
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
421
422
static const TCGCPUOps alpha_tcg_ops = {
423
.initialize = alpha_translate_init,
424
+ .translate_code = alpha_translate_code,
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
426
.restore_state_to_opc = alpha_restore_state_to_opc,
427
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
433
.tb_stop = alpha_tr_tb_stop,
434
};
435
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
437
- vaddr pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
440
{
441
DisasContext dc;
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/arm/cpu.c
446
+++ b/target/arm/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
448
#ifdef CONFIG_TCG
449
static const TCGCPUOps arm_tcg_ops = {
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
472
.tb_stop = arm_tr_tb_stop,
473
};
474
475
-/* generate intermediate code for basic block 'tb'. */
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
477
- vaddr pc, void *host_pc)
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
480
{
481
DisasContext dc = { };
482
const TranslatorOps *ops = &arm_translator_ops;
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/avr/cpu.c
486
+++ b/target/avr/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
488
489
static const TCGCPUOps avr_tcg_ops = {
490
.initialize = avr_cpu_tcg_init,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
507
*/
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
509
.tb_stop = avr_tr_tb_stop,
510
};
511
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
513
- vaddr pc, void *host_pc)
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
515
+ int *max_insns, vaddr pc, void *host_pc)
516
{
517
DisasContext dc = { };
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/hexagon/cpu.c
522
+++ b/target/hexagon/cpu.c
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
524
525
static const TCGCPUOps hexagon_tcg_ops = {
526
.initialize = hexagon_translate_init,
527
+ .translate_code = hexagon_translate_code,
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
530
};
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
532
index XXXXXXX..XXXXXXX 100644
533
--- a/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
536
.tb_stop = hexagon_tr_tb_stop,
537
};
538
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
559
index XXXXXXX..XXXXXXX 100644
560
--- a/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
563
#endif
564
};
565
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
567
- vaddr pc, void *host_pc)
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
569
+ int *max_insns, vaddr pc, void *host_pc)
570
{
571
DisasContext ctx = { };
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
586
index XXXXXXX..XXXXXXX 100644
587
--- a/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
590
.tb_stop = i386_tr_tb_stop,
591
};
592
593
-/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
618
.tb_stop = loongarch_tr_tb_stop,
619
};
620
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
625
{
626
DisasContext ctx;
627
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
629
index XXXXXXX..XXXXXXX 100644
630
--- a/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
645
.tb_stop = m68k_tr_tb_stop,
646
};
647
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
649
- vaddr pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
652
{
653
DisasContext dc;
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
656
index XXXXXXX..XXXXXXX 100644
657
--- a/target/microblaze/cpu.c
658
+++ b/target/microblaze/cpu.c
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
660
661
static const TCGCPUOps mb_tcg_ops = {
662
.initialize = mb_tcg_init,
663
+ .translate_code = mb_translate_code,
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
668
index XXXXXXX..XXXXXXX 100644
669
--- a/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
672
.tb_stop = mb_tr_tb_stop,
673
};
674
675
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
676
- vaddr pc, void *host_pc)
677
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
678
+ int *max_insns, vaddr pc, void *host_pc)
679
{
680
DisasContext dc;
681
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
682
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
683
index XXXXXXX..XXXXXXX 100644
684
--- a/target/mips/cpu.c
685
+++ b/target/mips/cpu.c
686
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
687
#include "hw/core/tcg-cpu-ops.h"
688
static const TCGCPUOps mips_tcg_ops = {
689
.initialize = mips_tcg_init,
690
+ .translate_code = mips_translate_code,
691
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
692
.restore_state_to_opc = mips_restore_state_to_opc,
693
694
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
695
index XXXXXXX..XXXXXXX 100644
696
--- a/target/mips/tcg/translate.c
697
+++ b/target/mips/tcg/translate.c
698
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
699
.tb_stop = mips_tr_tb_stop,
700
};
701
702
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
703
- vaddr pc, void *host_pc)
704
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
705
+ int *max_insns, vaddr pc, void *host_pc)
706
{
707
DisasContext ctx;
708
709
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
710
index XXXXXXX..XXXXXXX 100644
711
--- a/target/openrisc/cpu.c
712
+++ b/target/openrisc/cpu.c
713
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
714
715
static const TCGCPUOps openrisc_tcg_ops = {
716
.initialize = openrisc_translate_init,
717
+ .translate_code = openrisc_translate_code,
718
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
719
.restore_state_to_opc = openrisc_restore_state_to_opc,
720
721
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
722
index XXXXXXX..XXXXXXX 100644
723
--- a/target/openrisc/translate.c
724
+++ b/target/openrisc/translate.c
725
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
726
.tb_stop = openrisc_tr_tb_stop,
727
};
728
729
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
730
- vaddr pc, void *host_pc)
731
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
732
+ int *max_insns, vaddr pc, void *host_pc)
733
{
734
DisasContext ctx;
735
736
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
737
index XXXXXXX..XXXXXXX 100644
738
--- a/target/ppc/cpu_init.c
739
+++ b/target/ppc/cpu_init.c
740
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
741
742
static const TCGCPUOps ppc_tcg_ops = {
743
.initialize = ppc_translate_init,
744
+ .translate_code = ppc_translate_code,
745
.restore_state_to_opc = ppc_restore_state_to_opc,
746
747
#ifdef CONFIG_USER_ONLY
748
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
749
index XXXXXXX..XXXXXXX 100644
750
--- a/target/ppc/translate.c
751
+++ b/target/ppc/translate.c
752
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
753
.tb_stop = ppc_tr_tb_stop,
754
};
755
756
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
757
- vaddr pc, void *host_pc)
758
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
759
+ int *max_insns, vaddr pc, void *host_pc)
760
{
761
DisasContext ctx;
762
763
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
764
index XXXXXXX..XXXXXXX 100644
765
--- a/target/riscv/tcg/tcg-cpu.c
766
+++ b/target/riscv/tcg/tcg-cpu.c
767
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
768
769
static const TCGCPUOps riscv_tcg_ops = {
770
.initialize = riscv_translate_init,
771
+ .translate_code = riscv_translate_code,
772
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
773
.restore_state_to_opc = riscv_restore_state_to_opc,
774
775
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
776
index XXXXXXX..XXXXXXX 100644
777
--- a/target/riscv/translate.c
778
+++ b/target/riscv/translate.c
779
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
780
.tb_stop = riscv_tr_tb_stop,
781
};
782
783
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
784
- vaddr pc, void *host_pc)
785
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
786
+ int *max_insns, vaddr pc, void *host_pc)
787
{
788
DisasContext ctx;
789
790
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
791
index XXXXXXX..XXXXXXX 100644
792
--- a/target/rx/cpu.c
793
+++ b/target/rx/cpu.c
794
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
795
796
static const TCGCPUOps rx_tcg_ops = {
797
.initialize = rx_translate_init,
798
+ .translate_code = rx_translate_code,
799
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
800
.restore_state_to_opc = rx_restore_state_to_opc,
801
.tlb_fill = rx_cpu_tlb_fill,
802
diff --git a/target/rx/translate.c b/target/rx/translate.c
803
index XXXXXXX..XXXXXXX 100644
804
--- a/target/rx/translate.c
805
+++ b/target/rx/translate.c
806
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
807
.tb_stop = rx_tr_tb_stop,
808
};
809
810
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
811
- vaddr pc, void *host_pc)
812
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
813
+ int *max_insns, vaddr pc, void *host_pc)
814
{
815
DisasContext dc;
816
817
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
818
index XXXXXXX..XXXXXXX 100644
819
--- a/target/s390x/cpu.c
820
+++ b/target/s390x/cpu.c
821
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
822
823
static const TCGCPUOps s390_tcg_ops = {
824
.initialize = s390x_translate_init,
825
+ .translate_code = s390x_translate_code,
826
.restore_state_to_opc = s390x_restore_state_to_opc,
827
828
#ifdef CONFIG_USER_ONLY
829
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
830
index XXXXXXX..XXXXXXX 100644
831
--- a/target/s390x/tcg/translate.c
832
+++ b/target/s390x/tcg/translate.c
833
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
834
.disas_log = s390x_tr_disas_log,
835
};
836
837
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
838
- vaddr pc, void *host_pc)
839
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
840
+ int *max_insns, vaddr pc, void *host_pc)
841
{
842
DisasContext dc;
843
844
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
845
index XXXXXXX..XXXXXXX 100644
846
--- a/target/sh4/cpu.c
847
+++ b/target/sh4/cpu.c
848
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
849
850
static const TCGCPUOps superh_tcg_ops = {
851
.initialize = sh4_translate_init,
852
+ .translate_code = sh4_translate_code,
853
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
854
.restore_state_to_opc = superh_restore_state_to_opc,
855
856
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
857
index XXXXXXX..XXXXXXX 100644
858
--- a/target/sh4/translate.c
859
+++ b/target/sh4/translate.c
860
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
861
.tb_stop = sh4_tr_tb_stop,
862
};
863
864
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
865
- vaddr pc, void *host_pc)
866
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
867
+ int *max_insns, vaddr pc, void *host_pc)
868
{
869
DisasContext ctx;
870
871
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
872
index XXXXXXX..XXXXXXX 100644
873
--- a/target/sparc/cpu.c
874
+++ b/target/sparc/cpu.c
875
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
876
877
static const TCGCPUOps sparc_tcg_ops = {
878
.initialize = sparc_tcg_init,
879
+ .translate_code = sparc_translate_code,
880
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
881
.restore_state_to_opc = sparc_restore_state_to_opc,
882
883
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
884
index XXXXXXX..XXXXXXX 100644
885
--- a/target/sparc/translate.c
886
+++ b/target/sparc/translate.c
887
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
888
.tb_stop = sparc_tr_tb_stop,
889
};
890
891
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
892
- vaddr pc, void *host_pc)
893
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
894
+ int *max_insns, vaddr pc, void *host_pc)
895
{
896
DisasContext dc = {};
897
898
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
899
index XXXXXXX..XXXXXXX 100644
900
--- a/target/tricore/cpu.c
901
+++ b/target/tricore/cpu.c
902
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
903
904
static const TCGCPUOps tricore_tcg_ops = {
905
.initialize = tricore_tcg_init,
906
+ .translate_code = tricore_translate_code,
907
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
908
.restore_state_to_opc = tricore_restore_state_to_opc,
909
.tlb_fill = tricore_cpu_tlb_fill,
910
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
911
index XXXXXXX..XXXXXXX 100644
912
--- a/target/tricore/translate.c
913
+++ b/target/tricore/translate.c
914
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
915
.tb_stop = tricore_tr_tb_stop,
916
};
917
918
-
919
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
920
- vaddr pc, void *host_pc)
921
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
922
+ int *max_insns, vaddr pc, void *host_pc)
923
{
924
DisasContext ctx;
925
translator_loop(cs, tb, max_insns, pc, host_pc,
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
927
index XXXXXXX..XXXXXXX 100644
928
--- a/target/xtensa/cpu.c
929
+++ b/target/xtensa/cpu.c
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
931
932
static const TCGCPUOps xtensa_tcg_ops = {
933
.initialize = xtensa_translate_init,
934
+ .translate_code = xtensa_translate_code,
935
.debug_excp_handler = xtensa_breakpoint_handler,
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
937
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
939
index XXXXXXX..XXXXXXX 100644
940
--- a/target/xtensa/translate.c
941
+++ b/target/xtensa/translate.c
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
943
.tb_stop = xtensa_tr_tb_stop,
944
};
945
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
947
- vaddr pc, void *host_pc)
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
949
+ int *max_insns, vaddr pc, void *host_pc)
950
{
951
DisasContext dc = {};
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
953
--
2.43.0
All uses have been converted to float*_muladd_scalbn.
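
The conversion pattern is mechanical. A sketch for the float32 case
(illustration only, not part of the patch; env->fp_status stands in
for whichever float_status the caller uses):

    /* Before: request halving of the result via a dedicated flag. */
    ret = float32_muladd(a, b, c, float_muladd_halve_result, &env->fp_status);

    /* After: the same fused operation, scaled by 2**-1 before rounding. */
    ret = float32_muladd_scalbn(a, b, c, -1, 0, &env->fp_status);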

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat.h   | 3 ---
 fpu/softfloat.c           | 6 ------
 fpu/softfloat-parts.c.inc | 4 ----
 3 files changed, 13 deletions(-)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
 | Using these differs from negating an input or output before calling
 | the muladd function in that this means that a NaN doesn't have its
 | sign bit inverted before it is propagated.
-| We also support halving the result before rounding, as a special
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
 *----------------------------------------------------------------------------*/
 enum {
     float_muladd_negate_c = 1,
     float_muladd_negate_product = 2,
     float_muladd_negate_result = 4,
-    float_muladd_halve_result = 8,
 };
 
 /*----------------------------------------------------------------------------
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
     if (unlikely(!can_use_fpu(s))) {
         goto soft;
     }
-    if (unlikely(flags & float_muladd_halve_result)) {
-        goto soft;
-    }
 
     float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
     if (unlikely(!f32_is_zon3(ua, ub, uc))) {
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
     if (unlikely(!can_use_fpu(s))) {
         goto soft;
     }
-    if (unlikely(flags & float_muladd_halve_result)) {
-        goto soft;
-    }
 
     float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
     if (unlikely(!f64_is_zon3(ua, ub, uc))) {
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
         a->exp = p_widen.exp;
 
 return_normal:
-    /* TODO: Replace all use of float_muladd_halve_result with scale. */
-    if (flags & float_muladd_halve_result) {
-        a->exp -= 1;
-    }
     a->exp += scale;
 finish_sign:
     if (flags & float_muladd_negate_result) {
--
2.43.0

From: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru>

The cpu_check_watchpoint function checks cpu->watchpoint_hit on entry,
but then checks it again in the middle of the function, where the
field cannot have changed. Remove this redundant condition.

Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <163542169094.2127597.8801843697434113110.stgit@pasha-ThinkPad-X280>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 softmmu/physmem.c | 39 +++++++++++++++++++--------------------
 1 file changed, 19 insertions(+), 20 deletions(-)

diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
         }
         wp->hitaddr = MAX(addr, wp->vaddr);
         wp->hitattrs = attrs;
-        if (!cpu->watchpoint_hit) {
-            if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint &&
-                !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
-                wp->flags &= ~BP_WATCHPOINT_HIT;
-                continue;
-            }
-            cpu->watchpoint_hit = wp;
-            mmap_lock();
-            /* This call also restores vCPU state */
-            tb_check_watchpoint(cpu, ra);
-            if (wp->flags & BP_STOP_BEFORE_ACCESS) {
-                cpu->exception_index = EXCP_DEBUG;
-                mmap_unlock();
-                cpu_loop_exit(cpu);
-            } else {
-                /* Force execution of one insn next time. */
-                cpu->cflags_next_tb = 1 | curr_cflags(cpu);
-                mmap_unlock();
-                cpu_loop_exit_noexc(cpu);
-            }
+        if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint &&
+            !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
+            wp->flags &= ~BP_WATCHPOINT_HIT;
+            continue;
+        }
+        cpu->watchpoint_hit = wp;
+
+        mmap_lock();
+        /* This call also restores vCPU state */
+        tb_check_watchpoint(cpu, ra);
+        if (wp->flags & BP_STOP_BEFORE_ACCESS) {
+            cpu->exception_index = EXCP_DEBUG;
+            mmap_unlock();
+            cpu_loop_exit(cpu);
+        } else {
+            /* Force execution of one insn next time. */
+            cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+            mmap_unlock();
+            cpu_loop_exit_noexc(cpu);
+        }
         }
 
         wp->flags &= ~BP_WATCHPOINT_HIT;
     }
--
2.25.1
Use the scalbn interface instead of float_muladd_halve_result.
1
From: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru>
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Watchpoints that should fire after the memory access
4
break an execution of the current block, try to
5
translate current instruction into the separate block,
6
which then causes debug interrupt.
7
But cpu_interrupt can't be called in such block when
8
icount is enabled, because interrupts muse be allowed
9
explicitly.
10
This patch sets CF_LAST_IO flag for retranslated block,
11
allowing interrupt request for the last instruction.
12
13
Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-Id: <163542169727.2127597.8141772572696627329.stgit@pasha-ThinkPad-X280>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
17
---
6
target/sparc/helper.h | 4 +-
18
softmmu/physmem.c | 2 +-
7
target/sparc/fop_helper.c | 8 ++--
19
1 file changed, 1 insertion(+), 1 deletion(-)
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
20
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
21
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
12
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
23
--- a/softmmu/physmem.c
14
+++ b/target/sparc/helper.h
24
+++ b/softmmu/physmem.c
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
25
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
26
cpu_loop_exit(cpu);
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
27
} else {
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
28
/* Force execution of one insn next time. */
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
29
- cpu->cflags_next_tb = 1 | curr_cflags(cpu);
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
30
+ cpu->cflags_next_tb = 1 | CF_LAST_IO | curr_cflags(cpu);
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
31
mmap_unlock();
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
32
cpu_loop_exit_noexc(cpu);
23
33
}
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
34
--
206
2.43.0
35
2.25.1
207
36
208
37
diff view generated by jsdifflib
Deleted patch
1
This rounding mode is used by Hexagon.
2
1
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
diff view generated by jsdifflib
Deleted patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
3
1
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
diff view generated by jsdifflib
Deleted patch
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
3
1
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.h | 1 -
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/fma_emu.h
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
30
}
31
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
33
-{
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
38
-}
39
-
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
48
{
49
float32 RdV;
50
arch_fpop_start(env);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
56
--
57
2.43.0
diff view generated by jsdifflib
Deleted patch
1
There are no special cases for this instruction.
2
1
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
diff view generated by jsdifflib
Deleted patch
1
There are no special cases for this instruction. Since hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
4
1
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
diff view generated by jsdifflib
Deleted patch
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
5
1
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/hexagon/op_helper.c | 11 +++--------
10
1 file changed, 3 insertions(+), 8 deletions(-)
11
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/op_helper.c
15
+++ b/target/hexagon/op_helper.c
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
float32 RsV, float32 RtV, float32 PuV)
19
{
20
- size4s_t tmp;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
35
--
36
2.43.0
diff view generated by jsdifflib
Deleted patch
1
The function is now unused.
2
1
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/fma_emu.h | 2 -
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.h
13
+++ b/target/hexagon/fma_emu.h
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
15
}
16
int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
19
- int scale, float_status *fp_status);
20
float64 internal_mpyhh(float64 a, float64 b,
21
unsigned long long int accumulated,
22
float_status *fp_status);
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/hexagon/fma_emu.c
26
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
28
return -1;
29
}
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
-static float32 zero_float32(uint8_t sign)
-{
-    if (sign) {
-        return make_float32(0x80000000);
-    } else {
-        return float32_zero;
-    }
-}
-
 #define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
 static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
 { \
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
 }

 GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
-
-static bool is_inf_prod(float64 a, float64 b)
-{
-    return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
-            (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
-            (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
-}
-
-static float64 special_fma(float64 a, float64 b, float64 c,
-                           float_status *fp_status)
-{
-    float64 ret = make_float64(0);
-
-    /*
-     * If A multiplied by B is an exact infinity and C is also an infinity
-     * but with the opposite sign, FMA returns NaN and raises invalid.
-     */
-    uint8_t a_sign = float64_is_neg(a);
-    uint8_t b_sign = float64_is_neg(b);
-    uint8_t c_sign = float64_is_neg(c);
-    if (is_inf_prod(a, b) && float64_is_infinity(c)) {
-        if ((a_sign ^ b_sign) != c_sign) {
-            ret = make_float64(DF_NAN);
-            float_raise(float_flag_invalid, fp_status);
-            return ret;
-        }
-    }
-    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
-        (float64_is_zero(a) && float64_is_infinity(b))) {
-        ret = make_float64(DF_NAN);
-        float_raise(float_flag_invalid, fp_status);
-        return ret;
-    }
-    /*
-     * If none of the above checks are true and C is a NaN,
-     * a NaN shall be returned
-     * If A or B are NaN, a NAN shall be returned.
-     */
-    if (float64_is_any_nan(a) ||
-        float64_is_any_nan(b) ||
-        float64_is_any_nan(c)) {
-        if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
-            float_raise(float_flag_invalid, fp_status);
-        }
-        ret = make_float64(DF_NAN);
-        return ret;
-    }
-    /*
-     * We have checked for adding opposite-signed infinities.
-     * Other infinities return infinity with the correct sign
-     */
-    if (float64_is_infinity(c)) {
-        ret = infinite_float64(c_sign);
-        return ret;
-    }
-    if (float64_is_infinity(a) || float64_is_infinity(b)) {
-        ret = infinite_float64(a_sign ^ b_sign);
-        return ret;
-    }
-    g_assert_not_reached();
-}
-
-static float32 special_fmaf(float32 a, float32 b, float32 c,
-                            float_status *fp_status)
-{
-    float64 aa, bb, cc;
-    aa = float32_to_float64(a, fp_status);
-    bb = float32_to_float64(b, fp_status);
-    cc = float32_to_float64(c, fp_status);
-    return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
-}
-
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
-                       float_status *fp_status)
-{
-    Accum prod;
-    Accum acc;
-    Accum result;
-    accum_init(&prod);
-    accum_init(&acc);
-    accum_init(&result);
-
-    uint8_t a_sign = float32_is_neg(a);
-    uint8_t b_sign = float32_is_neg(b);
-    uint8_t c_sign = float32_is_neg(c);
-    if (float32_is_infinity(a) ||
-        float32_is_infinity(b) ||
-        float32_is_infinity(c)) {
-        return special_fmaf(a, b, c, fp_status);
-    }
-    if (float32_is_any_nan(a) ||
-        float32_is_any_nan(b) ||
-        float32_is_any_nan(c)) {
-        return special_fmaf(a, b, c, fp_status);
-    }
-    if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
-        float32 tmp = float32_mul(a, b, fp_status);
-        tmp = float32_add(tmp, c, fp_status);
-        return tmp;
-    }
-
-    /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
-    prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
-
-    /*
-     * Note: extracting the mantissa into an int is multiplying by
-     * 2**23, so adjust here
-     */
-    prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
-    prod.sign = a_sign ^ b_sign;
-    if (float32_is_zero(a) || float32_is_zero(b)) {
-        prod.exp = -2 * WAY_BIG_EXP;
-    }
-    if ((scale > 0) && float32_is_denormal(c)) {
-        acc.mant = int128_mul_6464(0, 0);
-        acc.exp = -WAY_BIG_EXP;
-        acc.sign = c_sign;
-        acc.sticky = 1;
-        result = accum_add(prod, acc);
-    } else if (!float32_is_zero(c)) {
-        acc.mant = int128_mul_6464(float32_getmant(c), 1);
-        acc.exp = float32_getexp(c);
-        acc.sign = c_sign;
-        result = accum_add(prod, acc);
-    } else {
-        result = prod;
-    }
-    result.exp += scale;
-    return accum_round_float32(result, fp_status);
-}

 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
--
2.43.0
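
[Editor's note] The two comments inside the deleted internal_fmafx() above compress a fair amount of reasoning. Multiplying two scaled values adds their exponents, (ma * 2**ea) * (mb * 2**eb) == ma*mb * 2**(ea+eb), and pulling a float32 significand out as a 24-bit integer implicitly multiplies the value by 2**23, which is why prod.exp subtracts both SF_BIAS and 23. A minimal standalone sketch of that bookkeeping, using a plain 64-bit product in place of QEMU's Int128 accumulator (the unpacked struct and mul_unpacked helper below are hypothetical illustrations, not QEMU API):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define SF_BIAS 127

/* Hypothetical unpacked form of a normal float32:
 * value = mant * 2**(exp - SF_BIAS - 23), where mant is the
 * 24-bit significand with the implicit leading 1 included. */
struct unpacked {
    uint32_t mant;
    int exp;
};

/* (ma * 2**ea) * (mb * 2**eb) == ma*mb * 2**(ea+eb).  Each mant
 * carries a hidden factor of 2**23 plus the bias, so the product's
 * exponent must drop one SF_BIAS and one 23, the same
 * "- SF_BIAS - 23" adjustment as the deleted prod.exp line. */
static void mul_unpacked(const struct unpacked *a,
                         const struct unpacked *b,
                         uint64_t *mant, int *exp)
{
    *mant = (uint64_t)a->mant * b->mant;
    *exp = a->exp + b->exp - SF_BIAS - 23;
}

int main(void)
{
    struct unpacked a = { 3u << 22, SF_BIAS };     /* 1.5 */
    struct unpacked b = { 1u << 23, SF_BIAS + 1 }; /* 2.0 */
    uint64_t m;
    int e;

    mul_unpacked(&a, &b, &m, &e);
    /* Rebuild the real value from the raw pair; prints 3. */
    printf("%g\n", ldexp((double)m, e - SF_BIAS - 23));
    return 0;
}
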
Deleted patch
This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ typedef union {
     };
 } Double;

-typedef union {
-    float f;
-    uint32_t i;
-    struct {
-        uint32_t mant:23;
-        uint32_t exp:8;
-        uint32_t sign:1;
-    };
-} Float;
-
 static uint64_t float64_getmant(float64 f64)
 {
     Double a = { .i = f64 };
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)

 int32_t float32_getexp(float32 f32)
 {
-    Float a = { .i = f32 };
+    int exp = float32_getexp_raw(f32);
     if (float32_is_normal(f32)) {
-        return a.exp;
+        return exp;
     }
     if (float32_is_denormal(f32)) {
-        return a.exp + 1;
+        return exp + 1;
     }
     return -1;
 }
--
2.43.0
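
[Editor's note] The bug this patch fixes deserves a short illustration. The C standard leaves the allocation order of bitfields within a storage unit implementation-defined, and big-endian ABIs typically allocate from the most significant bit down, so a struct written as { mant:23; exp:8; sign:1; } only lines up with the IEEE binary32 layout on little-endian hosts. Shift-and-mask extraction operates on the raw integer value and is endian-neutral. A self-contained sketch follows; extract32 is copied after QEMU's definition in include/qemu/bitops.h, while the f32_* helpers are hypothetical stand-ins for the file's float32_getexp_raw and friends:

#include <assert.h>
#include <stdint.h>

/* extract32, as in QEMU's include/qemu/bitops.h: take 'length' bits
 * of 'value' starting at bit 'start'.  Pure shift-and-mask
 * arithmetic, so it is independent of host byte order. */
static inline uint32_t extract32(uint32_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 32 - start);
    return (value >> start) & (~0U >> (32 - length));
}

/* IEEE binary32 layout: sign[31] exp[30:23] mant[22:0].  A bitfield
 * struct nominally describing the same fields would have its members
 * placed in an implementation-defined order, which is why the patch
 * drops it. */
static inline uint32_t f32_exp_raw(uint32_t f)  { return extract32(f, 23, 8); }
static inline uint32_t f32_mant_raw(uint32_t f) { return extract32(f, 0, 23); }
static inline uint32_t f32_sign(uint32_t f)     { return extract32(f, 31, 1); }

int main(void)
{
    uint32_t bits = 0x3FC00000; /* 1.5f */
    assert(f32_sign(bits) == 0);
    assert(f32_exp_raw(bits) == 127);      /* biased exponent of 2**0 */
    assert(f32_mant_raw(bits) == 0x400000); /* fraction 0.5 */
    return 0;
}
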
Deleted patch
This structure, with bitfields, is incorrect for big-endian.
Use extract64 and deposit64 instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
 1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@

 #define WAY_BIG_EXP 4096

-typedef union {
-    double f;
-    uint64_t i;
-    struct {
-        uint64_t mant:52;
-        uint64_t exp:11;
-        uint64_t sign:1;
-    };
-} Double;
-
 static uint64_t float64_getmant(float64 f64)
 {
-    Double a = { .i = f64 };
+    uint64_t mant = extract64(f64, 0, 52);
     if (float64_is_normal(f64)) {
-        return a.mant | 1ULL << 52;
+        return mant | 1ULL << 52;
     }
     if (float64_is_zero(f64)) {
         return 0;
     }
     if (float64_is_denormal(f64)) {
-        return a.mant;
+        return mant;
     }
     return ~0ULL;
 }

 int32_t float64_getexp(float64 f64)
 {
-    Double a = { .i = f64 };
+    int exp = extract64(f64, 52, 11);
     if (float64_is_normal(f64)) {
-        return a.exp;
+        return exp;
     }
     if (float64_is_denormal(f64)) {
-        return a.exp + 1;
+        return exp + 1;
     }
     return -1;
 }
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
 /* Return a maximum finite value with the requested sign */
 static float64 accum_round_float64(Accum a, float_status *fp_status)
 {
+    uint64_t ret;
+
     if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
         && ((a.guard | a.round | a.sticky) == 0)) {
         /* result zero */
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
         }
     }
     /* Underflow? */
-    if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
+    ret = int128_getlo(a.mant);
+    if (ret & (1ULL << DF_MANTBITS)) {
         /* Leading one means: No, we're normal. So, we should be done... */
-        Double ret;
-        ret.i = 0;
-        ret.sign = a.sign;
-        ret.exp = a.exp;
-        ret.mant = int128_getlo(a.mant);
-        return ret.i;
+        ret = deposit64(ret, 52, 11, a.exp);
+    } else {
+        assert(a.exp == 1);
+        ret = deposit64(ret, 52, 11, 0);
     }
-    assert(a.exp == 1);
-    Double ret;
-    ret.i = 0;
-    ret.sign = a.sign;
-    ret.exp = 0;
-    ret.mant = int128_getlo(a.mant);
-    return ret.i;
+    ret = deposit64(ret, 63, 1, a.sign);
+    return ret;
 }

 float64 internal_mpyhh(float64 a, float64 b,
--
2.43.0
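
[Editor's note] The same endianness reasoning applies here, with the extra twist that accum_round_float64() must also build a float64 result, so deposit64 covers the write direction. A self-contained sketch of the extract64/deposit64 round-trip, with both helpers copied after their definitions in QEMU's include/qemu/bitops.h, assembling -1.5 the way the patched code assembles its result:

#include <assert.h>
#include <stdint.h>

/* Local copies following QEMU's include/qemu/bitops.h definitions. */
static inline uint64_t extract64(uint64_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 64 - start);
    return (value >> start) & (~0ULL >> (64 - length));
}

static inline uint64_t deposit64(uint64_t value, int start, int length,
                                 uint64_t fieldval)
{
    uint64_t mask;
    assert(start >= 0 && length > 0 && length <= 64 - start);
    mask = (~0ULL >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* Start from the fraction bits, then deposit the exponent and
     * sign fields on top, mirroring the patched function. */
    uint64_t ret = 1ULL << 51;          /* fraction 0.5 */
    ret = deposit64(ret, 52, 11, 1023); /* biased exponent of 2**0 */
    ret = deposit64(ret, 63, 1, 1);     /* sign bit */
    assert(ret == 0xBFF8000000000000ULL); /* bit pattern of -1.5 */

    /* And take it apart again with extract64. */
    assert(extract64(ret, 52, 11) == 1023);
    assert(extract64(ret, 0, 52) == 1ULL << 51);
    return 0;
}
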