The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
17 | #include <inttypes.h>
| ^~~~~~~~~~~~
compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tests/tcg/multiarch/system/memory.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@

#include <stdint.h>
#include <stdbool.h>
-#include <inttypes.h>
#include <minilib.h>

#ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
int i;
bool ok = true;

- ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
- ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+ ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+ ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);

/* Run through the unsigned tests first */
for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
ok = do_signed_reads(true);
}

- ml_printf("Test data read: %"PRId32"\n", test_read_count);
- ml_printf("Test data write: %"PRId32"\n", test_write_count);
+ ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+ ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
return ok ? 0 : -1;
}
--
2.43.0

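The workaround above generalizes beyond this test: when the <inttypes.h> macros
such as PRIxPTR and PRId32 cannot be relied on, casting to a type with a fixed
printf length modifier does the job. A minimal standalone sketch of the same
pattern, illustration only, with plain printf standing in for ml_printf and all
names invented for the example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t counter = 42;
        static char buf[16];

        /* Instead of "0x%"PRIxPTR and "%"PRId32 ... */
        printf("buffer at: 0x%lx\n", (unsigned long)(uintptr_t)&buf[0]);
        printf("count: %lu\n", (unsigned long)counter);
        return 0;
    }
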
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
accel/tcg/plugin-gen.c | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)

static TCGv_i32 gen_cpu_index(void)
{
+ /*
+ * Optimize when we run with a single vcpu. All values using cpu_index,
+ * including scoreboard index, will be optimized out.
+ * User-mode calls tb_flush when setting this flag. In system-mode, all
+ * vcpus are created before generating code.
+ */
+ if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+ return tcg_constant_i32(current_cpu->cpu_index);
+ }
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0

Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}

+static void finish_bb(OptContext *ctx)
+{
+ /* We only optimize memory barriers across basic blocks. */
+ ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+ finish_bb(ctx);
+ /* We only optimize across extended basic blocks. */
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
+}
+
static void finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;

- /*
- * We only optimize extended basic blocks. If the opcode ends a BB
- * and is not a conditional branch, reset all temp data.
- */
- if (def->flags & TCG_OPF_BB_END) {
- ctx->prev_mb = NULL;
- if (!(def->flags & TCG_OPF_COND_BRANCH)) {
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
- remove_mem_copy_all(ctx);
- }
- return;
- }
-
nb_oargs = def->nb_oargs;
for (i = 0; i < nb_oargs; i++) {
TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
if (i > 0) {
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
+ finish_ebb(ctx);
+ } else {
+ finish_bb(ctx);
}
- return false;
+ return true;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
}
op->opc = INDEX_op_br;
op->args[0] = label;
- break;
+ finish_ebb(ctx);
+ return true;
}
- return false;
+
+ finish_bb(ctx);
+ return true;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(xor):
done = fold_xor(&ctx, op);
break;
+ case INDEX_op_set_label:
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ case INDEX_op_goto_ptr:
+ finish_ebb(&ctx);
+ done = true;
+ break;
default:
break;
}
--
2.43.0

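The rule the two new helpers encode (a conditional branch closes a basic block,
while labels and unconditional exits close the extended basic block and drop all
constant/copy knowledge) can be sketched with a tiny standalone model. This is
an illustration only, not QEMU code; the struct and field names are invented
stand-ins for the OptContext state manipulated above:

    /* Toy model of BB vs EBB boundaries in the optimizer; illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NB_TEMPS 8

    struct opt_state {
        bool prev_mb;                /* stand-in for ctx->prev_mb */
        bool is_const[NB_TEMPS];     /* stand-in for per-temp constant info */
    };

    static void finish_bb(struct opt_state *s)
    {
        s->prev_mb = false;          /* only barriers are tracked across BBs */
    }

    static void finish_ebb(struct opt_state *s)
    {
        finish_bb(s);
        memset(s->is_const, 0, sizeof(s->is_const));  /* forget known constants */
    }

    int main(void)
    {
        struct opt_state s = { .prev_mb = true };
        s.is_const[3] = true;

        finish_bb(&s);   /* conditional branch: constant knowledge survives */
        printf("after brcond: temp3 const = %d\n", s.is_const[3]);  /* 1 */

        finish_ebb(&s);  /* set_label / br / exit_tb: everything is reset */
        printf("after label:  temp3 const = %d\n", s.is_const[3]);  /* 0 */
        return 0;
    }
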
New patch
1
There are only a few logical operations which can compute
2
an "affected" mask. Split out handling of this optimization
3
to a separate function, only to be called when applicable.
1
4
5
Remove the a_mask field from OptContext, as the mask is
6
no longer stored anywhere.
7
8
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
12
1 file changed, 27 insertions(+), 15 deletions(-)
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
19
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
20
21
/* In flight values from optimization. */
22
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
23
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
24
uint64_t s_mask; /* mask of clrsb(value) bits */
25
TCGType type;
26
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
27
28
static bool fold_masks(OptContext *ctx, TCGOp *op)
29
{
30
- uint64_t a_mask = ctx->a_mask;
31
uint64_t z_mask = ctx->z_mask;
32
uint64_t s_mask = ctx->s_mask;
33
34
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
35
* type changing opcodes.
36
*/
37
if (ctx->type == TCG_TYPE_I32) {
38
- a_mask = (int32_t)a_mask;
39
z_mask = (int32_t)z_mask;
40
s_mask |= MAKE_64BIT_MASK(32, 32);
41
ctx->z_mask = z_mask;
42
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
43
if (z_mask == 0) {
44
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
45
}
46
+ return false;
47
+}
48
+
49
+/*
50
+ * An "affected" mask bit is 0 if and only if the result is identical
51
+ * to the first input. Thus if the entire mask is 0, the operation
52
+ * is equivalent to a copy.
53
+ */
54
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
55
+{
56
+ if (ctx->type == TCG_TYPE_I32) {
57
+ a_mask = (uint32_t)a_mask;
58
+ }
59
if (a_mask == 0) {
60
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
61
}
62
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
63
* Known-zeros does not imply known-ones. Therefore unless
64
* arg2 is constant, we can't infer affected bits from it.
65
*/
66
- if (arg_is_const(op->args[2])) {
67
- ctx->a_mask = z1 & ~z2;
68
+ if (arg_is_const(op->args[2]) &&
69
+ fold_affected_mask(ctx, op, z1 & ~z2)) {
70
+ return true;
71
}
72
73
return fold_masks(ctx, op);
74
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
75
*/
76
if (arg_is_const(op->args[2])) {
77
uint64_t z2 = ~arg_info(op->args[2])->z_mask;
78
- ctx->a_mask = z1 & ~z2;
79
+ if (fold_affected_mask(ctx, op, z1 & ~z2)) {
80
+ return true;
81
+ }
82
z1 &= z2;
83
}
84
ctx->z_mask = z1;
85
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
86
87
z_mask_old = arg_info(op->args[1])->z_mask;
88
z_mask = extract64(z_mask_old, pos, len);
89
- if (pos == 0) {
90
- ctx->a_mask = z_mask_old ^ z_mask;
91
+ if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
92
+ return true;
93
}
94
ctx->z_mask = z_mask;
95
ctx->s_mask = smask_from_zmask(z_mask);
96
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
97
98
ctx->z_mask = z_mask;
99
ctx->s_mask = s_mask;
100
- if (!type_change) {
101
- ctx->a_mask = s_mask & ~s_mask_old;
102
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
103
+ return true;
104
}
105
106
return fold_masks(ctx, op);
107
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
108
109
ctx->z_mask = z_mask;
110
ctx->s_mask = smask_from_zmask(z_mask);
111
- if (!type_change) {
112
- ctx->a_mask = z_mask_old ^ z_mask;
113
+ if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
114
+ return true;
115
}
116
return fold_masks(ctx, op);
117
}
118
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
119
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
120
ctx->s_mask = s_mask;
121
122
- if (pos == 0) {
123
- ctx->a_mask = s_mask & ~s_mask_old;
124
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
125
+ return true;
126
}
127
128
return fold_masks(ctx, op);
129
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
}
131
132
/* Assume all bits affected, no bits known zero, no sign reps. */
133
- ctx.a_mask = -1;
134
ctx.z_mask = -1;
135
ctx.s_mask = 0;
136
137
--
138
2.43.0
diff view generated by jsdifflib
New patch
1
Use of fold_masks should be restricted to those opcodes that
2
can reliably make use of it -- those with a single output,
3
and from higher-level folders that set up the masks.
4
Prepare for conversion of each folder in turn.
1
5
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 17 ++++++++++++++---
10
1 file changed, 14 insertions(+), 3 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask = ctx->z_mask;
19
uint64_t s_mask = ctx->s_mask;
20
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
21
+ TCGTemp *ts;
22
+ TempOptInfo *ti;
23
+
24
+ /* Only single-output opcodes are supported here. */
25
+ tcg_debug_assert(def->nb_oargs == 1);
26
27
/*
28
* 32-bit ops generate 32-bit results, which for the purpose of
29
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
30
if (ctx->type == TCG_TYPE_I32) {
31
z_mask = (int32_t)z_mask;
32
s_mask |= MAKE_64BIT_MASK(32, 32);
33
- ctx->z_mask = z_mask;
34
- ctx->s_mask = s_mask;
35
}
36
37
if (z_mask == 0) {
38
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
39
}
40
- return false;
41
+
42
+ ts = arg_temp(op->args[0]);
43
+ reset_ts(ctx, ts);
44
+
45
+ ti = ts_info(ts);
46
+ ti->z_mask = z_mask;
47
+ ti->s_mask = s_mask;
48
+ return true;
49
}
50
51
/*
52
--
53
2.43.0
diff view generated by jsdifflib
New patch
1
Add a routine to which masks can be passed directly, rather than
2
storing them into OptContext. To be used in upcoming patches.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 15 ++++++++++++---
8
1 file changed, 12 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
15
return fold_const2(ctx, op);
16
}
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
+/*
20
+ * Record "zero" and "sign" masks for the single output of @op.
21
+ * See TempOptInfo definition of z_mask and s_mask.
22
+ * If z_mask allows, fold the output to constant zero.
23
+ */
24
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
25
+ uint64_t z_mask, uint64_t s_mask)
26
{
27
- uint64_t z_mask = ctx->z_mask;
28
- uint64_t s_mask = ctx->s_mask;
29
const TCGOpDef *def = &tcg_op_defs[op->opc];
30
TCGTemp *ts;
31
TempOptInfo *ti;
32
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
33
return true;
34
}
35
36
+static bool fold_masks(OptContext *ctx, TCGOp *op)
37
+{
38
+ return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
39
+}
40
+
41
/*
42
* An "affected" mask bit is 0 if and only if the result is identical
43
* to the first input. Thus if the entire mask is 0, the operation
44
--
45
2.43.0
diff view generated by jsdifflib
New patch
1
Consider the passed s_mask to be a minimum deduced from
2
either existing s_mask or from a sign-extension operation.
3
We may be able to deduce more from the set of known zeros.
4
Remove identical logic from several opcode folders.
1
5
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 21 ++++++---------------
10
1 file changed, 6 insertions(+), 15 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
17
* Record "zero" and "sign" masks for the single output of @op.
18
* See TempOptInfo definition of z_mask and s_mask.
19
* If z_mask allows, fold the output to constant zero.
20
+ * The passed s_mask may be augmented by z_mask.
21
*/
22
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
23
uint64_t z_mask, uint64_t s_mask)
24
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
25
26
ti = ts_info(ts);
27
ti->z_mask = z_mask;
28
- ti->s_mask = s_mask;
29
+ ti->s_mask = s_mask | smask_from_zmask(z_mask);
30
return true;
31
}
32
33
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
34
default:
35
g_assert_not_reached();
36
}
37
- s_mask = smask_from_zmask(z_mask);
38
39
+ s_mask = 0;
40
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
41
case TCG_BSWAP_OZ:
42
break;
43
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
44
default:
45
/* The high bits are undefined: force all bits above the sign to 1. */
46
z_mask |= sign << 1;
47
- s_mask = 0;
48
break;
49
}
50
ctx->z_mask = z_mask;
51
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
52
g_assert_not_reached();
53
}
54
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
55
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
56
return false;
57
}
58
59
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
60
default:
61
g_assert_not_reached();
62
}
63
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
64
return false;
65
}
66
67
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
68
return true;
69
}
70
ctx->z_mask = z_mask;
71
- ctx->s_mask = smask_from_zmask(z_mask);
72
73
return fold_masks(ctx, op);
74
}
75
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
76
}
77
78
ctx->z_mask = z_mask;
79
- ctx->s_mask = smask_from_zmask(z_mask);
80
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
81
return true;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
84
int width = 8 * memop_size(mop);
85
86
if (width < 64) {
87
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
88
- if (!(mop & MO_SIGN)) {
89
+ if (mop & MO_SIGN) {
90
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
91
+ } else {
92
ctx->z_mask = MAKE_64BIT_MASK(0, width);
93
- ctx->s_mask <<= 1;
94
}
95
}
96
97
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
98
fold_setcond_tst_pow2(ctx, op, false);
99
100
ctx->z_mask = 1;
101
- ctx->s_mask = smask_from_zmask(1);
102
return false;
103
}
104
105
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
106
}
107
108
ctx->z_mask = 1;
109
- ctx->s_mask = smask_from_zmask(1);
110
return false;
111
112
do_setcond_const:
113
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
114
break;
115
CASE_OP_32_64(ld8u):
116
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
117
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
118
break;
119
CASE_OP_32_64(ld16s):
120
ctx->s_mask = MAKE_64BIT_MASK(16, 48);
121
break;
122
CASE_OP_32_64(ld16u):
123
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
124
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
125
break;
126
case INDEX_op_ld32s_i64:
127
ctx->s_mask = MAKE_64BIT_MASK(32, 32);
128
break;
129
case INDEX_op_ld32u_i64:
130
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
131
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
132
break;
133
default:
134
g_assert_not_reached();
135
--
136
2.43.0
diff view generated by jsdifflib
New patch
1
Change the representation from sign bit repetitions to all bits equal
2
to the sign bit, including the sign bit itself.
1
3
4
The previous format has a problem in that it is difficult to recreate
5
a valid sign mask after a shift operation: the "repetitions" part of
6
the previous format meant that applying the same shift as for the value
7
lead to an off-by-one value.
8
9
The new format, including the sign bit itself, means that the sign mask
10
can be manipulated in exactly the same way as the value, canonicalization
11
is easier.
12
13
Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
14
to do so. Treat 0 as a non-canonical but typeless input for no sign
15
information, which will be reset as appropriate for the data type.
16
We can easily fold in the data from z_mask while canonicalizing.
17
18
Temporarily disable optimizations using s_mask while each operation is
19
converted to use fold_masks_zs and to the new form.
20
21
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
22
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
23
---
24
tcg/optimize.c | 64 ++++++++++++--------------------------------------
25
1 file changed, 15 insertions(+), 49 deletions(-)
26
27
diff --git a/tcg/optimize.c b/tcg/optimize.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/tcg/optimize.c
30
+++ b/tcg/optimize.c
31
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
32
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
33
uint64_t val;
34
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
35
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
36
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
37
} TempOptInfo;
38
39
typedef struct OptContext {
40
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
41
42
/* In flight values from optimization. */
43
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
44
- uint64_t s_mask; /* mask of clrsb(value) bits */
45
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
46
TCGType type;
47
} OptContext;
48
49
-/* Calculate the smask for a specific value. */
50
-static uint64_t smask_from_value(uint64_t value)
51
-{
52
- int rep = clrsb64(value);
53
- return ~(~0ull >> rep);
54
-}
55
-
56
-/*
57
- * Calculate the smask for a given set of known-zeros.
58
- * If there are lots of zeros on the left, we can consider the remainder
59
- * an unsigned field, and thus the corresponding signed field is one bit
60
- * larger.
61
- */
62
-static uint64_t smask_from_zmask(uint64_t zmask)
63
-{
64
- /*
65
- * Only the 0 bits are significant for zmask, thus the msb itself
66
- * must be zero, else we have no sign information.
67
- */
68
- int rep = clz64(zmask);
69
- if (rep == 0) {
70
- return 0;
71
- }
72
- rep -= 1;
73
- return ~(~0ull >> rep);
74
-}
75
-
76
-/*
77
- * Recreate a properly left-aligned smask after manipulation.
78
- * Some bit-shuffling, particularly shifts and rotates, may
79
- * retain sign bits on the left, but may scatter disconnected
80
- * sign bits on the right. Retain only what remains to the left.
81
- */
82
-static uint64_t smask_from_smask(int64_t smask)
83
-{
84
- /* Only the 1 bits are significant for smask */
85
- return smask_from_zmask(~smask);
86
-}
87
-
88
static inline TempOptInfo *ts_info(TCGTemp *ts)
89
{
90
return ts->state_ptr;
91
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
92
ti->is_const = true;
93
ti->val = ts->val;
94
ti->z_mask = ts->val;
95
- ti->s_mask = smask_from_value(ts->val);
96
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
97
} else {
98
ti->is_const = false;
99
ti->z_mask = -1;
100
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
101
*/
102
if (i == 0) {
103
ts_info(ts)->z_mask = ctx->z_mask;
104
- ts_info(ts)->s_mask = ctx->s_mask;
105
}
106
}
107
}
108
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
109
* The passed s_mask may be augmented by z_mask.
110
*/
111
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
112
- uint64_t z_mask, uint64_t s_mask)
113
+ uint64_t z_mask, int64_t s_mask)
114
{
115
const TCGOpDef *def = &tcg_op_defs[op->opc];
116
TCGTemp *ts;
117
TempOptInfo *ti;
118
+ int rep;
119
120
/* Only single-output opcodes are supported here. */
121
tcg_debug_assert(def->nb_oargs == 1);
122
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
123
*/
124
if (ctx->type == TCG_TYPE_I32) {
125
z_mask = (int32_t)z_mask;
126
- s_mask |= MAKE_64BIT_MASK(32, 32);
127
+ s_mask |= INT32_MIN;
128
}
129
130
if (z_mask == 0) {
131
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
132
133
ti = ts_info(ts);
134
ti->z_mask = z_mask;
135
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
136
+
137
+ /* Canonicalize s_mask and incorporate data from z_mask. */
138
+ rep = clz64(~s_mask);
139
+ rep = MAX(rep, clz64(z_mask));
140
+ rep = MAX(rep - 1, 0);
141
+ ti->s_mask = INT64_MIN >> rep;
142
+
143
return true;
144
}
145
146
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
147
148
ctx->z_mask = z_mask;
149
ctx->s_mask = s_mask;
150
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
151
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
152
return true;
153
}
154
155
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
156
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
157
ctx->s_mask = s_mask;
158
159
- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
160
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
161
return true;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
166
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
168
- ctx->s_mask = smask_from_smask(s_mask);
169
170
return fold_masks(ctx, op);
171
}
172
--
173
2.43.0
diff view generated by jsdifflib
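
For readers working through the mask algebra, the two s_mask encodings can be
checked numerically. The snippet below is a worked example only, not part of
the series; it uses the formulas visible in the diff above (old form
~(~0ull >> clrsb), new form INT64_MIN >> clrsb, plus the z_mask-to-s_mask
derivation used by fold_masks_zs), relies on GCC/Clang builtins, and assumes an
arithmetic right shift on int64_t, as QEMU itself does:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t v = -2;                          /* 0xfffffffffffffffe */
        int rep = __builtin_clrsbll(v);          /* 62 redundant sign bits */

        uint64_t old_smask = ~(~0ull >> rep);               /* top 62 bits set */
        uint64_t new_smask = (uint64_t)(INT64_MIN >> rep);  /* top 63 bits set */
        assert(old_smask == 0xfffffffffffffffcull);
        assert(new_smask == 0xfffffffffffffffeull);  /* sign bit now included */

        /* Deducing sign bits from known zeros: if only the low 16 bits may be
         * nonzero (z_mask = 0xffff), bits 63..16 all equal the (zero) sign bit. */
        uint64_t z_mask = 0xffff;
        int zrep = __builtin_clzll(z_mask) - 1;              /* 47 */
        assert((uint64_t)(INT64_MIN >> zrep) == 0xffffffffffff0000ull);
        return 0;
    }
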
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 9 +++++----
5
1 file changed, 5 insertions(+), 4 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
12
remove_mem_copy_all(ctx);
13
}
14
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
17
{
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
19
int i, nb_oargs;
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
21
ts_info(ts)->z_mask = ctx->z_mask;
22
}
23
}
24
+ return true;
25
}
26
27
/*
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
29
fold_xi_to_x(ctx, op, 0)) {
30
return true;
31
}
32
- return false;
33
+ return finish_folding(ctx, op);
34
}
35
36
/* We cannot as yet do_constant_folding with vectors. */
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
38
fold_xi_to_x(ctx, op, 0)) {
39
return true;
40
}
41
- return false;
42
+ return finish_folding(ctx, op);
43
}
44
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
47
op->args[4] = arg_new_constant(ctx, bl);
48
op->args[5] = arg_new_constant(ctx, bh);
49
}
50
- return false;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
55
--
56
2.43.0
diff view generated by jsdifflib
Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
return ts_info(arg_temp(arg));
}

+static inline bool ti_is_const(TempOptInfo *ti)
+{
+ return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+ return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+ return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
static inline bool ts_is_const(TCGTemp *ts)
{
- return ts_info(ts)->is_const;
+ return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
- TempOptInfo *ti = ts_info(ts);
- return ti->is_const && ti->val == val;
+ return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
--
2.43.0

New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Sink mask computation below fold_affected_mask early exit.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
diff view generated by jsdifflib
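
The comment retained above ("bitwise operations preserve the relative quantity
of the repetitions") can be spot-checked in isolation: the AND of two values has
at least as many redundant sign bits as the weaker of its inputs, so
s_mask(a & b) may be taken as s_mask(a) & s_mask(b). Illustration only; the
operand values are arbitrary and the builtins are GCC/Clang extensions:

    #include <assert.h>
    #include <stdint.h>

    static int clrsb(int64_t x) { return __builtin_clrsbll(x); }

    int main(void)
    {
        int64_t a = -5;        /* clrsb = 60 */
        int64_t b = 100;       /* clrsb = 56 */
        int64_t r = a & b;
        /* The result keeps at least min(clrsb(a), clrsb(b)) sign repetitions. */
        assert(clrsb(r) >= (clrsb(a) < clrsb(b) ? clrsb(a) : clrsb(b)));
        return 0;
    }
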
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of second const operand.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
diff view generated by jsdifflib
New patch
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
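A minimal sketch of why the s_mask from the deposited operand survives when the field reaches the top bit (ofs + len == width). It assumes the current s_mask convention of a contiguous run of known sign-bit copies ending at bit 63; region_uniform and deposit64 here are local re-implementations for illustration, not the QEMU helpers.

    #include <assert.h>
    #include <stdint.h>

    static uint64_t deposit64(uint64_t dst, int ofs, int len, uint64_t val)
    {
        uint64_t m = len >= 64 ? ~0ull : (1ull << len) - 1;
        return (dst & ~(m << ofs)) | ((val & m) << ofs);
    }

    /* True if every bit selected by s_mask has the same value. */
    static int region_uniform(uint64_t v, uint64_t s_mask)
    {
        uint64_t r = v & s_mask;
        return r == 0 || r == s_mask;
    }

    int main(void)
    {
        /* t2 is sign-extended from 8 bits: bits 7..63 are copies. */
        uint64_t t2 = (uint64_t)(int64_t)(int8_t)0x93;
        uint64_t t2_s_mask = ~0ull << 7;
        assert(region_uniform(t2, t2_s_mask));

        /* Deposit t2's low 32 bits into the top half: ofs + len == 64. */
        uint64_t t1 = 0x0123456789abcdefull;
        int ofs = 32, len = 32;
        uint64_t res = deposit64(t1, ofs, len, t2);

        /* The claim above: s_mask = t2->s_mask << ofs is still valid. */
        assert(region_uniform(res, t2_s_mask << ofs));
        return 0;
    }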
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
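The intersection of the two s_mask values is safe for eqv, as for the other pure bitwise ops, because each output bit depends only on the same bit position of the inputs: wherever both inputs repeat their sign bit, the output repeats its own. A small hedged check, using a local region_uniform helper rather than anything from QEMU:

    #include <assert.h>
    #include <stdint.h>

    /* True if every bit selected by s_mask has the same value. */
    static int region_uniform(uint64_t v, uint64_t s_mask)
    {
        uint64_t r = v & s_mask;
        return r == 0 || r == s_mask;
    }

    int main(void)
    {
        /* a sign-extended from 8 bits, b from 16 bits. */
        uint64_t a = (uint64_t)(int64_t)(int8_t)0x5a;    /* s_mask ~0 << 7  */
        uint64_t b = (uint64_t)(int64_t)(int16_t)0x8001; /* s_mask ~0 << 15 */
        uint64_t sa = ~0ull << 7, sb = ~0ull << 15;

        uint64_t eqv = ~(a ^ b);
        /* The result repeats its sign over at least sa & sb. */
        assert(region_uniform(eqv, sa & sb));
        return 0;
    }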
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
1
There is no point in coding for a 2GB offset when the max TB size
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
is already limited to 64k. If we further restrict to 32k then we
2
Explicitly sign-extend z_mask instead of doing that manually.
3
can eliminate the extra ADDIS instruction.
4
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/ppc/tcg-target.inc.c | 28 ++++++++++------------------
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 10 insertions(+), 18 deletions(-)
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
9
10
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.inc.c
12
--- a/tcg/optimize.c
13
+++ b/tcg/ppc/tcg-target.inc.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
intptr_t value, intptr_t addend)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
16
{
17
{
17
tcg_insn_unit *target;
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
18
- tcg_insn_unit old;
19
+ uint64_t s_mask_old, s_mask, z_mask;
19
20
bool type_change = false;
20
value += addend;
21
+ TempOptInfo *t1;
21
target = (tcg_insn_unit *)value;
22
22
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
23
if (fold_const1(ctx, op)) {
23
case R_PPC_REL24:
24
return true;
24
return reloc_pc24(code_ptr, target);
25
}
25
case R_PPC_ADDR16:
26
26
- /* We are abusing this relocation type. This points to a pair
27
- z_mask = arg_info(op->args[1])->z_mask;
27
- of insns, addis + load. If the displacement is small, we
28
- s_mask = arg_info(op->args[1])->s_mask;
28
- can nop out the addis. */
29
+ t1 = arg_info(op->args[1]);
29
- if (value == (int16_t)value) {
30
+ z_mask = t1->z_mask;
30
- code_ptr[0] = NOP;
31
+ s_mask = t1->s_mask;
31
- old = deposit32(code_ptr[1], 0, 16, value);
32
s_mask_old = s_mask;
32
- code_ptr[1] = deposit32(old, 16, 5, TCG_REG_TB);
33
33
- } else {
34
switch (op->opc) {
34
- int16_t lo = value;
35
CASE_OP_32_64(ext8s):
35
- int hi = value - lo;
36
- sign = INT8_MIN;
36
- if (hi + lo != value) {
37
- z_mask = (uint8_t)z_mask;
37
- return false;
38
+ s_mask |= INT8_MIN;
38
- }
39
+ z_mask = (int8_t)z_mask;
39
- code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
40
break;
40
- code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
41
CASE_OP_32_64(ext16s):
41
+ /*
42
- sign = INT16_MIN;
42
+ * We are (slightly) abusing this relocation type. In particular,
43
- z_mask = (uint16_t)z_mask;
43
+ * assert that the low 2 bits are zero, and do not modify them.
44
+ s_mask |= INT16_MIN;
44
+ * That way we can use this with LD et al that have opcode bits
45
+ z_mask = (int16_t)z_mask;
45
+ * in the low 2 bits of the insn.
46
break;
46
+ */
47
case INDEX_op_ext_i32_i64:
47
+ if ((value & 3) || value != (int16_t)value) {
48
type_change = true;
48
+ return false;
49
QEMU_FALLTHROUGH;
49
}
50
case INDEX_op_ext32s_i64:
50
+ *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
51
break;
55
break;
52
default:
56
default:
53
g_assert_not_reached();
57
g_assert_not_reached();
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
55
if (!in_prologue && USE_REG_TB) {
56
new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
57
-(intptr_t)s->code_gen_ptr);
58
- tcg_out32(s, ADDIS | TAI(ret, TCG_REG_TB, 0));
59
- tcg_out32(s, LD | TAI(ret, ret, 0));
60
+ tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
61
return;
62
}
58
}
63
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
64
--
76
--
65
2.17.1
77
2.43.0
66
67
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
31
return true;
32
}
33
- return false;
34
+ return finish_folding(ctx, op);
35
}
36
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
38
--
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
19
}
20
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
75
--
76
2.43.0
1
This is part c of relocation overflow handling.
1
Stores have no output operands, and so need no further work.
2
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/aarch64/tcg-target.inc.c | 16 ++++++++++------
6
tcg/optimize.c | 11 +++++------
6
tcg/arm/tcg-target.inc.c | 16 ++++++++++------
7
1 file changed, 5 insertions(+), 6 deletions(-)
7
tcg/i386/tcg-target.inc.c | 6 ++++--
8
tcg/mips/tcg-target.inc.c | 6 ++++--
9
tcg/ppc/tcg-target.inc.c | 14 ++++++++++----
10
tcg/riscv/tcg-target.inc.c | 16 ++++++++++++----
11
tcg/s390/tcg-target.inc.c | 20 ++++++++++++--------
12
tcg/tcg-ldst.inc.c | 18 +++++++++---------
13
tcg/tcg.c | 7 ++++---
14
9 files changed, 75 insertions(+), 44 deletions(-)
15
8
16
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/aarch64/tcg-target.inc.c
11
--- a/tcg/optimize.c
19
+++ b/tcg/aarch64/tcg-target.inc.c
12
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
21
tcg_out_insn(s, 3406, ADR, rd, offset);
22
}
23
24
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
25
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
26
{
14
{
27
TCGMemOpIdx oi = lb->oi;
15
/* Opcodes that touch guest memory stop the mb optimization. */
28
TCGMemOp opc = get_memop(oi);
16
ctx->prev_mb = NULL;
29
TCGMemOp size = opc & MO_SIZE;
17
- return false;
30
31
- bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
32
- tcg_debug_assert(ok);
33
+ if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
34
+ return false;
35
+ }
36
37
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
38
tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
40
}
41
42
tcg_out_goto(s, lb->raddr);
43
+ return true;
18
+ return true;
44
}
19
}
45
20
46
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
47
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
48
{
23
49
TCGMemOpIdx oi = lb->oi;
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
50
TCGMemOp opc = get_memop(oi);
25
remove_mem_copy_all(ctx);
51
TCGMemOp size = opc & MO_SIZE;
26
- return false;
52
27
+ return true;
53
- bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
28
}
54
- tcg_debug_assert(ok);
29
55
+ if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
30
switch (op->opc) {
56
+ return false;
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
57
+ }
32
g_assert_not_reached();
58
33
}
59
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
60
tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
35
- return false;
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
62
tcg_out_adr(s, TCG_REG_X4, lb->raddr);
63
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
64
tcg_out_goto(s, lb->raddr);
65
+ return true;
36
+ return true;
66
}
37
}
67
38
68
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
69
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
70
index XXXXXXX..XXXXXXX 100644
41
TCGType type;
71
--- a/tcg/arm/tcg-target.inc.c
42
72
+++ b/tcg/arm/tcg-target.inc.c
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
73
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
44
- fold_tcg_st(ctx, op);
74
label->label_ptr[0] = label_ptr;
45
- return false;
75
}
46
+ return fold_tcg_st(ctx, op);
76
77
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
78
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
79
{
80
TCGReg argreg, datalo, datahi;
81
TCGMemOpIdx oi = lb->oi;
82
TCGMemOp opc = get_memop(oi);
83
void *func;
84
85
- bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
86
- tcg_debug_assert(ok);
87
+ if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
88
+ return false;
89
+ }
90
91
argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
92
if (TARGET_LONG_BITS == 64) {
93
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
94
}
47
}
95
48
96
tcg_out_goto(s, COND_AL, lb->raddr);
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
97
+ return true;
55
+ return true;
98
}
56
}
99
57
100
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
101
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
102
{
103
TCGReg argreg, datalo, datahi;
104
TCGMemOpIdx oi = lb->oi;
105
TCGMemOp opc = get_memop(oi);
106
107
- bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
108
- tcg_debug_assert(ok);
109
+ if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
110
+ return false;
111
+ }
112
113
argreg = TCG_REG_R0;
114
argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
115
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
116
117
/* Tail-call to the helper, which will return to the fast path. */
118
tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
119
+ return true;
120
}
121
#endif /* SOFTMMU */
122
123
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/i386/tcg-target.inc.c
126
+++ b/tcg/i386/tcg-target.inc.c
127
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
128
/*
129
* Generate code for the slow path for a load at the end of block
130
*/
131
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
132
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
133
{
134
TCGMemOpIdx oi = l->oi;
135
TCGMemOp opc = get_memop(oi);
136
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
137
138
/* Jump to the code corresponding to next IR of qemu_st */
139
tcg_out_jmp(s, l->raddr);
140
+ return true;
141
}
142
143
/*
144
* Generate code for the slow path for a store at the end of block
145
*/
146
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
147
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
148
{
149
TCGMemOpIdx oi = l->oi;
150
TCGMemOp opc = get_memop(oi);
151
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
152
/* "Tail call" to the helper, with the return address back inline. */
153
tcg_out_push(s, retaddr);
154
tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
155
+ return true;
156
}
157
#elif TCG_TARGET_REG_BITS == 32
158
# define x86_guest_base_seg 0
159
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/tcg/mips/tcg-target.inc.c
162
+++ b/tcg/mips/tcg-target.inc.c
163
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
164
}
165
}
166
167
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
168
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
169
{
170
TCGMemOpIdx oi = l->oi;
171
TCGMemOp opc = get_memop(oi);
172
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
173
} else {
174
tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
175
}
176
+ return true;
177
}
178
179
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
180
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
181
{
182
TCGMemOpIdx oi = l->oi;
183
TCGMemOp opc = get_memop(oi);
184
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
185
tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
186
/* delay slot */
187
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
188
+ return true;
189
}
190
#endif
191
192
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
193
index XXXXXXX..XXXXXXX 100644
194
--- a/tcg/ppc/tcg-target.inc.c
195
+++ b/tcg/ppc/tcg-target.inc.c
196
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
197
label->label_ptr[0] = lptr;
198
}
199
200
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
201
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
202
{
203
TCGMemOpIdx oi = lb->oi;
204
TCGMemOp opc = get_memop(oi);
205
TCGReg hi, lo, arg = TCG_REG_R3;
206
207
- **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
208
+ if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
209
+ return false;
210
+ }
211
212
tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
213
214
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
215
}
216
217
tcg_out_b(s, 0, lb->raddr);
218
+ return true;
219
}
220
221
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
222
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
223
{
224
TCGMemOpIdx oi = lb->oi;
225
TCGMemOp opc = get_memop(oi);
226
TCGMemOp s_bits = opc & MO_SIZE;
227
TCGReg hi, lo, arg = TCG_REG_R3;
228
229
- **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
230
+ if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
231
+ return false;
232
+ }
233
234
tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
235
236
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
237
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
238
239
tcg_out_b(s, 0, lb->raddr);
240
+ return true;
241
}
242
#endif /* SOFTMMU */
243
244
diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c
245
index XXXXXXX..XXXXXXX 100644
246
--- a/tcg/riscv/tcg-target.inc.c
247
+++ b/tcg/riscv/tcg-target.inc.c
248
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
249
label->label_ptr[0] = label_ptr[0];
250
}
251
252
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
253
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
254
{
255
TCGMemOpIdx oi = l->oi;
256
TCGMemOp opc = get_memop(oi);
257
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
258
}
259
260
/* resolve label address */
261
- patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);
262
+ if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
263
+ (intptr_t) s->code_ptr, 0)) {
264
+ return false;
265
+ }
266
267
/* call load helper */
268
tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
269
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
270
tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);
271
272
tcg_out_goto(s, l->raddr);
273
+ return true;
274
}
275
276
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
277
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
278
{
279
TCGMemOpIdx oi = l->oi;
280
TCGMemOp opc = get_memop(oi);
281
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
282
}
283
284
/* resolve label address */
285
- patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);
286
+ if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
287
+ (intptr_t) s->code_ptr, 0)) {
288
+ return false;
289
+ }
290
291
/* call store helper */
292
tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
293
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
294
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
295
296
tcg_out_goto(s, l->raddr);
297
+ return true;
298
}
299
#endif /* CONFIG_SOFTMMU */
300
301
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
302
index XXXXXXX..XXXXXXX 100644
303
--- a/tcg/s390/tcg-target.inc.c
304
+++ b/tcg/s390/tcg-target.inc.c
305
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
306
label->label_ptr[0] = label_ptr;
307
}
308
309
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
310
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
311
{
312
TCGReg addr_reg = lb->addrlo_reg;
313
TCGReg data_reg = lb->datalo_reg;
314
TCGMemOpIdx oi = lb->oi;
315
TCGMemOp opc = get_memop(oi);
316
317
- bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
318
- (intptr_t)s->code_ptr, 2);
319
- tcg_debug_assert(ok);
320
+ if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
321
+ (intptr_t)s->code_ptr, 2)) {
322
+ return false;
323
+ }
324
325
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
326
if (TARGET_LONG_BITS == 64) {
327
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
328
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
329
330
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
331
+ return true;
332
}
333
334
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
335
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
336
{
337
TCGReg addr_reg = lb->addrlo_reg;
338
TCGReg data_reg = lb->datalo_reg;
339
TCGMemOpIdx oi = lb->oi;
340
TCGMemOp opc = get_memop(oi);
341
342
- bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
343
- (intptr_t)s->code_ptr, 2);
344
- tcg_debug_assert(ok);
345
+ if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
346
+ (intptr_t)s->code_ptr, 2)) {
347
+ return false;
348
+ }
349
350
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
351
if (TARGET_LONG_BITS == 64) {
352
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
353
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
354
355
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
356
+ return true;
357
}
358
#else
359
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
360
diff --git a/tcg/tcg-ldst.inc.c b/tcg/tcg-ldst.inc.c
361
index XXXXXXX..XXXXXXX 100644
362
--- a/tcg/tcg-ldst.inc.c
363
+++ b/tcg/tcg-ldst.inc.c
364
@@ -XXX,XX +XXX,XX @@ typedef struct TCGLabelQemuLdst {
365
* Generate TB finalization at the end of block
366
*/
367
368
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
369
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
370
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
371
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
372
373
-static bool tcg_out_ldst_finalize(TCGContext *s)
374
+static int tcg_out_ldst_finalize(TCGContext *s)
375
{
376
TCGLabelQemuLdst *lb;
377
378
/* qemu_ld/st slow paths */
379
QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
380
- if (lb->is_ld) {
381
- tcg_out_qemu_ld_slow_path(s, lb);
382
- } else {
383
- tcg_out_qemu_st_slow_path(s, lb);
384
+ if (lb->is_ld
385
+ ? !tcg_out_qemu_ld_slow_path(s, lb)
386
+ : !tcg_out_qemu_st_slow_path(s, lb)) {
387
+ return -2;
388
}
389
390
/* Test for (pending) buffer overflow. The assumption is that any
391
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_ldst_finalize(TCGContext *s)
392
the buffer completely. Thus we can test for overflow after
393
generating code without having to check during generation. */
394
if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
395
- return false;
396
+ return -1;
397
}
398
}
399
- return true;
400
+ return 0;
401
}
402
403
/*
404
diff --git a/tcg/tcg.c b/tcg/tcg.c
405
index XXXXXXX..XXXXXXX 100644
406
--- a/tcg/tcg.c
407
+++ b/tcg/tcg.c
408
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
409
static int tcg_target_const_match(tcg_target_long val, TCGType type,
410
const TCGArgConstraint *arg_ct);
411
#ifdef TCG_TARGET_NEED_LDST_LABELS
412
-static bool tcg_out_ldst_finalize(TCGContext *s);
413
+static int tcg_out_ldst_finalize(TCGContext *s);
414
#endif
415
416
#define TCG_HIGHWATER 1024
417
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
418
419
/* Generate TB finalization at the end of block */
420
#ifdef TCG_TARGET_NEED_LDST_LABELS
421
- if (!tcg_out_ldst_finalize(s)) {
422
- return -1;
423
+ i = tcg_out_ldst_finalize(s);
424
+ if (i < 0) {
425
+ return i;
426
}
427
#endif
428
#ifdef TCG_TARGET_NEED_POOL_LABELS
429
--
59
--
430
2.17.1
60
2.43.0
431
432
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
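A toy illustration of the tri-state convention the caller now relies on; the names below are made up for the sketch and are not the QEMU functions.

    #include <assert.h>

    /* > 0: fully folded, caller is done.
       < 0: simplified in place, skip further rewrites of this op.
      == 0: unchanged, caller may try other transformations.     */
    static int try_simplify(int *op)
    {
        if (*op == 0) {
            return 1;            /* complete folding */
        }
        if (*op < 0) {
            *op = -*op;          /* rewrite the operation in place */
            return -1;           /* simplified, but not finished   */
        }
        return 0;                /* no change */
    }

    int main(void)
    {
        int op = -5;
        int i = try_simplify(&op);
        if (i > 0) {
            return 0;            /* mirrors "return true" in the fold */
        }
        if (i == 0) {
            /* only then attempt the next transformation */
        }
        assert(i == -1 && op == 5);
        return 0;
    }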
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
7
---
3
tcg/i386/tcg-target.h | 4 ++--
8
tcg/optimize.c | 5 ++---
4
tcg/i386/tcg-target.inc.c | 11 +++++++++++
9
1 file changed, 2 insertions(+), 3 deletions(-)
5
2 files changed, 13 insertions(+), 2 deletions(-)
6
10
7
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/i386/tcg-target.h
13
--- a/tcg/optimize.c
10
+++ b/tcg/i386/tcg-target.h
14
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
12
#define TCG_TARGET_HAS_deposit_i32 1
16
13
#define TCG_TARGET_HAS_extract_i32 1
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
14
#define TCG_TARGET_HAS_sextract_i32 1
18
{
15
-#define TCG_TARGET_HAS_extract2_i32 0
19
- uint64_t s_mask, z_mask, sign;
16
+#define TCG_TARGET_HAS_extract2_i32 1
20
+ uint64_t s_mask, z_mask;
17
#define TCG_TARGET_HAS_movcond_i32 1
21
TempOptInfo *t1, *t2;
18
#define TCG_TARGET_HAS_add2_i32 1
22
19
#define TCG_TARGET_HAS_sub2_i32 1
23
if (fold_const2(ctx, op) ||
20
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
21
#define TCG_TARGET_HAS_deposit_i64 1
25
* If the sign bit is known zero, then logical right shift
22
#define TCG_TARGET_HAS_extract_i64 1
26
* will not reduce the number of input sign repetitions.
23
#define TCG_TARGET_HAS_sextract_i64 0
27
*/
24
-#define TCG_TARGET_HAS_extract2_i64 0
28
- sign = -s_mask;
25
+#define TCG_TARGET_HAS_extract2_i64 1
29
- if (sign && !(z_mask & sign)) {
26
#define TCG_TARGET_HAS_movcond_i64 1
30
+ if (~z_mask & -s_mask) {
27
#define TCG_TARGET_HAS_add2_i64 1
31
return fold_masks_s(ctx, op, s_mask);
28
#define TCG_TARGET_HAS_sub2_i64 1
29
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/i386/tcg-target.inc.c
32
+++ b/tcg/i386/tcg-target.inc.c
33
@@ -XXX,XX +XXX,XX @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
34
#define OPC_SHUFPS (0xc6 | P_EXT)
35
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
36
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
37
+#define OPC_SHRD_Ib (0xac | P_EXT)
38
#define OPC_TESTL    (0x85)
39
#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
40
#define OPC_UD2 (0x0b | P_EXT)
41
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
42
}
32
}
43
break;
33
break;
44
45
+ OP_32_64(extract2):
46
+ /* Note that SHRD outputs to the r/m operand. */
47
+ tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
48
+ tcg_out8(s, args[3]);
49
+ break;
50
+
51
case INDEX_op_mb:
52
tcg_out_mb(s, a0);
53
break;
54
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
55
static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
56
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
57
static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
58
+ static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
59
static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
60
static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
61
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
62
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
63
case INDEX_op_ctpop_i32:
64
case INDEX_op_ctpop_i64:
65
return &r_r;
66
+ case INDEX_op_extract2_i32:
67
+ case INDEX_op_extract2_i64:
68
+ return &r_0_r;
69
70
case INDEX_op_deposit_i32:
71
case INDEX_op_deposit_i64:
72
--
34
--
73
2.17.1
35
2.43.0
74
75
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
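For context (my reading of the new mask representation, so treat it as an assumption): s_mask now records which high bits are guaranteed copies of the sign bit, so an ld8s result can be summarized as INT8_MIN (bits 63..7 set), while the unsigned loads keep using z_mask for the bits that may be non-zero. A tiny stand-alone check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t s_mask_ld8s = (uint64_t)INT8_MIN;  /* 0xffffffffffffff80 */
        int64_t v = (int8_t)0x90;                   /* a sign-extended byte */

        /* Every bit in s_mask is a copy of the msb: all set or all clear. */
        assert(((uint64_t)v & s_mask_ld8s) == s_mask_ld8s ||
               ((uint64_t)v & s_mask_ld8s) == 0);

        /* For ld8u only the low 8 bits can be set. */
        uint64_t z_mask_ld8u = 0xff;
        uint64_t u = (uint8_t)0x90;
        assert((u & ~z_mask_ld8u) == 0);
        return 0;
    }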
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
type = ctx->type;
20
--
21
2.43.0
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Remove fold_masks as the function becomes unused.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 18 ++++++++----------
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
15
return fold_masks_zs(ctx, op, -1, s_mask);
16
}
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
-{
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
-}
22
-
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask, s_mask;
31
+ TempOptInfo *t1, *t2;
32
+
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
38
}
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
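A short aside on why those are the right masks for xor (my own illustration, not from the patch): a result bit can only be non-zero where at least one input may be non-zero, so the z_masks are OR-ed, and a bit is a guaranteed sign-bit copy only where both inputs guarantee it, so the s_masks are AND-ed. For the known-zero side:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z1 = 0xffff, z2 = 0xff;     /* known-nonzero masks of the inputs */
        uint64_t a = 0x1234, b = 0x00ab;     /* any values respecting them        */

        assert((a & ~z1) == 0 && (b & ~z2) == 0);
        assert(((a ^ b) & ~(z1 | z2)) == 0); /* xor cannot set a bit outside z1 | z2 */
        return 0;
    }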
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
/* Propagate constants and copies, fold constant expressions. */
20
--
21
2.43.0
1
This will let backends implement the double-word shift operation.
1
All non-default cases now finish folding within each function.
2
Do the same for the default case and assert afterward that folding has been done.
2
3
3
Reviewed-by: David Hildenbrand <david@redhat.com>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/aarch64/tcg-target.h | 2 ++
7
tcg/optimize.c | 6 ++----
7
tcg/arm/tcg-target.h | 1 +
8
1 file changed, 2 insertions(+), 4 deletions(-)
8
tcg/i386/tcg-target.h | 2 ++
9
tcg/mips/tcg-target.h | 2 ++
10
tcg/ppc/tcg-target.h | 2 ++
11
tcg/riscv/tcg-target.h | 2 ++
12
tcg/s390/tcg-target.h | 2 ++
13
tcg/sparc/tcg-target.h | 2 ++
14
tcg/tcg-opc.h | 2 ++
15
tcg/tcg.h | 1 +
16
tcg/tci/tcg-target.h | 2 ++
17
tcg/optimize.c | 16 ++++++++++++++++
18
tcg/tcg-op.c | 4 ++++
19
tcg/tcg.c | 4 ++++
20
tcg/README | 7 +++++++
21
15 files changed, 51 insertions(+)
22
9
23
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/aarch64/tcg-target.h
26
+++ b/tcg/aarch64/tcg-target.h
27
@@ -XXX,XX +XXX,XX @@ typedef enum {
28
#define TCG_TARGET_HAS_deposit_i32 1
29
#define TCG_TARGET_HAS_extract_i32 1
30
#define TCG_TARGET_HAS_sextract_i32 1
31
+#define TCG_TARGET_HAS_extract2_i32 0
32
#define TCG_TARGET_HAS_movcond_i32 1
33
#define TCG_TARGET_HAS_add2_i32 1
34
#define TCG_TARGET_HAS_sub2_i32 1
35
@@ -XXX,XX +XXX,XX @@ typedef enum {
36
#define TCG_TARGET_HAS_deposit_i64 1
37
#define TCG_TARGET_HAS_extract_i64 1
38
#define TCG_TARGET_HAS_sextract_i64 1
39
+#define TCG_TARGET_HAS_extract2_i64 0
40
#define TCG_TARGET_HAS_movcond_i64 1
41
#define TCG_TARGET_HAS_add2_i64 1
42
#define TCG_TARGET_HAS_sub2_i64 1
43
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/arm/tcg-target.h
46
+++ b/tcg/arm/tcg-target.h
47
@@ -XXX,XX +XXX,XX @@ extern bool use_idiv_instructions;
48
#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
49
#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
50
#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
51
+#define TCG_TARGET_HAS_extract2_i32 0
52
#define TCG_TARGET_HAS_movcond_i32 1
53
#define TCG_TARGET_HAS_mulu2_i32 1
54
#define TCG_TARGET_HAS_muls2_i32 1
55
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/i386/tcg-target.h
58
+++ b/tcg/i386/tcg-target.h
59
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
60
#define TCG_TARGET_HAS_deposit_i32 1
61
#define TCG_TARGET_HAS_extract_i32 1
62
#define TCG_TARGET_HAS_sextract_i32 1
63
+#define TCG_TARGET_HAS_extract2_i32 0
64
#define TCG_TARGET_HAS_movcond_i32 1
65
#define TCG_TARGET_HAS_add2_i32 1
66
#define TCG_TARGET_HAS_sub2_i32 1
67
@@ -XXX,XX +XXX,XX @@ extern bool have_avx2;
68
#define TCG_TARGET_HAS_deposit_i64 1
69
#define TCG_TARGET_HAS_extract_i64 1
70
#define TCG_TARGET_HAS_sextract_i64 0
71
+#define TCG_TARGET_HAS_extract2_i64 0
72
#define TCG_TARGET_HAS_movcond_i64 1
73
#define TCG_TARGET_HAS_add2_i64 1
74
#define TCG_TARGET_HAS_sub2_i64 1
75
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
76
index XXXXXXX..XXXXXXX 100644
77
--- a/tcg/mips/tcg-target.h
78
+++ b/tcg/mips/tcg-target.h
79
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
80
#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
81
#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
82
#define TCG_TARGET_HAS_sextract_i32 0
83
+#define TCG_TARGET_HAS_extract2_i32 0
84
#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
85
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
86
#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
87
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
88
#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
89
#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
90
#define TCG_TARGET_HAS_sextract_i64 0
91
+#define TCG_TARGET_HAS_extract2_i64 0
92
#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
93
#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
94
#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
95
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
96
index XXXXXXX..XXXXXXX 100644
97
--- a/tcg/ppc/tcg-target.h
98
+++ b/tcg/ppc/tcg-target.h
99
@@ -XXX,XX +XXX,XX @@ extern bool have_isa_3_00;
100
#define TCG_TARGET_HAS_deposit_i32 1
101
#define TCG_TARGET_HAS_extract_i32 1
102
#define TCG_TARGET_HAS_sextract_i32 0
103
+#define TCG_TARGET_HAS_extract2_i32 0
104
#define TCG_TARGET_HAS_movcond_i32 1
105
#define TCG_TARGET_HAS_mulu2_i32 0
106
#define TCG_TARGET_HAS_muls2_i32 0
107
@@ -XXX,XX +XXX,XX @@ extern bool have_isa_3_00;
108
#define TCG_TARGET_HAS_deposit_i64 1
109
#define TCG_TARGET_HAS_extract_i64 1
110
#define TCG_TARGET_HAS_sextract_i64 0
111
+#define TCG_TARGET_HAS_extract2_i64 0
112
#define TCG_TARGET_HAS_movcond_i64 1
113
#define TCG_TARGET_HAS_add2_i64 1
114
#define TCG_TARGET_HAS_sub2_i64 1
115
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
116
index XXXXXXX..XXXXXXX 100644
117
--- a/tcg/riscv/tcg-target.h
118
+++ b/tcg/riscv/tcg-target.h
119
@@ -XXX,XX +XXX,XX @@ typedef enum {
120
#define TCG_TARGET_HAS_deposit_i32 0
121
#define TCG_TARGET_HAS_extract_i32 0
122
#define TCG_TARGET_HAS_sextract_i32 0
123
+#define TCG_TARGET_HAS_extract2_i32 0
124
#define TCG_TARGET_HAS_add2_i32 1
125
#define TCG_TARGET_HAS_sub2_i32 1
126
#define TCG_TARGET_HAS_mulu2_i32 0
127
@@ -XXX,XX +XXX,XX @@ typedef enum {
128
#define TCG_TARGET_HAS_deposit_i64 0
129
#define TCG_TARGET_HAS_extract_i64 0
130
#define TCG_TARGET_HAS_sextract_i64 0
131
+#define TCG_TARGET_HAS_extract2_i64 0
132
#define TCG_TARGET_HAS_extrl_i64_i32 1
133
#define TCG_TARGET_HAS_extrh_i64_i32 1
134
#define TCG_TARGET_HAS_ext8s_i64 1
135
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/s390/tcg-target.h
138
+++ b/tcg/s390/tcg-target.h
139
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities;
140
#define TCG_TARGET_HAS_deposit_i32 (s390_facilities & FACILITY_GEN_INST_EXT)
141
#define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT)
142
#define TCG_TARGET_HAS_sextract_i32 0
143
+#define TCG_TARGET_HAS_extract2_i32 0
144
#define TCG_TARGET_HAS_movcond_i32 1
145
#define TCG_TARGET_HAS_add2_i32 1
146
#define TCG_TARGET_HAS_sub2_i32 1
147
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities;
148
#define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT)
149
#define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT)
150
#define TCG_TARGET_HAS_sextract_i64 0
151
+#define TCG_TARGET_HAS_extract2_i64 0
152
#define TCG_TARGET_HAS_movcond_i64 1
153
#define TCG_TARGET_HAS_add2_i64 1
154
#define TCG_TARGET_HAS_sub2_i64 1
155
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
156
index XXXXXXX..XXXXXXX 100644
157
--- a/tcg/sparc/tcg-target.h
158
+++ b/tcg/sparc/tcg-target.h
159
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
160
#define TCG_TARGET_HAS_deposit_i32 0
161
#define TCG_TARGET_HAS_extract_i32 0
162
#define TCG_TARGET_HAS_sextract_i32 0
163
+#define TCG_TARGET_HAS_extract2_i32 0
164
#define TCG_TARGET_HAS_movcond_i32 1
165
#define TCG_TARGET_HAS_add2_i32 1
166
#define TCG_TARGET_HAS_sub2_i32 1
167
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
168
#define TCG_TARGET_HAS_deposit_i64 0
169
#define TCG_TARGET_HAS_extract_i64 0
170
#define TCG_TARGET_HAS_sextract_i64 0
171
+#define TCG_TARGET_HAS_extract2_i64 0
172
#define TCG_TARGET_HAS_movcond_i64 1
173
#define TCG_TARGET_HAS_add2_i64 1
174
#define TCG_TARGET_HAS_sub2_i64 1
175
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
176
index XXXXXXX..XXXXXXX 100644
177
--- a/tcg/tcg-opc.h
178
+++ b/tcg/tcg-opc.h
179
@@ -XXX,XX +XXX,XX @@ DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
180
DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
181
DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
182
DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
183
+DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))
184
185
DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END)
186
187
@@ -XXX,XX +XXX,XX @@ DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
188
DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
189
DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
190
DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
191
+DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))
192
193
/* size changing ops */
194
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
195
diff --git a/tcg/tcg.h b/tcg/tcg.h
196
index XXXXXXX..XXXXXXX 100644
197
--- a/tcg/tcg.h
198
+++ b/tcg/tcg.h
199
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
200
#define TCG_TARGET_HAS_deposit_i64 0
201
#define TCG_TARGET_HAS_extract_i64 0
202
#define TCG_TARGET_HAS_sextract_i64 0
203
+#define TCG_TARGET_HAS_extract2_i64 0
204
#define TCG_TARGET_HAS_movcond_i64 0
205
#define TCG_TARGET_HAS_add2_i64 0
206
#define TCG_TARGET_HAS_sub2_i64 0
207
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
208
index XXXXXXX..XXXXXXX 100644
209
--- a/tcg/tci/tcg-target.h
210
+++ b/tcg/tci/tcg-target.h
211
@@ -XXX,XX +XXX,XX @@
212
#define TCG_TARGET_HAS_deposit_i32 1
213
#define TCG_TARGET_HAS_extract_i32 0
214
#define TCG_TARGET_HAS_sextract_i32 0
215
+#define TCG_TARGET_HAS_extract2_i32 0
216
#define TCG_TARGET_HAS_eqv_i32 0
217
#define TCG_TARGET_HAS_nand_i32 0
218
#define TCG_TARGET_HAS_nor_i32 0
219
@@ -XXX,XX +XXX,XX @@
220
#define TCG_TARGET_HAS_deposit_i64 1
221
#define TCG_TARGET_HAS_extract_i64 0
222
#define TCG_TARGET_HAS_sextract_i64 0
223
+#define TCG_TARGET_HAS_extract2_i64 0
224
#define TCG_TARGET_HAS_div_i64 0
225
#define TCG_TARGET_HAS_rem_i64 0
226
#define TCG_TARGET_HAS_ext8s_i64 1
227
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
228
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
229
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
230
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
231
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
232
}
15
done = true;
233
goto do_default;
16
break;
234
17
default:
235
+ CASE_OP_32_64(extract2):
18
+ done = finish_folding(&ctx, op);
236
+ if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
19
break;
237
+ TCGArg v1 = arg_info(op->args[1])->val;
20
}
238
+ TCGArg v2 = arg_info(op->args[2])->val;
21
-
239
+
22
- if (!done) {
240
+ if (opc == INDEX_op_extract2_i64) {
23
- finish_folding(&ctx, op);
241
+ tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
24
- }
242
+ } else {
25
+ tcg_debug_assert(done);
243
+ tmp = (v1 >> op->args[3]) | (v2 << (32 - op->args[3]));
26
}
244
+ tmp = (int32_t)tmp;
27
}
245
+ }
246
+ tcg_opt_gen_movi(s, op, op->args[0], tmp);
247
+ break;
248
+ }
249
+ goto do_default;
250
+
251
CASE_OP_32_64(setcond):
252
tmp = do_constant_folding_cond(opc, op->args[1],
253
op->args[2], op->args[3]);
254
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
255
index XXXXXXX..XXXXXXX 100644
256
--- a/tcg/tcg-op.c
257
+++ b/tcg/tcg-op.c
258
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
259
tcg_gen_mov_i32(ret, ah);
260
} else if (al == ah) {
261
tcg_gen_rotri_i32(ret, al, ofs);
262
+ } else if (TCG_TARGET_HAS_extract2_i32) {
263
+ tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
264
} else {
265
TCGv_i32 t0 = tcg_temp_new_i32();
266
tcg_gen_shri_i32(t0, al, ofs);
267
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
268
tcg_gen_mov_i64(ret, ah);
269
} else if (al == ah) {
270
tcg_gen_rotri_i64(ret, al, ofs);
271
+ } else if (TCG_TARGET_HAS_extract2_i64) {
272
+ tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
273
} else {
274
TCGv_i64 t0 = tcg_temp_new_i64();
275
tcg_gen_shri_i64(t0, al, ofs);
276
diff --git a/tcg/tcg.c b/tcg/tcg.c
277
index XXXXXXX..XXXXXXX 100644
278
--- a/tcg/tcg.c
279
+++ b/tcg/tcg.c
280
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
281
return TCG_TARGET_HAS_extract_i32;
282
case INDEX_op_sextract_i32:
283
return TCG_TARGET_HAS_sextract_i32;
284
+ case INDEX_op_extract2_i32:
285
+ return TCG_TARGET_HAS_extract2_i32;
286
case INDEX_op_add2_i32:
287
return TCG_TARGET_HAS_add2_i32;
288
case INDEX_op_sub2_i32:
289
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
290
return TCG_TARGET_HAS_extract_i64;
291
case INDEX_op_sextract_i64:
292
return TCG_TARGET_HAS_sextract_i64;
293
+ case INDEX_op_extract2_i64:
294
+ return TCG_TARGET_HAS_extract2_i64;
295
case INDEX_op_extrl_i64_i32:
296
return TCG_TARGET_HAS_extrl_i64_i32;
297
case INDEX_op_extrh_i64_i32:
298
diff --git a/tcg/README b/tcg/README
299
index XXXXXXX..XXXXXXX 100644
300
--- a/tcg/README
301
+++ b/tcg/README
302
@@ -XXX,XX +XXX,XX @@ at bit 8. This operation would be equivalent to
303
304
(using an arithmetic right shift).
305
306
+* extract2_i32/i64 dest, t1, t2, pos
307
+
308
+For N = {32,64}, extract an N-bit quantity from the concatenation
309
+of t2:t1, beginning at pos. The tcg_gen_extract2_{i32,i64} expander
310
+accepts 0 <= pos <= N as inputs. The backend code generator will
311
+not see either 0 or N as inputs for these opcodes.
312
+
313
* extrl_i64_i32 t0, t1
314
315
For 64-bit hosts only, extract the low 32-bits of input T1 and place it
316
--
28
--
317
2.17.1
29
2.43.0
318
319
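To make the opcode concrete, here is a small worked example of the i32 case (mine, not part of the patch), using the same formula as the constant-folding hunk above: dest = (t1 >> pos) | (t2 << (32 - pos)) for 0 < pos < 32.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t t1 = 0xaabbccdd;            /* low word  */
        uint32_t t2 = 0x11223344;            /* high word */
        unsigned pos = 8;

        /* 32 bits taken out of the 64-bit concatenation t2:t1 at bit 8. */
        uint32_t dest = (t1 >> pos) | (t2 << (32 - pos));
        assert(dest == 0x44aabbcc);
        return 0;
    }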
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
1
This is part b of relocation overflow handling.
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable its use in fold_affected_mask.
2
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
6
---
5
tcg/tcg-pool.inc.c | 12 +++++++-----
7
tcg/optimize.c | 4 ++--
6
tcg/tcg.c | 9 +++++----
8
1 file changed, 2 insertions(+), 2 deletions(-)
7
2 files changed, 12 insertions(+), 9 deletions(-)
8
9
9
diff --git a/tcg/tcg-pool.inc.c b/tcg/tcg-pool.inc.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tcg-pool.inc.c
12
--- a/tcg/optimize.c
12
+++ b/tcg/tcg-pool.inc.c
13
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
14
/* To be provided by cpu/tcg-target.inc.c. */
15
g_assert_not_reached();
15
static void tcg_out_nop_fill(tcg_insn_unit *p, int count);
16
17
-static bool tcg_out_pool_finalize(TCGContext *s)
18
+static int tcg_out_pool_finalize(TCGContext *s)
19
{
20
TCGLabelPoolData *p = s->pool_labels;
21
TCGLabelPoolData *l = NULL;
22
void *a;
23
24
if (p == NULL) {
25
- return true;
26
+ return 0;
27
}
16
}
28
17
29
/* ??? Round up to qemu_icache_linesize, but then do not round
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
30
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_pool_finalize(TCGContext *s)
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
31
size_t size = sizeof(tcg_target_ulong) * p->nlong;
20
return true;
32
if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
33
if (unlikely(a > s->code_gen_highwater)) {
34
- return false;
35
+ return -1;
36
}
37
memcpy(a, p->data, size);
38
a += size;
39
l = p;
40
}
41
- patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend);
42
+ if (!patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend)) {
43
+ return -2;
44
+ }
45
}
21
}
46
22
47
s->code_ptr = a;
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
48
- return true;
24
s_mask = s_mask_old >> pos;
49
+ return 0;
25
s_mask |= -1ull << (len - 1);
50
}
26
51
diff --git a/tcg/tcg.c b/tcg/tcg.c
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
52
index XXXXXXX..XXXXXXX 100644
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
53
--- a/tcg/tcg.c
29
return true;
54
+++ b/tcg/tcg.c
55
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
56
#ifdef TCG_TARGET_NEED_POOL_LABELS
57
/* Allow the prologue to put e.g. guest_base into a pool entry. */
58
{
59
- bool ok = tcg_out_pool_finalize(s);
60
- tcg_debug_assert(ok);
61
+ int result = tcg_out_pool_finalize(s);
62
+ tcg_debug_assert(result == 0);
63
}
30
}
64
#endif
31
65
66
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
67
}
68
#endif
69
#ifdef TCG_TARGET_NEED_POOL_LABELS
70
- if (!tcg_out_pool_finalize(s)) {
71
- return -1;
72
+ i = tcg_out_pool_finalize(s);
73
+ if (i < 0) {
74
+ return i;
75
}
76
#endif
77
if (!tcg_resolve_relocs(s)) {
78
--
32
--
79
2.17.1
33
2.43.0
80
81
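For the optimize.c half of this patch, a simplified illustration (my reading, not from the patch text) of what re-enabling fold_affected_mask buys for the sign-extension folds: when every sign-copy bit the extension would establish is already guaranteed by the input, s_mask & ~s_mask_old is zero and the op can be treated as a plain copy.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical: the operand of an ext16s already came from an ld8s,
         * so bits 63..7 are known sign copies; ext16s only needs 63..15. */
        uint64_t s_mask_old = (uint64_t)INT8_MIN;   /* guaranteed by the input */
        uint64_t s_mask     = (uint64_t)INT16_MIN;  /* asserted by ext16s      */

        assert((s_mask & ~s_mask_old) == 0);        /* nothing new: fold to mov */
        return 0;
    }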
1
If the TB generates too much code, such that backend relocations
1
The big comment just above says functions should be sorted.
2
overflow, try again with a smaller TB. In support of this, move
2
Add forward declarations as needed.
3
relocation processing from a random place within tcg_out_op, in
4
the handling of branch opcodes, to a new function at the end of
5
tcg_gen_code.
6
3
7
This is not a complete solution, as there are additional relocs
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
generated for out-of-line ldst handling and constant pools.
9
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
6
---
12
tcg/tcg.h | 15 +++++++-------
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
13
tcg/tcg.c | 61 ++++++++++++++++++++++++++-----------------------------
8
1 file changed, 59 insertions(+), 55 deletions(-)
14
2 files changed, 36 insertions(+), 40 deletions(-)
15
9
16
diff --git a/tcg/tcg.h b/tcg/tcg.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/tcg.h
12
--- a/tcg/optimize.c
19
+++ b/tcg/tcg.h
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ typedef uint64_t tcg_insn_unit;
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
do { if (!(X)) { __builtin_unreachable(); } } while (0)
15
* 3) those that produce information about the result value.
22
#endif
16
*/
23
17
24
-typedef struct TCGRelocation {
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
25
- struct TCGRelocation *next;
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
26
- int type;
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
27
+typedef struct TCGRelocation TCGRelocation;
21
+
28
+struct TCGRelocation {
22
static bool fold_add(OptContext *ctx, TCGOp *op)
29
+ QSIMPLEQ_ENTRY(TCGRelocation) next;
30
tcg_insn_unit *ptr;
31
intptr_t addend;
32
-} TCGRelocation;
33
+ int type;
34
+};
35
36
typedef struct TCGLabel TCGLabel;
37
struct TCGLabel {
38
@@ -XXX,XX +XXX,XX @@ struct TCGLabel {
39
union {
40
uintptr_t value;
41
tcg_insn_unit *value_ptr;
42
- TCGRelocation *first_reloc;
43
} u;
44
-#ifdef CONFIG_DEBUG_TCG
45
+ QSIMPLEQ_HEAD(, TCGRelocation) relocs;
46
QSIMPLEQ_ENTRY(TCGLabel) next;
47
-#endif
48
};
49
50
typedef struct TCGPool {
51
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
52
#endif
53
54
#ifdef CONFIG_DEBUG_TCG
55
- QSIMPLEQ_HEAD(, TCGLabel) labels;
56
int temps_in_use;
57
int goto_tb_issue_mask;
58
#endif
59
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
60
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
61
62
QTAILQ_HEAD(, TCGOp) ops, free_ops;
63
+ QSIMPLEQ_HEAD(, TCGLabel) labels;
64
65
/* Tells which temporary holds a given register.
66
It does not take into account fixed registers */
67
diff --git a/tcg/tcg.c b/tcg/tcg.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/tcg/tcg.c
70
+++ b/tcg/tcg.c
71
@@ -XXX,XX +XXX,XX @@ static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
72
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
73
TCGLabel *l, intptr_t addend)
74
{
23
{
75
- TCGRelocation *r;
24
if (fold_const2_commutative(ctx, op) ||
76
+ TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
77
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
78
- if (l->has_value) {
79
- /* FIXME: This may break relocations on RISC targets that
80
- modify instruction fields in place. The caller may not have
81
- written the initial value. */
82
- bool ok = patch_reloc(code_ptr, type, l->u.value, addend);
83
- tcg_debug_assert(ok);
84
- } else {
85
- /* add a new relocation entry */
86
- r = tcg_malloc(sizeof(TCGRelocation));
87
- r->type = type;
88
- r->ptr = code_ptr;
89
- r->addend = addend;
90
- r->next = l->u.first_reloc;
91
- l->u.first_reloc = r;
92
- }
93
+ r->type = type;
94
+ r->ptr = code_ptr;
95
+ r->addend = addend;
96
+ QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
97
}
27
}
98
28
99
static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
100
{
30
+{
101
- intptr_t value = (intptr_t)ptr;
31
+ /* If true and false values are the same, eliminate the cmp. */
102
- TCGRelocation *r;
32
+ if (args_are_copies(op->args[2], op->args[3])) {
103
-
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
104
tcg_debug_assert(!l->has_value);
34
+ }
105
-
106
- for (r = l->u.first_reloc; r != NULL; r = r->next) {
107
- bool ok = patch_reloc(r->ptr, r->type, value, r->addend);
108
- tcg_debug_assert(ok);
109
- }
110
-
111
l->has_value = 1;
112
l->u.value_ptr = ptr;
113
}
114
@@ -XXX,XX +XXX,XX @@ TCGLabel *gen_new_label(void)
115
TCGContext *s = tcg_ctx;
116
TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
117
118
- *l = (TCGLabel){
119
- .id = s->nb_labels++
120
- };
121
-#ifdef CONFIG_DEBUG_TCG
122
+ memset(l, 0, sizeof(TCGLabel));
123
+ l->id = s->nb_labels++;
124
+ QSIMPLEQ_INIT(&l->relocs);
125
+
35
+
126
QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
127
-#endif
37
+ uint64_t tv = arg_info(op->args[2])->val;
128
38
+ uint64_t fv = arg_info(op->args[3])->val;
129
return l;
130
}
131
132
+static bool tcg_resolve_relocs(TCGContext *s)
133
+{
134
+ TCGLabel *l;
135
+
39
+
136
+ QSIMPLEQ_FOREACH(l, &s->labels, next) {
40
+ if (tv == -1 && fv == 0) {
137
+ TCGRelocation *r;
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
138
+ uintptr_t value = l->u.value;
42
+ }
139
+
43
+ if (tv == 0 && fv == -1) {
140
+ QSIMPLEQ_FOREACH(r, &l->relocs, next) {
44
+ if (TCG_TARGET_HAS_not_vec) {
141
+ if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
45
+ op->opc = INDEX_op_not_vec;
142
+ return false;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
143
+ }
51
+ }
144
+ }
52
+ }
145
+ }
53
+ }
146
+ return true;
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
147
+}
82
+}
148
+
83
+
149
static void set_jmp_reset_offset(TCGContext *s, int which)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
150
{
85
{
151
size_t off = tcg_current_code_size(s);
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
152
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
153
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
154
QTAILQ_INIT(&s->ops);
155
QTAILQ_INIT(&s->free_ops);
156
-#ifdef CONFIG_DEBUG_TCG
157
QSIMPLEQ_INIT(&s->labels);
158
-#endif
159
}
89
}
160
90
161
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
162
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
92
-{
163
return -1;
93
- /* If true and false values are the same, eliminate the cmp. */
164
}
94
- if (args_are_copies(op->args[2], op->args[3])) {
165
#endif
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
166
+ if (!tcg_resolve_relocs(s)) {
96
- }
167
+ return -2;
97
-
168
+ }
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
169
99
- uint64_t tv = arg_info(op->args[2])->val;
170
/* flush instruction cache */
100
- uint64_t fv = arg_info(op->args[3])->val;
171
flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
148
{
172
--
149
--
173
2.17.1
150
2.43.0
174
175
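The tcg.c half of this patch switches from patching relocations eagerly to queuing them on the label and resolving them in one pass that can fail gracefully. A stand-alone sketch of that pattern, with simplified types of my own rather than the QEMU structures:

    #include <stdbool.h>
    #include <stddef.h>

    struct reloc { void *ptr; int type; long addend; struct reloc *next; };
    struct label { unsigned long value; struct reloc *relocs; };

    /* Returns false instead of asserting, so the caller can retry with a
     * smaller translation block when a displacement does not fit. */
    bool resolve_relocs(struct label *labels, size_t n,
                        bool (*patch)(void *ptr, int type,
                                      unsigned long value, long addend))
    {
        for (size_t i = 0; i < n; i++) {
            for (struct reloc *r = labels[i].relocs; r; r = r->next) {
                if (!patch(r->ptr, r->type, labels[i].value, r->addend)) {
                    return false;
                }
            }
        }
        return true;
    }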
1
From: David Hildenbrand <david@redhat.com>
1
The big comment just above says functions should be sorted.
2
2
3
Will be helpful for s390x. Input 128 bit and output 64 bit only,
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
which is sufficient for now.
5
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: David Hildenbrand <david@redhat.com>
9
Message-Id: <20190225154204.26751-1-david@redhat.com>
10
[rth: Add matching tcg_gen_extract2_i32.]
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
5
---
13
tcg/tcg-op.h | 6 ++++++
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
14
tcg/tcg-op.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
7
1 file changed, 30 insertions(+), 30 deletions(-)
15
2 files changed, 50 insertions(+)
16
8
17
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tcg-op.h
11
--- a/tcg/optimize.c
20
+++ b/tcg/tcg-op.h
12
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
22
unsigned int ofs, unsigned int len);
14
return true;
23
void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
24
unsigned int ofs, unsigned int len);
25
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
26
+ unsigned int ofs);
27
void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
28
void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
29
void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
30
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
31
unsigned int ofs, unsigned int len);
32
void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
33
unsigned int ofs, unsigned int len);
34
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
35
+ unsigned int ofs);
36
void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
37
void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
38
void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
39
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
40
#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i64
41
#define tcg_gen_extract_tl tcg_gen_extract_i64
42
#define tcg_gen_sextract_tl tcg_gen_sextract_i64
43
+#define tcg_gen_extract2_tl tcg_gen_extract2_i64
44
#define tcg_const_tl tcg_const_i64
45
#define tcg_const_local_tl tcg_const_local_i64
46
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
47
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
48
#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i32
49
#define tcg_gen_extract_tl tcg_gen_extract_i32
50
#define tcg_gen_sextract_tl tcg_gen_sextract_i32
51
+#define tcg_gen_extract2_tl tcg_gen_extract2_i32
52
#define tcg_const_tl tcg_const_i32
53
#define tcg_const_local_tl tcg_const_local_i32
54
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
55
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/tcg/tcg-op.c
58
+++ b/tcg/tcg-op.c
59
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
60
tcg_gen_sari_i32(ret, ret, 32 - len);
61
}
15
}
62
16
63
+/*
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
64
+ * Extract 32-bits from a 64-bit input, ah:al, starting from ofs.
65
+ * Unlike tcg_gen_extract_i32 above, len is fixed at 32.
66
+ */
67
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
68
+ unsigned int ofs)
69
+{
18
+{
70
+ tcg_debug_assert(ofs <= 32);
19
+ /* Canonicalize the comparison to put immediate second. */
71
+ if (ofs == 0) {
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
+ tcg_gen_mov_i32(ret, al);
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
73
+ } else if (ofs == 32) {
74
+ tcg_gen_mov_i32(ret, ah);
75
+ } else if (al == ah) {
76
+ tcg_gen_rotri_i32(ret, al, ofs);
77
+ } else {
78
+ TCGv_i32 t0 = tcg_temp_new_i32();
79
+ tcg_gen_shri_i32(t0, al, ofs);
80
+ tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs);
81
+ tcg_temp_free_i32(t0);
82
+ }
22
+ }
23
+ return finish_folding(ctx, op);
83
+}
24
+}
84
+
25
+
85
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
86
TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
87
{
88
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
89
tcg_gen_sari_i64(ret, ret, 64 - len);
90
}
91
92
+/*
93
+ * Extract 64 bits from a 128-bit input, ah:al, starting from ofs.
94
+ * Unlike tcg_gen_extract_i64 above, len is fixed at 64.
95
+ */
96
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
97
+ unsigned int ofs)
98
+{
27
+{
99
+ tcg_debug_assert(ofs <= 64);
28
+ /* If true and false values are the same, eliminate the cmp. */
100
+ if (ofs == 0) {
29
+ if (args_are_copies(op->args[3], op->args[4])) {
101
+ tcg_gen_mov_i64(ret, al);
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
102
+ } else if (ofs == 64) {
103
+ tcg_gen_mov_i64(ret, ah);
104
+ } else if (al == ah) {
105
+ tcg_gen_rotri_i64(ret, al, ofs);
106
+ } else {
107
+ TCGv_i64 t0 = tcg_temp_new_i64();
108
+ tcg_gen_shri_i64(t0, al, ofs);
109
+ tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs);
110
+ tcg_temp_free_i64(t0);
111
+ }
31
+ }
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
112
+}
45
+}
113
+
46
+
114
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
115
TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
116
{
48
{
49
uint64_t z_mask, s_mask;
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
52
}
53
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
55
-{
56
- /* Canonicalize the comparison to put immediate second. */
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
58
- op->args[3] = tcg_swap_cond(op->args[3]);
59
- }
60
- return finish_folding(ctx, op);
61
-}
62
-
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
64
-{
65
- /* If true and false values are the same, eliminate the cmp. */
66
- if (args_are_copies(op->args[3], op->args[4])) {
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
- }
69
-
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
82
-}
83
-
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
85
{
86
uint64_t z_mask, s_mask, s_mask_old;
117
--
87
--
118
2.17.1
88
2.43.0
119
120
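As a usage note (my own example, assuming the tcg-op.h declarations added above are in scope within a translator): one natural consumer of the new expander is a double-word shift, since extract2(lo, hi, c) is (lo >> c) | (hi << (64 - c)), i.e. the new low word of a 128-bit logical right shift by a constant 0 < c < 64.

    /* Sketch only; assumes ret_lo does not alias hi, so hi is still
     * intact when the high half is shifted. */
    static void gen_shr128_imm(TCGv_i64 ret_lo, TCGv_i64 ret_hi,
                               TCGv_i64 lo, TCGv_i64 hi, unsigned c)
    {
        tcg_gen_extract2_i64(ret_lo, lo, hi, c);
        tcg_gen_shri_i64(ret_hi, hi, c);
    }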
1
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/fpu/softfloat.h | 6 ++++
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
9
fpu/softfloat-parts.c.inc | 7 +++--
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/fpu/softfloat.h
15
+++ b/include/fpu/softfloat.h
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
17
float16 float16_sub(float16, float16, float_status *status);
18
float16 float16_mul(float16, float16, float_status *status);
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
100
+
101
+float32 QEMU_SOFTFLOAT_ATTR
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
103
+ int scale, int flags, float_status *status)
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/fpu/softfloat-parts.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
217
--
218
2.43.0
219
220
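Spelling out the new interface as I read it (an inference from the patch, not an authoritative API statement): float64_muladd_scalbn(a, b, c, scale, flags, s) computes (a * b + c) * 2**scale with a single rounding, so the existing float_muladd_halve_result flag corresponds to scale = -1. Ignoring the softfloat rounding-mode and flag machinery, the arithmetic is:

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
        double a = 1.5, b = 2.0, c = 0.25;
        int scale = -1;                       /* what halve_result used to do */

        double r = ldexp(a * b + c, scale);   /* (a*b + c) * 2**scale */
        assert(r == 1.625);
        return 0;
    }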
1
From: Shahab Vahedi <shahab.vahedi@gmail.com>
1
Use the scalbn interface instead of float_muladd_halve_result.
2
2
3
This change adapts io_readx() to its input access_type. Currently
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
io_readx() treats any memory access as a read, although it has an
5
input argument "MMUAccessType access_type". This results in:
6
7
1) Calling the tlb_fill() only with MMU_DATA_LOAD
8
2) Considering only entry->addr_read as the tlb_addr
9
10
Buglink: https://bugs.launchpad.net/qemu/+bug/1825359
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Signed-off-by: Shahab Vahedi <shahab.vahedi@gmail.com>
13
Message-Id: <20190420072236.12347-1-shahab.vahedi@gmail.com>
14
[rth: Remove assert; fix expression formatting.]
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
---
5
---
17
accel/tcg/cputlb.c | 5 +++--
6
target/arm/tcg/helper-a64.c | 6 +++---
18
1 file changed, 3 insertions(+), 2 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
19
8
20
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
21
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
22
--- a/accel/tcg/cputlb.c
11
--- a/target/arm/tcg/helper-a64.c
23
+++ b/accel/tcg/cputlb.c
12
+++ b/target/arm/tcg/helper-a64.c
24
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
25
CPUTLBEntry *entry;
14
(float16_is_infinity(b) && float16_is_zero(a))) {
26
target_ulong tlb_addr;
15
return float16_one_point_five;
27
16
}
28
- tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
29
+ tlb_fill(cpu, addr, size, access_type, mmu_idx, retaddr);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
30
19
}
31
entry = tlb_entry(env, mmu_idx, addr);
20
32
- tlb_addr = entry->addr_read;
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
33
+ tlb_addr = (access_type == MMU_DATA_LOAD ?
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
34
+ entry->addr_read : entry->addr_code);
23
(float32_is_infinity(b) && float32_is_zero(a))) {
35
if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
24
return float32_one_point_five;
36
/* RAM access */
25
}
37
uintptr_t haddr = addr + entry->addend;
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
28
}
29
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
33
return float64_one_point_five;
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
37
}
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
38
--
40
--
39
2.17.1
41
2.43.0
40
42
41
43
1
1
Use the scalbn interface instead of float_muladd_halve_result.
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/sparc/helper.h | 4 +-
7
target/sparc/fop_helper.c | 8 ++--
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
206
2.43.0
207
208
diff view generated by jsdifflib
1
If a TB generates too much code, try again with fewer insns.
1
All uses have been converted to float*_muladd_scalbn.
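For reference, a halved result is now expressed as a scale of -1. A minimal sketch, not part of the patch, assuming float32 operands a, b, c and a float_status s:

    /* Before: (a * b + c) / 2 with a single rounding, via float_muladd_halve_result. */
    /* After:  the same value, written as (a * b + c) * 2**-1, still one rounding.    */
    float32 r = float32_muladd_scalbn(a, b, c, -1, 0, &s);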
2
2
3
Fixes: https://bugs.launchpad.net/bugs/1824853
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/translate-all.c | 38 ++++++++++++++++++++++++++++++++------
6
include/fpu/softfloat.h | 3 ---
8
tcg/tcg.c | 4 ++++
7
fpu/softfloat.c | 6 ------
9
2 files changed, 36 insertions(+), 6 deletions(-)
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
10
10
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
13
--- a/include/fpu/softfloat.h
14
+++ b/accel/tcg/translate-all.c
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
tb->cflags = cflags;
16
| Using these differs from negating an input or output before calling
17
tb->trace_vcpu_dstate = *cpu->trace_dstate;
17
| the muladd function in that this means that a NaN doesn't have its
18
tcg_ctx->tb_cflags = cflags;
18
| sign bit inverted before it is propagated.
19
+ tb_overflow:
19
-| We also support halving the result before rounding, as a special
20
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
21
#ifdef CONFIG_PROFILER
21
*----------------------------------------------------------------------------*/
22
/* includes aborted translations because of exceptions */
22
enum {
23
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
23
float_muladd_negate_c = 1,
24
ti = profile_getclock();
24
float_muladd_negate_product = 2,
25
#endif
25
float_muladd_negate_result = 4,
26
26
- float_muladd_halve_result = 8,
27
- /* ??? Overflow could be handled better here. In particular, we
27
};
28
- don't need to re-do gen_intermediate_code, nor should we re-do
28
29
- the tcg optimization currently hidden inside tcg_gen_code. All
29
/*----------------------------------------------------------------------------
30
- that should be required is to flush the TBs, allocate a new TB,
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
31
- re-initialize it per above, and re-do the actual code generation. */
31
index XXXXXXX..XXXXXXX 100644
32
gen_code_size = tcg_gen_code(tcg_ctx, tb);
32
--- a/fpu/softfloat.c
33
if (unlikely(gen_code_size < 0)) {
33
+++ b/fpu/softfloat.c
34
- goto buffer_overflow;
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
35
+ switch (gen_code_size) {
35
if (unlikely(!can_use_fpu(s))) {
36
+ case -1:
36
goto soft;
37
+ /*
38
+ * Overflow of code_gen_buffer, or the current slice of it.
39
+ *
40
+ * TODO: We don't need to re-do gen_intermediate_code, nor
41
+ * should we re-do the tcg optimization currently hidden
42
+ * inside tcg_gen_code. All that should be required is to
43
+ * flush the TBs, allocate a new TB, re-initialize it per
44
+ * above, and re-do the actual code generation.
45
+ */
46
+ goto buffer_overflow;
47
+
48
+ case -2:
49
+ /*
50
+ * The code generated for the TranslationBlock is too large.
51
+ * The maximum size allowed by the unwind info is 64k.
52
+ * There may be stricter constraints from relocations
53
+ * in the tcg backend.
54
+ *
55
+ * Try again with half as many insns as we attempted this time.
56
+ * If a single insn overflows, there's a bug somewhere...
57
+ */
58
+ max_insns = tb->icount;
59
+ assert(max_insns > 1);
60
+ max_insns /= 2;
61
+ goto tb_overflow;
62
+
63
+ default:
64
+ g_assert_not_reached();
65
+ }
66
}
37
}
67
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
38
- if (unlikely(flags & float_muladd_halve_result)) {
68
if (unlikely(search_size < 0)) {
39
- goto soft;
69
diff --git a/tcg/tcg.c b/tcg/tcg.c
40
- }
41
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
70
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/tcg.c
56
--- a/fpu/softfloat-parts.c.inc
72
+++ b/tcg/tcg.c
57
+++ b/fpu/softfloat-parts.c.inc
73
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
74
if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
59
a->exp = p_widen.exp;
75
return -1;
60
76
}
61
return_normal:
77
+ /* Test for TB overflow, as seen by gen_insn_end_off. */
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
78
+ if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
63
- if (flags & float_muladd_halve_result) {
79
+ return -2;
64
- a->exp -= 1;
80
+ }
65
- }
81
}
66
a->exp += scale;
82
tcg_debug_assert(num_insns >= 0);
67
finish_sign:
83
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
68
if (flags & float_muladd_negate_result) {
84
--
69
--
85
2.17.1
70
2.43.0
86
71
87
72
diff view generated by jsdifflib
New patch
1
This rounding mode is used by Hexagon.
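A minimal sketch of the effect, not part of the patch: with this mode selected, a result that would overflow rounds to the largest finite value instead of infinity.

    float_status s = { };
    set_float_rounding_mode(float_round_nearest_even_max, &s);

    float32 big = make_float32(0x7f7fffff);   /* largest finite float32 */
    float32 two = make_float32(0x40000000);   /* 2.0f */

    /* Under float_round_nearest_even this would round to +inf;
     * here it saturates back to the largest finite value. */
    float32 r = float32_mul(big, two, &s);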
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
diff view generated by jsdifflib
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
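A minimal sketch of the difference, not part of the patch, assuming a zero-initialized float_status s:

    float32 zero  = float32_zero;               /* +0.0f */
    float32 one   = float32_one;                /*  1.0f */
    float32 mzero = make_float32(0x80000000);   /* -0.0f */

    /* Default behaviour: (+0 * 1) + (-0) gives +0 under round-to-nearest. */
    float32 r0 = float32_muladd(zero, one, mzero, 0, &s);

    /* With the new flag the addition is skipped and C is returned,
     * so the sign of -0 is preserved. */
    float32 r1 = float32_muladd(zero, one, mzero,
                                float_muladd_suppress_add_product_zero, &s);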
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.h | 1 -
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/fma_emu.h
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
30
}
31
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
33
-{
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
38
-}
39
-
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
48
{
49
float32 RdV;
50
arch_fpop_start(env);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
56
--
57
2.43.0
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
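Both forms compute c - a*b. A rough sketch of the equivalence, not part of the patch (a, b, c and s are illustrative):

    /* Old: flip the sign of the first input, then fused multiply-add. */
    float32 neg_a = float32_set_sign(a, float32_is_neg(a) ? 0 : 1);
    float32 r_old = float32_muladd(neg_a, b, c, 0, &s);

    /* New: let softfloat negate the product.  With default-NaN behaviour
     * the two can only differ for NaN inputs, where the result is the
     * default NaN either way. */
    float32 r_new = float32_muladd(a, b, c, float_muladd_negate_product, &s);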
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
diff view generated by jsdifflib
New patch
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
1
5
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/hexagon/op_helper.c | 11 +++--------
10
1 file changed, 3 insertions(+), 8 deletions(-)
11
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/op_helper.c
15
+++ b/target/hexagon/op_helper.c
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
float32 RsV, float32 RtV, float32 PuV)
19
{
20
- size4s_t tmp;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
35
--
36
2.43.0
diff view generated by jsdifflib
1
There are multiple special cases for this instruction.
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
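For (3), a rough sketch of the detection, not part of the patch (a, b, c and s are illustrative, with the status flags cleared beforehand):

    set_float_exception_flags(0, &s);
    float32 r = float32_muladd(a, b, c, 0, &s);
    if (get_float_exception_flags(&s) & float_flag_invalid_isi) {
        /* The product was an infinity and the addend the opposite
         * infinity; the Hexagon instruction returns 0 in that case. */
        r = float32_zero;
    }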
8
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
11
---
3
tcg/tcg-op.c | 34 ++++++++++++++++++++++++++++++----
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
4
1 file changed, 30 insertions(+), 4 deletions(-)
13
1 file changed, 26 insertions(+), 79 deletions(-)
5
14
6
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
7
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/tcg-op.c
17
--- a/target/hexagon/op_helper.c
9
+++ b/tcg/tcg-op.c
18
+++ b/target/hexagon/op_helper.c
10
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
11
return;
20
return RxV;
12
}
21
}
13
22
14
- mask = (1u << len) - 1;
23
-static bool is_zero_prod(float32 a, float32 b)
15
t1 = tcg_temp_new_i32();
24
-{
16
25
- return ((float32_is_zero(a) && is_finite(b)) ||
17
+ if (TCG_TARGET_HAS_extract2_i32) {
26
- (float32_is_zero(b) && is_finite(a)));
18
+ if (ofs + len == 32) {
27
-}
19
+ tcg_gen_shli_i32(t1, arg1, len);
28
-
20
+ tcg_gen_extract2_i32(ret, t1, arg2, len);
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
21
+ goto done;
30
-{
22
+ }
31
- float32 ret = dst;
23
+ if (ofs == 0) {
32
- if (float32_is_any_nan(x)) {
24
+ tcg_gen_extract2_i32(ret, arg1, arg2, len);
33
- if (extract32(x, 22, 1) == 0) {
25
+ tcg_gen_rotli_i32(ret, ret, len);
34
- float_raise(float_flag_invalid, fp_status);
26
+ goto done;
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
43
{
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
45
return RxV;
46
}
47
48
-static bool is_inf_prod(int32_t a, int32_t b)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
50
+ float32 RsV, float32 RtV, int negate)
51
{
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
56
+
57
+ arch_fpop_start(env);
58
+
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
60
+ RxV = float32_muladd(RsV, RtV, RxV,
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
63
+
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
27
+ }
72
+ }
28
+ }
73
+ }
29
+
74
+
30
+ mask = (1u << len) - 1;
75
+ arch_fpop_end(env);
31
if (ofs + len < 32) {
76
+ return RxV;
32
tcg_gen_andi_i32(t1, arg2, mask);
77
}
33
tcg_gen_shli_i32(t1, t1, ofs);
78
34
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
35
}
80
float32 RsV, float32 RtV)
36
tcg_gen_andi_i32(ret, arg1, ~(mask << ofs));
81
{
37
tcg_gen_or_i32(ret, ret, t1);
82
- bool infinp;
83
- bool infminusinf;
84
- float32 tmp;
38
-
85
-
39
+ done:
86
- arch_fpop_start(env);
40
tcg_temp_free_i32(t1);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
41
}
111
}
42
112
43
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
44
}
114
float32 RsV, float32 RtV)
45
}
115
{
46
116
- bool infinp;
47
- mask = (1ull << len) - 1;
117
- bool infminusinf;
48
t1 = tcg_temp_new_i64();
118
- float32 tmp;
49
50
+ if (TCG_TARGET_HAS_extract2_i64) {
51
+ if (ofs + len == 64) {
52
+ tcg_gen_shli_i64(t1, arg1, len);
53
+ tcg_gen_extract2_i64(ret, t1, arg2, len);
54
+ goto done;
55
+ }
56
+ if (ofs == 0) {
57
+ tcg_gen_extract2_i64(ret, arg1, arg2, len);
58
+ tcg_gen_rotli_i64(ret, ret, len);
59
+ goto done;
60
+ }
61
+ }
62
+
63
+ mask = (1ull << len) - 1;
64
if (ofs + len < 64) {
65
tcg_gen_andi_i64(t1, arg2, mask);
66
tcg_gen_shli_i64(t1, t1, ofs);
67
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
68
}
69
tcg_gen_andi_i64(ret, arg1, ~(mask << ofs));
70
tcg_gen_or_i64(ret, ret, t1);
71
-
119
-
72
+ done:
120
- arch_fpop_start(env);
73
tcg_temp_free_i64(t1);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
74
}
146
}
75
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
76
--
149
--
77
2.17.1
150
2.43.0
78
79
diff view generated by jsdifflib
New patch
1
The function is now unused.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/fma_emu.h | 2 -
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.h
13
+++ b/target/hexagon/fma_emu.h
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
15
}
16
int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
19
- int scale, float_status *fp_status);
20
float64 internal_mpyhh(float64 a, float64 b,
21
unsigned long long int accumulated,
22
float_status *fp_status);
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/hexagon/fma_emu.c
26
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
28
return -1;
29
}
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
219
--
220
2.43.0
diff view generated by jsdifflib
New patch
1
This massive macro is now only used once.
2
Expand it for use only by float64.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
8
1 file changed, 127 insertions(+), 128 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
15
}
16
17
/* Return a maximum finite value with the requested sign */
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
20
-{ \
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
23
- /* result zero */ \
24
- switch (fp_status->float_rounding_mode) { \
25
- case float_round_down: \
26
- return zero_##SUFFIX(1); \
27
- default: \
28
- return zero_##SUFFIX(0); \
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
271
}
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
278
--
279
2.43.0
diff view generated by jsdifflib
New patch
1
This structure, with bitfields, is incorrect on big-endian hosts.
2
Use the existing float32_getexp_raw which uses extract32.
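Host C bitfield layout is implementation-defined and effectively reverses between little- and big-endian ABIs, while extract32 operates on the numeric value. A minimal sketch of the raw-exponent extraction (the helper name is illustrative; float32_getexp_raw already does this):

    /* IEEE binary32: sign [31], exponent [30:23], fraction [22:0]. */
    static inline uint32_t sf_getexp_raw(uint32_t f32)
    {
        return extract32(f32, 23, 8);   /* endian-independent */
    }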
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 16 +++-------------
8
1 file changed, 3 insertions(+), 13 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ typedef union {
15
};
16
} Double;
17
18
-typedef union {
19
- float f;
20
- uint32_t i;
21
- struct {
22
- uint32_t mant:23;
23
- uint32_t exp:8;
24
- uint32_t sign:1;
25
- };
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
32
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
47
--
48
2.43.0
diff view generated by jsdifflib
New patch
1
This structure, with bitfields, is incorrect on big-endian hosts.
2
Use extract64 and deposit64 instead.
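A rough sketch of the value-based approach for binary64 (field positions per IEEE 754; variable names are illustrative):

    uint64_t frac = extract64(f64, 0, 52);    /* fraction        */
    int exp       = extract64(f64, 52, 11);   /* biased exponent */
    int sign      = extract64(f64, 63, 1);    /* sign            */

    /* Reassemble without relying on a host bitfield struct. */
    uint64_t ret = 0;
    ret = deposit64(ret, 0, 52, frac);
    ret = deposit64(ret, 52, 11, exp);
    ret = deposit64(ret, 63, 1, sign);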
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
8
1 file changed, 16 insertions(+), 30 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@
15
16
#define WAY_BIG_EXP 4096
17
18
-typedef union {
19
- double f;
20
- uint64_t i;
21
- struct {
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
44
}
45
46
int32_t float64_getexp(float64 f64)
47
{
48
- Double a = { .i = f64 };
49
+ int exp = extract64(f64, 52, 11);
50
if (float64_is_normal(f64)) {
51
- return a.exp;
52
+ return exp;
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
64
+ uint64_t ret;
65
+
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
67
&& ((a.guard | a.round | a.sticky) == 0)) {
68
/* result zero */
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
70
}
71
}
72
/* Underflow? */
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
74
+ ret = int128_getlo(a.mant);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
97
}
98
99
float64 internal_mpyhh(float64 a, float64 b,
100
--
101
2.43.0
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
No need to open-code 64x64->128-bit multiplication.
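A minimal sketch of the replacement, assuming plain uint64_t operands a and b:

    uint64_t lo, hi;
    mulu64(&lo, &hi, a, b);                  /* full 64x64 -> 128-bit product */
    Int128 prod = int128_make128(lo, hi);    /* low half first, then high     */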
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/aarch64/tcg-target.h | 4 ++--
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
5
tcg/aarch64/tcg-target.inc.c | 11 +++++++++++
7
1 file changed, 3 insertions(+), 29 deletions(-)
6
2 files changed, 13 insertions(+), 2 deletions(-)
7
8
8
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/aarch64/tcg-target.h
11
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/aarch64/tcg-target.h
12
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
13
#define TCG_TARGET_HAS_deposit_i32 1
14
return -1;
14
#define TCG_TARGET_HAS_extract_i32 1
15
}
15
#define TCG_TARGET_HAS_sextract_i32 1
16
16
-#define TCG_TARGET_HAS_extract2_i32 0
17
-static uint32_t int128_getw0(Int128 x)
17
+#define TCG_TARGET_HAS_extract2_i32 1
18
-{
18
#define TCG_TARGET_HAS_movcond_i32 1
19
- return int128_getlo(x);
19
#define TCG_TARGET_HAS_add2_i32 1
20
-}
20
#define TCG_TARGET_HAS_sub2_i32 1
21
-
21
@@ -XXX,XX +XXX,XX @@ typedef enum {
22
-static uint32_t int128_getw1(Int128 x)
22
#define TCG_TARGET_HAS_deposit_i64 1
23
-{
23
#define TCG_TARGET_HAS_extract_i64 1
24
- return int128_getlo(x) >> 32;
24
#define TCG_TARGET_HAS_sextract_i64 1
25
-}
25
-#define TCG_TARGET_HAS_extract2_i64 0
26
-
26
+#define TCG_TARGET_HAS_extract2_i64 1
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
27
#define TCG_TARGET_HAS_movcond_i64 1
28
{
28
#define TCG_TARGET_HAS_add2_i64 1
29
- Int128 a, b;
29
#define TCG_TARGET_HAS_sub2_i64 1
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
30
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
31
+ uint64_t l, h;
31
index XXXXXXX..XXXXXXX 100644
32
32
--- a/tcg/aarch64/tcg-target.inc.c
33
- a = int128_make64(ai);
33
+++ b/tcg/aarch64/tcg-target.inc.c
34
- b = int128_make64(bi);
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
35
tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
36
break;
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
37
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
38
+ case INDEX_op_extract2_i64:
39
-
39
+ case INDEX_op_extract2_i32:
40
- pp1s = pp1a + pp1b;
40
+ tcg_out_extr(s, ext, a0, a1, a2, args[3]);
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
41
+ break;
42
- pp2 += (1ULL << 32);
42
+
43
- }
43
case INDEX_op_add2_i32:
44
- uint64_t ret_low = pp0 + (pp1s << 32);
44
tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
45
(int32_t)args[4], args[5], const_args[4],
46
- pp2 += 1;
46
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
47
- }
47
= { .args_ct_str = { "r", "r", "rAL" } };
48
-
48
static const TCGTargetOpDef dep
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
49
= { .args_ct_str = { "r", "0", "rZ" } };
50
+ mulu64(&l, &h, ai, bi);
50
+ static const TCGTargetOpDef ext2
51
+ return int128_make128(l, h);
51
+ = { .args_ct_str = { "r", "rZ", "rZ" } };
52
}
52
static const TCGTargetOpDef movc
53
53
= { .args_ct_str = { "r", "r", "rA", "rZ", "rZ" } };
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
54
static const TCGTargetOpDef add2
55
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
56
case INDEX_op_deposit_i64:
57
return &dep;
58
59
+ case INDEX_op_extract2_i32:
60
+ case INDEX_op_extract2_i64:
61
+ return &ext2;
62
+
63
case INDEX_op_add2_i32:
64
case INDEX_op_add2_i64:
65
case INDEX_op_sub2_i32:
66
--
55
--
67
2.17.1
56
2.43.0
68
69
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Initialize x with accumulated via direct assignment,
2
rather than multiplying by 1.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/arm/tcg-target.h | 2 +-
7
target/hexagon/fma_emu.c | 2 +-
5
tcg/arm/tcg-target.inc.c | 25 +++++++++++++++++++++++++
8
1 file changed, 1 insertion(+), 1 deletion(-)
6
2 files changed, 26 insertions(+), 1 deletion(-)
7
9
8
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/arm/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/arm/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ extern bool use_idiv_instructions;
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
13
#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
15
float64_is_infinity(b)) {
14
#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
16
return float64_mul(a, b, fp_status);
15
#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
17
}
16
-#define TCG_TARGET_HAS_extract2_i32 0
18
- x.mant = int128_mul_6464(accumulated, 1);
17
+#define TCG_TARGET_HAS_extract2_i32 1
19
+ x.mant = int128_make64(accumulated);
18
#define TCG_TARGET_HAS_movcond_i32 1
20
x.sticky = sticky;
19
#define TCG_TARGET_HAS_mulu2_i32 1
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
20
#define TCG_TARGET_HAS_muls2_i32 1
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
21
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/arm/tcg-target.inc.c
24
+++ b/tcg/arm/tcg-target.inc.c
25
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
26
case INDEX_op_sextract_i32:
27
tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
28
break;
29
+ case INDEX_op_extract2_i32:
30
+ /* ??? These optimization vs zero should be generic. */
31
+ /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
32
+ if (const_args[1]) {
33
+ if (const_args[2]) {
34
+ tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
35
+ } else {
36
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
37
+ args[2], SHIFT_IMM_LSL(32 - args[3]));
38
+ }
39
+ } else if (const_args[2]) {
40
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
41
+ args[1], SHIFT_IMM_LSR(args[3]));
42
+ } else {
43
+ /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
44
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
45
+ args[2], SHIFT_IMM_LSL(32 - args[3]));
46
+ tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
47
+ args[1], SHIFT_IMM_LSR(args[3]));
48
+ }
49
+ break;
50
51
case INDEX_op_div_i32:
52
tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
53
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
54
= { .args_ct_str = { "s", "s", "s", "s" } };
55
static const TCGTargetOpDef br
56
= { .args_ct_str = { "r", "rIN" } };
57
+ static const TCGTargetOpDef ext2
58
+ = { .args_ct_str = { "r", "rZ", "rZ" } };
59
static const TCGTargetOpDef dep
60
= { .args_ct_str = { "r", "0", "rZ" } };
61
static const TCGTargetOpDef movc
62
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
63
return &br;
64
case INDEX_op_deposit_i32:
65
return &dep;
66
+ case INDEX_op_extract2_i32:
67
+ return &ext2;
68
case INDEX_op_movcond_i32:
69
return &movc;
70
case INDEX_op_add2_i32:
71
--
23
--
72
2.17.1
24
2.43.0
73
74
diff view generated by jsdifflib
1
In order to handle TB's that translate to too much code, we
1
Convert all targets simultaneously, as the gen_intermediate_code
2
need to place the control of the length of the translation
2
function disappears from the target. While there are possible
3
in the hands of the code gen master loop.
3
workarounds, they're larger than simply performing the conversion.
4
4
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
include/exec/exec-all.h | 4 ++--
8
include/exec/translator.h | 14 --------------
10
include/exec/translator.h | 3 ++-
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
11
accel/tcg/translate-all.c | 15 +++++++++++++--
10
target/alpha/cpu.h | 2 ++
12
accel/tcg/translator.c | 15 ++-------------
11
target/arm/internals.h | 2 ++
13
target/alpha/translate.c | 4 ++--
12
target/avr/cpu.h | 2 ++
14
target/arm/translate.c | 4 ++--
13
target/hexagon/cpu.h | 2 ++
15
target/cris/translate.c | 10 +---------
14
target/hppa/cpu.h | 2 ++
16
target/hppa/translate.c | 5 ++---
15
target/i386/tcg/helper-tcg.h | 2 ++
17
target/i386/translate.c | 4 ++--
16
target/loongarch/internals.h | 2 ++
18
target/lm32/translate.c | 10 +---------
17
target/m68k/cpu.h | 2 ++
19
target/m68k/translate.c | 4 ++--
18
target/microblaze/cpu.h | 2 ++
20
target/microblaze/translate.c | 10 +---------
19
target/mips/tcg/tcg-internal.h | 2 ++
21
target/mips/translate.c | 4 ++--
20
target/openrisc/cpu.h | 2 ++
22
target/moxie/translate.c | 11 ++---------
21
target/ppc/cpu.h | 2 ++
23
target/nios2/translate.c | 14 ++------------
22
target/riscv/cpu.h | 3 +++
24
target/openrisc/translate.c | 4 ++--
23
target/rx/cpu.h | 2 ++
25
target/ppc/translate.c | 4 ++--
24
target/s390x/s390x-internal.h | 2 ++
26
target/riscv/translate.c | 4 ++--
25
target/sh4/cpu.h | 2 ++
27
target/s390x/translate.c | 4 ++--
26
target/sparc/cpu.h | 2 ++
28
target/sh4/translate.c | 4 ++--
27
target/tricore/cpu.h | 2 ++
29
target/sparc/translate.c | 4 ++--
28
target/xtensa/cpu.h | 2 ++
30
target/tilegx/translate.c | 12 +-----------
29
accel/tcg/cpu-exec.c | 8 +++++---
31
target/tricore/translate.c | 16 ++--------------
30
accel/tcg/translate-all.c | 8 +++++---
32
target/unicore32/translate.c | 10 +---------
31
target/alpha/cpu.c | 1 +
33
target/xtensa/translate.c | 4 ++--
32
target/alpha/translate.c | 4 ++--
34
25 files changed, 56 insertions(+), 127 deletions(-)
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
35
71
36
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/include/exec/exec-all.h
39
+++ b/include/exec/exec-all.h
40
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
41
42
#include "qemu/log.h"
43
44
-void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
45
-void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
46
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
47
+void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
48
target_ulong *data);
49
50
void cpu_gen_init(void);
51
diff --git a/include/exec/translator.h b/include/exec/translator.h
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
52
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
53
--- a/include/exec/translator.h
74
--- a/include/exec/translator.h
54
+++ b/include/exec/translator.h
75
+++ b/include/exec/translator.h
55
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
76
@@ -XXX,XX +XXX,XX @@
56
* @db: Disassembly context.
77
#include "qemu/bswap.h"
57
* @cpu: Target vCPU.
78
#include "exec/vaddr.h"
58
* @tb: Translation block.
79
59
+ * @max_insns: Maximum number of insns to translate.
80
-/**
60
*
81
- * gen_intermediate_code
61
* Generic translator loop.
82
- * @cpu: cpu context
62
*
83
- * @tb: translation block
63
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
84
- * @max_insns: max number of instructions to translate
64
* - When too many instructions have been translated.
85
- * @pc: guest virtual program counter address
65
*/
86
- * @host_pc: host physical program counter address
66
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
87
- *
67
- CPUState *cpu, TranslationBlock *tb);
88
- * This function must be provided by the target, which should create
68
+ CPUState *cpu, TranslationBlock *tb, int max_insns);
89
- * the target-specific DisasContext, and then invoke translator_loop.
69
90
- */
70
void translator_loop_temp_check(DisasContextBase *db);
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
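To make the new hook concrete, this is the shape of the per-target conversion that the diffs below repeat for every front end; "foo" is a placeholder target name and the snippet only sketches the pattern rather than compiling on its own:

    /* foo is a hypothetical target; each converted target follows this shape. */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc)
    {
        DisasContext ctx = { };
        translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &ctx.base);
    }

    static const TCGCPUOps foo_tcg_ops = {
        .initialize     = foo_translate_init,
        .translate_code = foo_translate_code,
        /* ... remaining hooks unchanged ... */
    };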
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
71
389
72
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
73
index XXXXXXX..XXXXXXX 100644
391
index XXXXXXX..XXXXXXX 100644
74
--- a/accel/tcg/translate-all.c
392
--- a/accel/tcg/translate-all.c
75
+++ b/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
76
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
77
tb_page_addr_t phys_pc, phys_page2;
408
/*
78
target_ulong virt_page2;
409
* Overflow of code_gen_buffer, or the current slice of it.
79
tcg_insn_unit *gen_code_buf;
410
*
80
- int gen_code_size, search_size;
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
81
+ int gen_code_size, search_size, max_insns;
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
82
#ifdef CONFIG_PROFILER
413
* should we re-do the tcg optimization currently hidden
83
TCGProfile *prof = &tcg_ctx->prof;
414
* inside tcg_gen_code. All that should be required is to
84
int64_t ti;
415
* flush the TBs, allocate a new TB, re-initialize it per
85
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
86
cflags &= ~CF_CLUSTER_MASK;
417
index XXXXXXX..XXXXXXX 100644
87
cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
418
--- a/target/alpha/cpu.c
88
419
+++ b/target/alpha/cpu.c
89
+ max_insns = cflags & CF_COUNT_MASK;
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
90
+ if (max_insns == 0) {
421
91
+ max_insns = CF_COUNT_MASK;
422
static const TCGCPUOps alpha_tcg_ops = {
92
+ }
423
.initialize = alpha_translate_init,
93
+ if (max_insns > TCG_MAX_INSNS) {
424
+ .translate_code = alpha_translate_code,
94
+ max_insns = TCG_MAX_INSNS;
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
95
+ }
426
.restore_state_to_opc = alpha_restore_state_to_opc,
96
+ if (cpu->singlestep_enabled || singlestep) {
97
+ max_insns = 1;
98
+ }
99
+
100
buffer_overflow:
101
tb = tb_alloc(pc);
102
if (unlikely(!tb)) {
103
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
104
tcg_func_start(tcg_ctx);
105
106
tcg_ctx->cpu = ENV_GET_CPU(env);
107
- gen_intermediate_code(cpu, tb);
108
+ gen_intermediate_code(cpu, tb, max_insns);
109
tcg_ctx->cpu = NULL;
110
111
trace_translate_block(tb, tb->pc, tb->tc.ptr);
112
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/accel/tcg/translator.c
115
+++ b/accel/tcg/translator.c
116
@@ -XXX,XX +XXX,XX @@ void translator_loop_temp_check(DisasContextBase *db)
117
}
118
119
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
120
- CPUState *cpu, TranslationBlock *tb)
121
+ CPUState *cpu, TranslationBlock *tb, int max_insns)
122
{
123
int bp_insn = 0;
124
125
@@ -XXX,XX +XXX,XX @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
126
db->pc_next = db->pc_first;
127
db->is_jmp = DISAS_NEXT;
128
db->num_insns = 0;
129
+ db->max_insns = max_insns;
130
db->singlestep_enabled = cpu->singlestep_enabled;
131
132
- /* Instruction counting */
133
- db->max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
134
- if (db->max_insns == 0) {
135
- db->max_insns = CF_COUNT_MASK;
136
- }
137
- if (db->max_insns > TCG_MAX_INSNS) {
138
- db->max_insns = TCG_MAX_INSNS;
139
- }
140
- if (db->singlestep_enabled || singlestep) {
141
- db->max_insns = 1;
142
- }
143
-
144
ops->init_disas_context(db, cpu);
145
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
146
427
147
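The instruction budget that used to be computed inside translator_loop is now established once in tb_gen_code and handed down to the translator. A standalone sketch of the clamp, with illustrative stand-ins for the QEMU constants, looks like this:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins; the real values come from QEMU headers. */
    #define CF_COUNT_MASK 0x0000ffff
    #define TCG_MAX_INSNS 512

    static int clamp_max_insns(uint32_t cflags, bool singlestep)
    {
        int max_insns = cflags & CF_COUNT_MASK;

        if (max_insns == 0) {
            max_insns = CF_COUNT_MASK;    /* no explicit count requested */
        }
        if (max_insns > TCG_MAX_INSNS) {
            max_insns = TCG_MAX_INSNS;    /* hard cap per translation block */
        }
        if (singlestep) {
            max_insns = 1;                /* single-stepping: one insn per TB */
        }
        return max_insns;
    }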
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
148
index XXXXXXX..XXXXXXX 100644
429
index XXXXXXX..XXXXXXX 100644
149
--- a/target/alpha/translate.c
430
--- a/target/alpha/translate.c
150
+++ b/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
151
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
152
.disas_log = alpha_tr_disas_log,
433
.tb_stop = alpha_tr_tb_stop,
153
};
434
};
154
435
155
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
156
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
437
- vaddr pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
157
{
440
{
158
DisasContext dc;
441
DisasContext dc;
159
- translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
160
+ translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
161
}
444
index XXXXXXX..XXXXXXX 100644
162
445
--- a/target/arm/cpu.c
163
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
446
+++ b/target/arm/cpu.c
164
diff --git a/target/arm/translate.c b/target/arm/translate.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
165
index XXXXXXX..XXXXXXX 100644
448
#ifdef CONFIG_TCG
166
--- a/target/arm/translate.c
449
static const TCGCPUOps arm_tcg_ops = {
167
+++ b/target/arm/translate.c
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
168
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
169
};
472
.tb_stop = arm_tr_tb_stop,
170
473
};
171
/* generate intermediate code for basic block 'tb'. */
474
172
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
475
-/* generate intermediate code for basic block 'tb'. */
173
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
174
{
477
- vaddr pc, void *host_pc)
175
DisasContext dc;
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
480
{
481
DisasContext dc = { };
176
const TranslatorOps *ops = &arm_translator_ops;
482
const TranslatorOps *ops = &arm_translator_ops;
177
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
178
}
484
index XXXXXXX..XXXXXXX 100644
179
#endif
485
--- a/target/avr/cpu.c
180
486
+++ b/target/avr/cpu.c
181
- translator_loop(ops, &dc.base, cpu, tb);
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
182
+ translator_loop(ops, &dc.base, cpu, tb, max_insns);
488
183
}
489
static const TCGCPUOps avr_tcg_ops = {
184
490
.initialize = avr_cpu_tcg_init,
185
void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
491
+ .translate_code = avr_cpu_translate_code,
186
diff --git a/target/cris/translate.c b/target/cris/translate.c
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
187
index XXXXXXX..XXXXXXX 100644
493
.restore_state_to_opc = avr_restore_state_to_opc,
188
--- a/target/cris/translate.c
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
189
+++ b/target/cris/translate.c
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
190
@@ -XXX,XX +XXX,XX @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
191
*/
507
*/
192
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
193
/* generate intermediate code for basic block 'tb'. */
509
.tb_stop = avr_tr_tb_stop,
194
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
510
};
195
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
511
196
{
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
197
CPUCRISState *env = cs->env_ptr;
513
- vaddr pc, void *host_pc)
198
uint32_t pc_start;
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
199
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
515
+ int *max_insns, vaddr pc, void *host_pc)
200
uint32_t page_start;
516
{
201
target_ulong npc;
517
DisasContext dc = { };
202
int num_insns;
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
203
- int max_insns;
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
204
520
index XXXXXXX..XXXXXXX 100644
205
if (env->pregs[PR_VR] == 32) {
521
--- a/target/hexagon/cpu.c
206
dc->decoder = crisv32_decoder;
522
+++ b/target/hexagon/cpu.c
207
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
208
524
209
page_start = pc_start & TARGET_PAGE_MASK;
525
static const TCGCPUOps hexagon_tcg_ops = {
210
num_insns = 0;
526
.initialize = hexagon_translate_init,
211
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
527
+ .translate_code = hexagon_translate_code,
212
- if (max_insns == 0) {
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
213
- max_insns = CF_COUNT_MASK;
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
214
- }
530
};
215
- if (max_insns > TCG_MAX_INSNS) {
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
216
- max_insns = TCG_MAX_INSNS;
532
index XXXXXXX..XXXXXXX 100644
217
- }
533
--- a/target/hexagon/translate.c
218
534
+++ b/target/hexagon/translate.c
219
gen_tb_start(tb);
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
220
do {
536
.tb_stop = hexagon_tr_tb_stop,
537
};
538
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
221
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
222
index XXXXXXX..XXXXXXX 100644
559
index XXXXXXX..XXXXXXX 100644
223
--- a/target/hppa/translate.c
560
--- a/target/hppa/translate.c
224
+++ b/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
225
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
226
.disas_log = hppa_tr_disas_log,
563
#endif
227
};
564
};
228
565
229
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
230
-
567
- vaddr pc, void *host_pc)
231
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
569
+ int *max_insns, vaddr pc, void *host_pc)
570
{
571
DisasContext ctx = { };
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
586
index XXXXXXX..XXXXXXX 100644
587
--- a/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
590
.tb_stop = i386_tr_tb_stop,
591
};
592
593
-/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
618
.tb_stop = loongarch_tr_tb_stop,
619
};
620
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
232
{
625
{
233
DisasContext ctx;
626
DisasContext ctx;
234
- translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
627
235
+ translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
236
}
629
index XXXXXXX..XXXXXXX 100644
237
630
--- a/target/m68k/cpu.c
238
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
631
+++ b/target/m68k/cpu.c
239
diff --git a/target/i386/translate.c b/target/i386/translate.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
240
index XXXXXXX..XXXXXXX 100644
633
241
--- a/target/i386/translate.c
634
static const TCGCPUOps m68k_tcg_ops = {
242
+++ b/target/i386/translate.c
635
.initialize = m68k_tcg_init,
243
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
636
+ .translate_code = m68k_translate_code,
244
};
637
.restore_state_to_opc = m68k_restore_state_to_opc,
245
638
246
/* generate intermediate code for basic block 'tb'. */
639
#ifndef CONFIG_USER_ONLY
247
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
248
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
249
{
250
DisasContext dc;
251
252
- translator_loop(&i386_tr_ops, &dc.base, cpu, tb);
253
+ translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
254
}
255
256
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
257
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
258
index XXXXXXX..XXXXXXX 100644
259
--- a/target/lm32/translate.c
260
+++ b/target/lm32/translate.c
261
@@ -XXX,XX +XXX,XX @@ static inline void decode(DisasContext *dc, uint32_t ir)
262
}
263
264
/* generate intermediate code for basic block 'tb'. */
265
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
266
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
267
{
268
CPULM32State *env = cs->env_ptr;
269
LM32CPU *cpu = lm32_env_get_cpu(env);
270
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
271
uint32_t pc_start;
272
uint32_t page_start;
273
int num_insns;
274
- int max_insns;
275
276
pc_start = tb->pc;
277
dc->features = cpu->features;
278
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
279
280
page_start = pc_start & TARGET_PAGE_MASK;
281
num_insns = 0;
282
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
283
- if (max_insns == 0) {
284
- max_insns = CF_COUNT_MASK;
285
- }
286
- if (max_insns > TCG_MAX_INSNS) {
287
- max_insns = TCG_MAX_INSNS;
288
- }
289
290
gen_tb_start(tb);
291
do {
292
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
293
index XXXXXXX..XXXXXXX 100644
641
index XXXXXXX..XXXXXXX 100644
294
--- a/target/m68k/translate.c
642
--- a/target/m68k/translate.c
295
+++ b/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
296
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
297
.disas_log = m68k_tr_disas_log,
645
.tb_stop = m68k_tr_tb_stop,
298
};
646
};
299
647
300
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
301
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
649
- vaddr pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
302
{
652
{
303
DisasContext dc;
653
DisasContext dc;
304
- translator_loop(&m68k_tr_ops, &dc.base, cpu, tb);
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
305
+ translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
306
}
656
index XXXXXXX..XXXXXXX 100644
307
657
--- a/target/microblaze/cpu.c
308
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
658
+++ b/target/microblaze/cpu.c
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
660
661
static const TCGCPUOps mb_tcg_ops = {
662
.initialize = mb_tcg_init,
663
+ .translate_code = mb_translate_code,
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
309
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
310
index XXXXXXX..XXXXXXX 100644
668
index XXXXXXX..XXXXXXX 100644
311
--- a/target/microblaze/translate.c
669
--- a/target/microblaze/translate.c
312
+++ b/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
313
@@ -XXX,XX +XXX,XX @@ static inline void decode(DisasContext *dc, uint32_t ir)
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
314
}
672
.tb_stop = mb_tr_tb_stop,
315
673
};
316
/* generate intermediate code for basic block 'tb'. */
674
317
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
675
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
318
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
676
- vaddr pc, void *host_pc)
319
{
677
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
320
CPUMBState *env = cs->env_ptr;
678
+ int *max_insns, vaddr pc, void *host_pc)
321
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
679
{
322
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
680
DisasContext dc;
323
uint32_t page_start, org_flags;
681
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
324
uint32_t npc;
682
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
325
int num_insns;
683
index XXXXXXX..XXXXXXX 100644
326
- int max_insns;
684
--- a/target/mips/cpu.c
327
685
+++ b/target/mips/cpu.c
328
pc_start = tb->pc;
686
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
329
dc->cpu = cpu;
687
#include "hw/core/tcg-cpu-ops.h"
330
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
688
static const TCGCPUOps mips_tcg_ops = {
331
689
.initialize = mips_tcg_init,
332
page_start = pc_start & TARGET_PAGE_MASK;
690
+ .translate_code = mips_translate_code,
333
num_insns = 0;
691
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
334
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
692
.restore_state_to_opc = mips_restore_state_to_opc,
335
- if (max_insns == 0) {
693
336
- max_insns = CF_COUNT_MASK;
694
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
337
- }
695
index XXXXXXX..XXXXXXX 100644
338
- if (max_insns > TCG_MAX_INSNS) {
696
--- a/target/mips/tcg/translate.c
339
- max_insns = TCG_MAX_INSNS;
697
+++ b/target/mips/tcg/translate.c
340
- }
341
342
gen_tb_start(tb);
343
do
344
diff --git a/target/mips/translate.c b/target/mips/translate.c
345
index XXXXXXX..XXXXXXX 100644
346
--- a/target/mips/translate.c
347
+++ b/target/mips/translate.c
348
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
698
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
349
.disas_log = mips_tr_disas_log,
699
.tb_stop = mips_tr_tb_stop,
350
};
700
};
351
701
352
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
702
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
353
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
703
- vaddr pc, void *host_pc)
704
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
705
+ int *max_insns, vaddr pc, void *host_pc)
354
{
706
{
355
DisasContext ctx;
707
DisasContext ctx;
356
708
357
- translator_loop(&mips_tr_ops, &ctx.base, cs, tb);
709
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
358
+ translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
710
index XXXXXXX..XXXXXXX 100644
359
}
711
--- a/target/openrisc/cpu.c
360
712
+++ b/target/openrisc/cpu.c
361
static void fpu_dump_state(CPUMIPSState *env, FILE *f, int flags)
713
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
362
diff --git a/target/moxie/translate.c b/target/moxie/translate.c
714
363
index XXXXXXX..XXXXXXX 100644
715
static const TCGCPUOps openrisc_tcg_ops = {
364
--- a/target/moxie/translate.c
716
.initialize = openrisc_translate_init,
365
+++ b/target/moxie/translate.c
717
+ .translate_code = openrisc_translate_code,
366
@@ -XXX,XX +XXX,XX @@ static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
718
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
367
}
719
.restore_state_to_opc = openrisc_restore_state_to_opc,
368
720
369
/* generate intermediate code for basic block 'tb'. */
370
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
371
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
372
{
373
CPUMoxieState *env = cs->env_ptr;
374
MoxieCPU *cpu = moxie_env_get_cpu(env);
375
DisasContext ctx;
376
target_ulong pc_start;
377
- int num_insns, max_insns;
378
+ int num_insns;
379
380
pc_start = tb->pc;
381
ctx.pc = pc_start;
382
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
383
ctx.singlestep_enabled = 0;
384
ctx.bstate = BS_NONE;
385
num_insns = 0;
386
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
387
- if (max_insns == 0) {
388
- max_insns = CF_COUNT_MASK;
389
- }
390
- if (max_insns > TCG_MAX_INSNS) {
391
- max_insns = TCG_MAX_INSNS;
392
- }
393
394
gen_tb_start(tb);
395
do {
396
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
397
index XXXXXXX..XXXXXXX 100644
398
--- a/target/nios2/translate.c
399
+++ b/target/nios2/translate.c
400
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *dc, uint32_t excp)
401
}
402
403
/* generate intermediate code for basic block 'tb'. */
404
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
405
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
406
{
407
CPUNios2State *env = cs->env_ptr;
408
DisasContext dc1, *dc = &dc1;
409
int num_insns;
410
- int max_insns;
411
412
/* Initialize DC */
413
dc->cpu_env = cpu_env;
414
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
415
416
/* Set up instruction counts */
417
num_insns = 0;
418
- if (cs->singlestep_enabled || singlestep) {
419
- max_insns = 1;
420
- } else {
421
+ if (max_insns > 1) {
422
int page_insns = (TARGET_PAGE_SIZE - (tb->pc & TARGET_PAGE_MASK)) / 4;
423
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
424
- if (max_insns == 0) {
425
- max_insns = CF_COUNT_MASK;
426
- }
427
if (max_insns > page_insns) {
428
max_insns = page_insns;
429
}
430
- if (max_insns > TCG_MAX_INSNS) {
431
- max_insns = TCG_MAX_INSNS;
432
- }
433
}
434
435
gen_tb_start(tb);
436
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
721
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
437
index XXXXXXX..XXXXXXX 100644
722
index XXXXXXX..XXXXXXX 100644
438
--- a/target/openrisc/translate.c
723
--- a/target/openrisc/translate.c
439
+++ b/target/openrisc/translate.c
724
+++ b/target/openrisc/translate.c
440
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
725
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
441
.disas_log = openrisc_tr_disas_log,
726
.tb_stop = openrisc_tr_tb_stop,
442
};
727
};
443
728
444
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
729
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
445
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
730
- vaddr pc, void *host_pc)
731
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
732
+ int *max_insns, vaddr pc, void *host_pc)
446
{
733
{
447
DisasContext ctx;
734
DisasContext ctx;
448
735
449
- translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
736
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
450
+ translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
737
index XXXXXXX..XXXXXXX 100644
451
}
738
--- a/target/ppc/cpu_init.c
452
739
+++ b/target/ppc/cpu_init.c
453
void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
740
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
741
742
static const TCGCPUOps ppc_tcg_ops = {
743
.initialize = ppc_translate_init,
744
+ .translate_code = ppc_translate_code,
745
.restore_state_to_opc = ppc_restore_state_to_opc,
746
747
#ifdef CONFIG_USER_ONLY
454
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
748
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
455
index XXXXXXX..XXXXXXX 100644
749
index XXXXXXX..XXXXXXX 100644
456
--- a/target/ppc/translate.c
750
--- a/target/ppc/translate.c
457
+++ b/target/ppc/translate.c
751
+++ b/target/ppc/translate.c
458
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
752
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
459
.disas_log = ppc_tr_disas_log,
753
.tb_stop = ppc_tr_tb_stop,
460
};
754
};
461
755
462
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
756
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
463
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
757
- vaddr pc, void *host_pc)
758
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
759
+ int *max_insns, vaddr pc, void *host_pc)
464
{
760
{
465
DisasContext ctx;
761
DisasContext ctx;
466
762
467
- translator_loop(&ppc_tr_ops, &ctx.base, cs, tb);
763
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
468
+ translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
764
index XXXXXXX..XXXXXXX 100644
469
}
765
--- a/target/riscv/tcg/tcg-cpu.c
470
766
+++ b/target/riscv/tcg/tcg-cpu.c
471
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
767
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
768
769
static const TCGCPUOps riscv_tcg_ops = {
770
.initialize = riscv_translate_init,
771
+ .translate_code = riscv_translate_code,
772
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
773
.restore_state_to_opc = riscv_restore_state_to_opc,
774
472
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
775
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
473
index XXXXXXX..XXXXXXX 100644
776
index XXXXXXX..XXXXXXX 100644
474
--- a/target/riscv/translate.c
777
--- a/target/riscv/translate.c
475
+++ b/target/riscv/translate.c
778
+++ b/target/riscv/translate.c
476
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
779
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
477
.disas_log = riscv_tr_disas_log,
780
.tb_stop = riscv_tr_tb_stop,
478
};
781
};
479
782
480
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
783
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
481
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
784
- vaddr pc, void *host_pc)
785
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
786
+ int *max_insns, vaddr pc, void *host_pc)
482
{
787
{
483
DisasContext ctx;
788
DisasContext ctx;
484
789
485
- translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
790
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
486
+ translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
791
index XXXXXXX..XXXXXXX 100644
487
}
792
--- a/target/rx/cpu.c
488
793
+++ b/target/rx/cpu.c
489
void riscv_translate_init(void)
794
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
490
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
795
491
index XXXXXXX..XXXXXXX 100644
796
static const TCGCPUOps rx_tcg_ops = {
492
--- a/target/s390x/translate.c
797
.initialize = rx_translate_init,
493
+++ b/target/s390x/translate.c
798
+ .translate_code = rx_translate_code,
799
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
800
.restore_state_to_opc = rx_restore_state_to_opc,
801
.tlb_fill = rx_cpu_tlb_fill,
802
diff --git a/target/rx/translate.c b/target/rx/translate.c
803
index XXXXXXX..XXXXXXX 100644
804
--- a/target/rx/translate.c
805
+++ b/target/rx/translate.c
806
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
807
.tb_stop = rx_tr_tb_stop,
808
};
809
810
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
811
- vaddr pc, void *host_pc)
812
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
813
+ int *max_insns, vaddr pc, void *host_pc)
814
{
815
DisasContext dc;
816
817
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
818
index XXXXXXX..XXXXXXX 100644
819
--- a/target/s390x/cpu.c
820
+++ b/target/s390x/cpu.c
821
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
822
823
static const TCGCPUOps s390_tcg_ops = {
824
.initialize = s390x_translate_init,
825
+ .translate_code = s390x_translate_code,
826
.restore_state_to_opc = s390x_restore_state_to_opc,
827
828
#ifdef CONFIG_USER_ONLY
829
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
830
index XXXXXXX..XXXXXXX 100644
831
--- a/target/s390x/tcg/translate.c
832
+++ b/target/s390x/tcg/translate.c
494
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
833
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
495
.disas_log = s390x_tr_disas_log,
834
.disas_log = s390x_tr_disas_log,
496
};
835
};
497
836
498
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
837
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
499
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
838
- vaddr pc, void *host_pc)
839
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
840
+ int *max_insns, vaddr pc, void *host_pc)
500
{
841
{
501
DisasContext dc;
842
DisasContext dc;
502
843
503
- translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
844
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
504
+ translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
845
index XXXXXXX..XXXXXXX 100644
505
}
846
--- a/target/sh4/cpu.c
506
847
+++ b/target/sh4/cpu.c
507
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
848
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
849
850
static const TCGCPUOps superh_tcg_ops = {
851
.initialize = sh4_translate_init,
852
+ .translate_code = sh4_translate_code,
853
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
854
.restore_state_to_opc = superh_restore_state_to_opc,
855
508
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
856
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
509
index XXXXXXX..XXXXXXX 100644
857
index XXXXXXX..XXXXXXX 100644
510
--- a/target/sh4/translate.c
858
--- a/target/sh4/translate.c
511
+++ b/target/sh4/translate.c
859
+++ b/target/sh4/translate.c
512
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
860
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
513
.disas_log = sh4_tr_disas_log,
861
.tb_stop = sh4_tr_tb_stop,
514
};
862
};
515
863
516
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
864
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
517
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
865
- vaddr pc, void *host_pc)
866
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
867
+ int *max_insns, vaddr pc, void *host_pc)
518
{
868
{
519
DisasContext ctx;
869
DisasContext ctx;
520
870
521
- translator_loop(&sh4_tr_ops, &ctx.base, cs, tb);
871
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
522
+ translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
872
index XXXXXXX..XXXXXXX 100644
523
}
873
--- a/target/sparc/cpu.c
524
874
+++ b/target/sparc/cpu.c
525
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
875
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
876
877
static const TCGCPUOps sparc_tcg_ops = {
878
.initialize = sparc_tcg_init,
879
+ .translate_code = sparc_translate_code,
880
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
881
.restore_state_to_opc = sparc_restore_state_to_opc,
882
526
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
883
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
527
index XXXXXXX..XXXXXXX 100644
884
index XXXXXXX..XXXXXXX 100644
528
--- a/target/sparc/translate.c
885
--- a/target/sparc/translate.c
529
+++ b/target/sparc/translate.c
886
+++ b/target/sparc/translate.c
530
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
887
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
531
.disas_log = sparc_tr_disas_log,
888
.tb_stop = sparc_tr_tb_stop,
532
};
889
};
533
890
534
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
891
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
535
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
892
- vaddr pc, void *host_pc)
893
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
894
+ int *max_insns, vaddr pc, void *host_pc)
536
{
895
{
537
DisasContext dc = {};
896
DisasContext dc = {};
538
897
539
- translator_loop(&sparc_tr_ops, &dc.base, cs, tb);
898
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
540
+ translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
899
index XXXXXXX..XXXXXXX 100644
541
}
900
--- a/target/tricore/cpu.c
542
901
+++ b/target/tricore/cpu.c
543
void sparc_tcg_init(void)
902
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
544
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
903
545
index XXXXXXX..XXXXXXX 100644
904
static const TCGCPUOps tricore_tcg_ops = {
546
--- a/target/tilegx/translate.c
905
.initialize = tricore_tcg_init,
547
+++ b/target/tilegx/translate.c
906
+ .translate_code = tricore_translate_code,
548
@@ -XXX,XX +XXX,XX @@ static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
907
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
549
}
908
.restore_state_to_opc = tricore_restore_state_to_opc,
550
}
909
.tlb_fill = tricore_cpu_tlb_fill,
551
552
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
553
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
554
{
555
CPUTLGState *env = cs->env_ptr;
556
DisasContext ctx;
557
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
558
uint64_t pc_start = tb->pc;
559
uint64_t page_start = pc_start & TARGET_PAGE_MASK;
560
int num_insns = 0;
561
- int max_insns = tb_cflags(tb) & CF_COUNT_MASK;
562
563
dc->pc = pc_start;
564
dc->mmuidx = 0;
565
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
566
qemu_log_lock();
567
qemu_log("IN: %s\n", lookup_symbol(pc_start));
568
}
569
- if (!max_insns) {
570
- max_insns = CF_COUNT_MASK;
571
- }
572
- if (cs->singlestep_enabled || singlestep) {
573
- max_insns = 1;
574
- }
575
- if (max_insns > TCG_MAX_INSNS) {
576
- max_insns = TCG_MAX_INSNS;
577
- }
578
gen_tb_start(tb);
579
580
while (1) {
581
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
910
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
582
index XXXXXXX..XXXXXXX 100644
911
index XXXXXXX..XXXXXXX 100644
583
--- a/target/tricore/translate.c
912
--- a/target/tricore/translate.c
584
+++ b/target/tricore/translate.c
913
+++ b/target/tricore/translate.c
585
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
914
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
586
}
915
.tb_stop = tricore_tr_tb_stop,
587
}
916
};
588
917
589
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
918
-
590
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
919
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
591
{
920
- vaddr pc, void *host_pc)
592
CPUTriCoreState *env = cs->env_ptr;
921
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
922
+ int *max_insns, vaddr pc, void *host_pc)
923
{
593
DisasContext ctx;
924
DisasContext ctx;
594
target_ulong pc_start;
925
translator_loop(cs, tb, max_insns, pc, host_pc,
595
- int num_insns, max_insns;
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
596
-
927
index XXXXXXX..XXXXXXX 100644
597
- num_insns = 0;
928
--- a/target/xtensa/cpu.c
598
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
929
+++ b/target/xtensa/cpu.c
599
- if (max_insns == 0) {
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
600
- max_insns = CF_COUNT_MASK;
931
601
- }
932
static const TCGCPUOps xtensa_tcg_ops = {
602
- if (singlestep) {
933
.initialize = xtensa_translate_init,
603
- max_insns = 1;
934
+ .translate_code = xtensa_translate_code,
604
- }
935
.debug_excp_handler = xtensa_breakpoint_handler,
605
- if (max_insns > TCG_MAX_INSNS) {
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
606
- max_insns = TCG_MAX_INSNS;
937
607
- }
608
+ int num_insns = 0;
609
610
pc_start = tb->pc;
611
ctx.pc = pc_start;
612
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
613
index XXXXXXX..XXXXXXX 100644
614
--- a/target/unicore32/translate.c
615
+++ b/target/unicore32/translate.c
616
@@ -XXX,XX +XXX,XX @@ static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
617
}
618
619
/* generate intermediate code for basic block 'tb'. */
620
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
621
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
622
{
623
CPUUniCore32State *env = cs->env_ptr;
624
DisasContext dc1, *dc = &dc1;
625
target_ulong pc_start;
626
uint32_t page_start;
627
int num_insns;
628
- int max_insns;
629
630
/* generate intermediate code */
631
num_temps = 0;
632
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
633
cpu_F1d = tcg_temp_new_i64();
634
page_start = pc_start & TARGET_PAGE_MASK;
635
num_insns = 0;
636
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
637
- if (max_insns == 0) {
638
- max_insns = CF_COUNT_MASK;
639
- }
640
- if (max_insns > TCG_MAX_INSNS) {
641
- max_insns = TCG_MAX_INSNS;
642
- }
643
644
#ifndef CONFIG_USER_ONLY
645
if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
646
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
647
index XXXXXXX..XXXXXXX 100644
939
index XXXXXXX..XXXXXXX 100644
648
--- a/target/xtensa/translate.c
940
--- a/target/xtensa/translate.c
649
+++ b/target/xtensa/translate.c
941
+++ b/target/xtensa/translate.c
650
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
651
.disas_log = xtensa_tr_disas_log,
943
.tb_stop = xtensa_tr_tb_stop,
652
};
944
};
653
945
654
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
655
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
947
- vaddr pc, void *host_pc)
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
949
+ int *max_insns, vaddr pc, void *host_pc)
656
{
950
{
657
DisasContext dc = {};
951
DisasContext dc = {};
658
- translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb);
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
659
+ translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
660
}
661
662
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
663
--
2.17.1

--
2.43.0

diff view generated by jsdifflib