The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
17 | #include <inttypes.h>
| ^~~~~~~~~~~~
compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tests/tcg/multiarch/system/memory.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@

#include <stdint.h>
#include <stdbool.h>
-#include <inttypes.h>
#include <minilib.h>

#ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
int i;
bool ok = true;

- ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
- ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+ ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+ ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);

/* Run through the unsigned tests first */
for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
ok = do_signed_reads(true);
}

- ml_printf("Test data read: %"PRId32"\n", test_read_count);
- ml_printf("Test data write: %"PRId32"\n", test_write_count);
+ ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+ ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
return ok ? 0 : -1;
}
-- 
2.43.0
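As a stand-alone illustration of the workaround used in this patch (not QEMU code), the same effect can be had with plain printf specifiers by casting to unsigned long, so neither <inttypes.h> nor the PRIxPTR/PRId32 macros are needed. The test_data/test_read_count names below are stand-ins for the variables in memory.c.

/* Minimal sketch of printing a pointer and a 32-bit counter without
 * <inttypes.h>: cast to unsigned long and use %lx / %lu. */
#include <stdio.h>
#include <stdint.h>

static uint8_t test_data[16];          /* stand-in for the test buffer */
static uint32_t test_read_count = 42;  /* stand-in for the counter */

int main(void)
{
    printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
    printf("Test data read: %lu\n", (unsigned long)test_read_count);
    return 0;
}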
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
accel/tcg/plugin-gen.c | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)

static TCGv_i32 gen_cpu_index(void)
{
+ /*
+ * Optimize when we run with a single vcpu. All values using cpu_index,
+ * including scoreboard index, will be optimized out.
+ * User-mode calls tb_flush when setting this flag. In system-mode, all
+ * vcpus are created before generating code.
+ */
+ if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+ return tcg_constant_i32(current_cpu->cpu_index);
+ }
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-- 
2.43.0
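A toy model of the idea, in plain C rather than TCG (all names below are invented for illustration): once the vcpu index is a known constant, everything derived from it, such as a scoreboard slot address, can be folded to a constant as well, which is where the speedup comes from.

/* Sketch only, assuming a single-vcpu ("non-parallel") configuration. */
#include <stdio.h>
#include <stdbool.h>

#define SCOREBOARD_STRIDE 64

static long scoreboard[4 * SCOREBOARD_STRIDE];

/* Hypothetical helper: the index is a literal constant when not parallel,
 * mirroring tcg_constant_i32(cpu_index) vs. a load in gen_cpu_index(). */
static inline int cpu_index(bool parallel, int runtime_index)
{
    return parallel ? runtime_index : 0;
}

int main(void)
{
    /* With parallel == false the whole address computation is
     * constant-foldable; with true it needs a runtime load. */
    long *slot = &scoreboard[cpu_index(false, 3) * SCOREBOARD_STRIDE];
    *slot += 1;
    printf("slot value: %ld\n", *slot);
    return 0;
}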
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}
}

+static void finish_bb(OptContext *ctx)
+{
+ /* We only optimize memory barriers across basic blocks. */
+ ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+ finish_bb(ctx);
+ /* We only optimize across extended basic blocks. */
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
+}
+
static void finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;

- /*
- * We only optimize extended basic blocks. If the opcode ends a BB
- * and is not a conditional branch, reset all temp data.
- */
- if (def->flags & TCG_OPF_BB_END) {
- ctx->prev_mb = NULL;
- if (!(def->flags & TCG_OPF_COND_BRANCH)) {
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
- remove_mem_copy_all(ctx);
- }
- return;
- }
-
nb_oargs = def->nb_oargs;
for (i = 0; i < nb_oargs; i++) {
TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
if (i > 0) {
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
+ finish_ebb(ctx);
+ } else {
+ finish_bb(ctx);
}
- return false;
+ return true;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
}
op->opc = INDEX_op_br;
op->args[0] = label;
- break;
+ finish_ebb(ctx);
+ return true;
}
- return false;
+
+ finish_bb(ctx);
+ return true;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(xor):
done = fold_xor(&ctx, op);
break;
+ case INDEX_op_set_label:
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ case INDEX_op_goto_ptr:
+ finish_ebb(&ctx);
+ done = true;
+ break;
default:
break;
}
-- 
2.43.0
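A toy model (plain C, not QEMU code) of why the patch separates the two cases: a conditional branch only ends a basic block, so facts learned about temps may survive into the fall-through path of the same extended basic block; anything that ends the extended basic block (a label, an unconditional branch, exit_tb, ...) must drop them. The struct and field names below are invented stand-ins for OptContext state.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct opt_state {
    bool mb_pending;        /* stand-in for ctx->prev_mb */
    int known_const[8];     /* stand-in for per-temp constant info */
};

static void finish_bb(struct opt_state *s)
{
    s->mb_pending = false;  /* barriers tracked per basic block only */
}

static void finish_ebb(struct opt_state *s)
{
    finish_bb(s);
    memset(s->known_const, 0, sizeof(s->known_const));  /* drop temp facts */
}

int main(void)
{
    struct opt_state s = { .mb_pending = true, .known_const = { [3] = 42 } };

    finish_bb(&s);   /* after a conditional branch */
    printf("after brcond: temp3 still known = %d\n", s.known_const[3]);

    finish_ebb(&s);  /* after a label / unconditional branch */
    printf("after label:  temp3 known = %d\n", s.known_const[3]);
    return 0;
}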
There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

/* In flight values from optimization. */
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
uint64_t s_mask; /* mask of clrsb(value) bits */
TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
- uint64_t a_mask = ctx->a_mask;
uint64_t z_mask = ctx->z_mask;
uint64_t s_mask = ctx->s_mask;

@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
* type changing opcodes.
*/
if (ctx->type == TCG_TYPE_I32) {
- a_mask = (int32_t)a_mask;
z_mask = (int32_t)z_mask;
s_mask |= MAKE_64BIT_MASK(32, 32);
ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
if (z_mask == 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
}
+ return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input. Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+ if (ctx->type == TCG_TYPE_I32) {
+ a_mask = (uint32_t)a_mask;
+ }
if (a_mask == 0) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer affected bits from it.
*/
- if (arg_is_const(op->args[2])) {
- ctx->a_mask = z1 & ~z2;
+ if (arg_is_const(op->args[2]) &&
+ fold_affected_mask(ctx, op, z1 & ~z2)) {
+ return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
*/
if (arg_is_const(op->args[2])) {
uint64_t z2 = ~arg_info(op->args[2])->z_mask;
- ctx->a_mask = z1 & ~z2;
+ if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+ return true;
+ }
z1 &= z2;
}
ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)

z_mask_old = arg_info(op->args[1])->z_mask;
z_mask = extract64(z_mask_old, pos, len);
- if (pos == 0) {
- ctx->a_mask = z_mask_old ^ z_mask;
+ if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+ return true;
}
ctx->z_mask = z_mask;
ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = s_mask;
- if (!type_change) {
- ctx->a_mask = s_mask & ~s_mask_old;
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = smask_from_zmask(z_mask);
- if (!type_change) {
- ctx->a_mask = z_mask_old ^ z_mask;
+ if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+ return true;
}
return fold_masks(ctx, op);
}
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
ctx->s_mask = s_mask;

- if (pos == 0) {
- ctx->a_mask = s_mask & ~s_mask_old;
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
}

/* Assume all bits affected, no bits known zero, no sign reps. */
- ctx.a_mask = -1;
ctx.z_mask = -1;
ctx.s_mask = 0;

-- 
2.43.0
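A worked example of the "affected" mask described in the new comment, as plain C arithmetic rather than QEMU code: for AND with a constant, a_mask = z1 & ~z2 has a 1 for every bit the AND could actually clear; if it is zero, the operation cannot change the first input and folds to a copy.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t z1 = 0x00000000000000ffull; /* possible-one bits of arg1 */
    uint64_t z2 = 0x00000000ffffffffull; /* constant AND mask (arg2) */
    uint64_t a_mask = z1 & ~z2;

    if (a_mask == 0) {
        /* arg2's zero bits only cover bits already known zero in arg1:
         * "and t0, t1, 0xffffffff" is equivalent to "mov t0, t1". */
        printf("fold to mov\n");
    } else {
        printf("keep the and, affected bits = %#llx\n",
               (unsigned long long)a_mask);
    }
    return 0;
}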
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask = ctx->z_mask;
uint64_t s_mask = ctx->s_mask;
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ TCGTemp *ts;
+ TempOptInfo *ti;
+
+ /* Only single-output opcodes are supported here. */
+ tcg_debug_assert(def->nb_oargs == 1);

/*
* 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
if (ctx->type == TCG_TYPE_I32) {
z_mask = (int32_t)z_mask;
s_mask |= MAKE_64BIT_MASK(32, 32);
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;
}

if (z_mask == 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
}
- return false;
+
+ ts = arg_temp(op->args[0]);
+ reset_ts(ctx, ts);
+
+ ti = ts_info(ts);
+ ti->z_mask = z_mask;
+ ti->s_mask = s_mask;
+ return true;
}

/*
-- 
2.43.0
Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}

-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+ uint64_t z_mask, uint64_t s_mask)
{
- uint64_t z_mask = ctx->z_mask;
- uint64_t s_mask = ctx->s_mask;
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGTemp *ts;
TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
return true;
}

+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+ return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
/*
* An "affected" mask bit is 0 if and only if the result is identical
* to the first input. Thus if the entire mask is 0, the operation
-- 
2.43.0
Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 21 ++++++---------------
1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
* Record "zero" and "sign" masks for the single output of @op.
* See TempOptInfo definition of z_mask and s_mask.
* If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
*/
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

ti = ts_info(ts);
ti->z_mask = z_mask;
- ti->s_mask = s_mask;
+ ti->s_mask = s_mask | smask_from_zmask(z_mask);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
- s_mask = smask_from_zmask(z_mask);

+ s_mask = 0;
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
case TCG_BSWAP_OZ:
break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
default:
/* The high bits are undefined: force all bits above the sign to 1. */
z_mask |= sign << 1;
- s_mask = 0;
break;
}
ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
g_assert_not_reached();
}
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
return true;
}
ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);

return fold_masks(ctx, op);
}
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
}

ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
int width = 8 * memop_size(mop);

if (width < 64) {
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
- if (!(mop & MO_SIGN)) {
+ if (mop & MO_SIGN) {
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+ } else {
ctx->z_mask = MAKE_64BIT_MASK(0, width);
- ctx->s_mask <<= 1;
}
}

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
fold_setcond_tst_pow2(ctx, op, false);

ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
}

ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
return false;

do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
break;
CASE_OP_32_64(ld8u):
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
break;
CASE_OP_32_64(ld16s):
ctx->s_mask = MAKE_64BIT_MASK(16, 48);
break;
CASE_OP_32_64(ld16u):
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
break;
case INDEX_op_ld32s_i64:
ctx->s_mask = MAKE_64BIT_MASK(32, 32);
break;
case INDEX_op_ld32u_i64:
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
break;
default:
g_assert_not_reached();
-- 
2.43.0
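A worked example (plain C, not QEMU code) of what "deduce more from the set of known zeros" means here, using the pre-series "repetitions" form of s_mask: if the high bits of a value are known zero, they are all copies of the (zero) sign bit, so the sign mask can be widened even when the opcode supplied no sign information. The helper below mirrors the smask_from_zmask() logic quoted in this series for a nonzero zmask; it is a sketch, not the QEMU implementation.

#include <stdio.h>
#include <stdint.h>

static uint64_t smask_from_zmask(uint64_t zmask)
{
    int rep = __builtin_clzll(zmask);  /* stand-in for clz64; zmask != 0 */
    if (rep == 0) {
        return 0;                      /* msb may be 1: no sign info */
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

int main(void)
{
    uint64_t z_mask = 0x00000000ffffffffull;  /* high 32 bits known zero */
    uint64_t s_mask = 0;                      /* nothing known from the op */

    s_mask |= smask_from_zmask(z_mask);
    printf("augmented s_mask = %#llx\n", (unsigned long long)s_mask);
    /* prints 0xfffffffe00000000: 31 guaranteed redundant sign bits */
    return 0;
}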
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
lead to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, canonicalization
is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 64 ++++++++++++--------------------------------------
1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
uint64_t val;
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
} TempOptInfo;

typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {

/* In flight values from optimization. */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
- uint64_t s_mask; /* mask of clrsb(value) bits */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
TCGType type;
} OptContext;

-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
- int rep = clrsb64(value);
- return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
- /*
- * Only the 0 bits are significant for zmask, thus the msb itself
- * must be zero, else we have no sign information.
- */
- int rep = clz64(zmask);
- if (rep == 0) {
- return 0;
- }
- rep -= 1;
- return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
- /* Only the 1 bits are significant for smask */
- return smask_from_zmask(~smask);
-}
-
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->is_const = true;
ti->val = ts->val;
ti->z_mask = ts->val;
- ti->s_mask = smask_from_value(ts->val);
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
} else {
ti->is_const = false;
ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
*/
if (i == 0) {
ts_info(ts)->z_mask = ctx->z_mask;
- ts_info(ts)->s_mask = ctx->s_mask;
}
}
}
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
* The passed s_mask may be augmented by z_mask.
*/
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
- uint64_t z_mask, uint64_t s_mask)
+ uint64_t z_mask, int64_t s_mask)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGTemp *ts;
TempOptInfo *ti;
+ int rep;

/* Only single-output opcodes are supported here. */
tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
*/
if (ctx->type == TCG_TYPE_I32) {
z_mask = (int32_t)z_mask;
- s_mask |= MAKE_64BIT_MASK(32, 32);
+ s_mask |= INT32_MIN;
}

if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

ti = ts_info(ts);
ti->z_mask = z_mask;
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+ /* Canonicalize s_mask and incorporate data from z_mask. */
+ rep = clz64(~s_mask);
+ rep = MAX(rep, clz64(z_mask));
+ rep = MAX(rep - 1, 0);
+ ti->s_mask = INT64_MIN >> rep;
+
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = s_mask;
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
ctx->s_mask = s_mask;

- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
- ctx->s_mask = smask_from_smask(s_mask);

return fold_masks(ctx, op);
-- 
2.43.0
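A worked example (plain C, not QEMU code) of the two s_mask encodings discussed above. clrsb64(v) counts the redundant sign bits of v. The old form is a left-aligned mask of clrsb64(v) bits, excluding the sign bit itself; the new form is every bit that equals the msb, including the msb, computed as INT64_MIN >> clrsb64(v), so it can be manipulated like the value itself. The sketch assumes GCC/Clang builtins and arithmetic right shift of signed values, as the patch does.

#include <stdio.h>
#include <stdint.h>

static void show(int64_t v)
{
    int rep = __builtin_clrsbll(v);                  /* clrsb64(v) */
    uint64_t s_old = ~(~0ull >> rep);                /* old encoding */
    uint64_t s_new = (uint64_t)(INT64_MIN >> rep);   /* new encoding */

    printf("v=%#018llx  clrsb=%2d  old=%#018llx  new=%#018llx\n",
           (unsigned long long)v, rep,
           (unsigned long long)s_old, (unsigned long long)s_new);
}

int main(void)
{
    show(-256);   /* bits 63..8 all match the msb */
    show(0x7f);   /* positive: bits 63..7 all match the (zero) msb */
    show(-1);     /* every bit matches the msb */
    return 0;
}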
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
remove_mem_copy_all(ctx);
}

-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
ts_info(ts)->z_mask = ctx->z_mask;
}
}
+ return true;
}

/*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
op->args[4] = arg_new_constant(ctx, bl);
op->args[5] = arg_new_constant(ctx, bh);
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
-- 
2.43.0
Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
return ts_info(arg_temp(arg));
}

+static inline bool ti_is_const(TempOptInfo *ti)
+{
+ return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+ return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+ return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
static inline bool ts_is_const(TCGTemp *ts)
{
- return ts_info(ts)->is_const;
+ return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
- TempOptInfo *ti = ts_info(ts);
- return ti->is_const && ti->val == val;
+ return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
-- 
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Sink mask computation below fold_affected_mask early exit.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of second const operand.
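
A hedged illustration of the simplification (not from the patch itself):
andc computes a & ~b, so with operand 2 a known constant v2 there is no need
to invert its z_mask only to invert the result again:

    /* Every bit set in v2 is forced to 0, so the result may be 1 only in
     * z1 & ~v2, and the bits of a that stop mattering are exactly z1 & v2. */
    static uint64_t andc_z_mask(uint64_t z1, uint64_t v2)
    {
        return z1 & ~v2;
    }
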
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
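
A minimal sketch of the BSWAP_OS case (illustrative only; __builtin_bswap16
stands in for the TCG byte-swap op):

    /* Byte-swap then sign-extend: bits 63..15 of the result are all copies
     * of bit 15, so s_mask can be set unconditionally on this path. */
    static int64_t bswap16_os(uint16_t x)
    {
        return (int16_t)__builtin_bswap16(x);
    }
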
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
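
A rough sketch of the reasoning, hedged because the maximum-count mask itself
is elided from the hunk below:

    /* For a 64-bit clz/ctz the count fits in the low 6 bits, so the bits
     * above are zero, i.e. copies of the (zero) sign: s_mask = ~z_mask.
     * When op1 is zero the result is op2 instead, hence the final
     * z_mask |= t2->z_mask and s_mask &= t2->s_mask. */
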
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
1
It saves one insn to load the address of TB+4 instead of TB.
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Adjust all of the indexing to match.
2
Avoid the use of the OptContext slots.
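
For the PPC patch above, a brief sketch of where the saved instruction comes
from in the USE_REG_TB prologue (assembly shown only for illustration):

    bcl    20,31,$+4      /* LR <- address of the next insn, i.e. TB+4   */
    mfspr  r_tb, LR       /* previously followed by: addi r_tb, r_tb, -4 */
    /* With TB+4 as the base, ppc_tbrel_diff() simply subtracts 4 when
     * computing displacements, and the trailing addi disappears.        */
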
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/ppc/tcg-target.c.inc | 15 ++++++++++-----
7
tcg/optimize.c | 13 ++++++++++---
7
1 file changed, 10 insertions(+), 5 deletions(-)
8
1 file changed, 10 insertions(+), 3 deletions(-)
8
9
9
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/tcg/optimize.c
12
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_callee_save_regs[] = {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
14
TCG_REG_R31
15
return true;
15
};
16
}
16
17
17
+/* For PPC, we use TB+4 instead of TB as the base. */
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
18
+static inline ptrdiff_t ppc_tbrel_diff(TCGContext *s, const void *target)
19
+{
19
+{
20
+ return tcg_tbrel_diff(s, target) - 4;
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
21
+}
22
+
22
+
23
static inline bool in_range_b(tcg_target_long target)
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
24
{
25
return target == sextract64(target, 0, 26);
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
27
}
34
}
28
35
29
/* Load addresses within the TB with one insn. */
36
switch (ctx->type) {
30
- tb_diff = tcg_tbrel_diff(s, (void *)arg);
37
case TCG_TYPE_I32:
31
+ tb_diff = ppc_tbrel_diff(s, (void *)arg);
38
- ctx->z_mask = 32 | 31;
32
if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
39
+ z_mask = 32 | 31;
33
tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
40
break;
34
return;
41
case TCG_TYPE_I64:
35
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
42
- ctx->z_mask = 64 | 63;
36
/* Use the constant pool, if possible. */
43
+ z_mask = 64 | 63;
37
if (!in_prologue && USE_REG_TB) {
44
break;
38
new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
45
default:
39
- tcg_tbrel_diff(s, NULL));
46
g_assert_not_reached();
40
+ ppc_tbrel_diff(s, NULL));
41
tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
42
return;
43
}
47
}
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
48
- return false;
45
*/
49
+ return fold_masks_z(ctx, op, z_mask);
46
if (USE_REG_TB) {
47
rel = R_PPC_ADDR16;
48
- add = tcg_tbrel_diff(s, NULL);
49
+ add = ppc_tbrel_diff(s, NULL);
50
} else {
51
rel = R_PPC_ADDR32;
52
add = 0;
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tb_start(TCGContext *s)
54
/* bcl 20,31,$+4 (preferred form for getting nia) */
55
tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
56
tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
57
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, -4));
58
}
59
}
50
}
60
51
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
62
63
/* When branch is out of range, fall through to indirect. */
64
if (USE_REG_TB) {
65
- ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
66
+ ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr);
67
tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
68
} else {
69
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
70
--
53
--
71
2.34.1
54
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
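
A hedged worked example of why both special cases reduce to an AND that
fold_and can then analyse (deposit64 is QEMU's bitops helper):

    uint64_t mask = deposit64(-1, 8, 8, 0);   /* == 0xffffffffffff00ffull */
    /* Inserting zero at ofs = 8, len = 8 clears exactly that byte, so
     * deposit(a, 8, 8, 0) is just a & mask; handing the rewritten op to
     * fold_and lets it compute the result masks or fold further. */
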
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
New patch
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
1
When the offset is out of range of the non-prefixed insn, but
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
fits the 34-bit immediate of the prefixed insn, use that.
2
Avoid the use of the OptContext slots.
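
For the PPC change above, a minimal sketch of the range test involved,
reusing QEMU's sextract64 (illustrative only):

    /* An offset fits the prefixed 8LS:D/MLS:D forms iff it survives a
     * 34-bit sign extension; e.g. 0x12340 fails the 16-bit displacement
     * check but passes this one, so PLD/PSTD can be emitted directly. */
    static bool fits_prefixed_disp(int64_t offset)
    {
        return offset == sextract64(offset, 0, 34);
    }
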
3
3
4
Reviewed-by: Jordan Niethe <jniethe5@gmail.com>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/ppc/tcg-target.c.inc | 66 ++++++++++++++++++++++++++++++++++++++++
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 66 insertions(+)
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
9
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/tcg/optimize.c
13
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
#define STDX XO31(149)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
#define STQ XO62( 2)
17
18
+#define PLWA OPCD( 41)
19
+#define PLD OPCD( 57)
20
+#define PLXSD OPCD( 42)
21
+#define PLXV OPCD(25 * 2 + 1) /* force tx=1 */
22
+
23
+#define PSTD OPCD( 61)
24
+#define PSTXSD OPCD( 46)
25
+#define PSTXV OPCD(27 * 2 + 1) /* force sx=1 */
26
+
27
#define ADDIC OPCD( 12)
28
#define ADDI OPCD( 14)
29
#define ADDIS OPCD( 15)
30
@@ -XXX,XX +XXX,XX @@ static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target)
31
return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0);
32
}
16
}
33
17
34
+/* Output Type 00 Prefix - 8-Byte Load/Store Form (8LS:D) */
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
35
+static void tcg_out_8ls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
36
+ unsigned ra, tcg_target_long imm, bool r)
37
+{
19
+{
38
+ tcg_insn_unit p, i;
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
39
+
40
+ p = OPCD(1) | (r << 20) | ((imm >> 16) & 0x3ffff);
41
+ i = opc | TAI(rt, ra, imm);
42
+
43
+ tcg_out_prefix_align(s);
44
+ tcg_out32(s, p);
45
+ tcg_out32(s, i);
46
+}
21
+}
47
+
22
+
48
/* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
49
static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
24
{
50
unsigned ra, tcg_target_long imm, bool r)
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
52
break;
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
53
}
36
}
54
37
55
+ /* For unaligned or large offsets, use the prefixed form. */
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
56
+ if (have_isa_3_10
39
- & arg_info(op->args[2])->s_mask;
57
+ && (offset != (int16_t)offset || (offset & align))
40
- return false;
58
+ && offset == sextract64(offset, 0, 34)) {
41
+ s_mask = arg_info(op->args[1])->s_mask
59
+ /*
42
+ & arg_info(op->args[2])->s_mask;
60
+ * Note that the MLS:D insns retain their un-prefixed opcode,
43
+ return fold_masks_s(ctx, op, s_mask);
61
+ * while the 8LS:D insns use a different opcode space.
44
}
62
+ */
45
63
+ switch (opi) {
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
64
+ case LBZ:
65
+ case LHZ:
66
+ case LHA:
67
+ case LWZ:
68
+ case STB:
69
+ case STH:
70
+ case STW:
71
+ case ADDI:
72
+ tcg_out_mls_d(s, opi, rt, base, offset, 0);
73
+ return;
74
+ case LWA:
75
+ tcg_out_8ls_d(s, PLWA, rt, base, offset, 0);
76
+ return;
77
+ case LD:
78
+ tcg_out_8ls_d(s, PLD, rt, base, offset, 0);
79
+ return;
80
+ case STD:
81
+ tcg_out_8ls_d(s, PSTD, rt, base, offset, 0);
82
+ return;
83
+ case LXSD:
84
+ tcg_out_8ls_d(s, PLXSD, rt & 31, base, offset, 0);
85
+ return;
86
+ case STXSD:
87
+ tcg_out_8ls_d(s, PSTXSD, rt & 31, base, offset, 0);
88
+ return;
89
+ case LXV:
90
+ tcg_out_8ls_d(s, PLXV, rt & 31, base, offset, 0);
91
+ return;
92
+ case STXV:
93
+ tcg_out_8ls_d(s, PSTXV, rt & 31, base, offset, 0);
94
+ return;
95
+ }
96
+ }
97
+
98
/* For unaligned, or very large offsets, use the indexed form. */
99
if (offset & align || offset != (int32_t)offset || opi == 0) {
100
if (rs == base) {
101
--
47
--
102
2.34.1
48
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
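
For illustration (not part of the patch), the ext8s case as a single cast:

    uint64_t z_mask = 0x80;                      /* bit 7 may be set      */
    uint64_t z_ext  = (uint64_t)(int8_t)z_mask;  /* 0xffffffffffffff80    */
    /* The signed cast replaces the manual "if (z_mask & sign) z_mask |= sign"
     * step, while the sign bits go into s_mask via s_mask |= INT8_MIN. */
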
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
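
A short note on the mask combination used for movcond (restating the hunk
below rather than adding to it):

    /* The result is either the true or the false input, so a bit may be
     * nonzero if it may be nonzero in either, and is a sign copy only if
     * it is one in both:
     *   z_mask = tt->z_mask | ft->z_mask;
     *   s_mask = tt->s_mask & ft->s_mask;  */
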
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
3
---
3
tcg/ppc/tcg-target.c.inc | 6 +++---
4
tcg/optimize.c | 6 +++---
4
1 file changed, 3 insertions(+), 3 deletions(-)
5
1 file changed, 3 insertions(+), 3 deletions(-)
5
6
6
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/ppc/tcg-target.c.inc
9
--- a/tcg/optimize.c
9
+++ b/tcg/ppc/tcg-target.c.inc
10
+++ b/tcg/optimize.c
10
@@ -XXX,XX +XXX,XX @@ static inline bool in_range_b(tcg_target_long target)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
11
}
17
}
12
18
13
static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
14
-             const tcg_insn_unit *target)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
15
+ const tcg_insn_unit *target)
21
fold_xi_to_i(ctx, op, 0)) {
16
{
22
return true;
17
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
23
}
18
tcg_debug_assert(in_range_b(disp));
24
- return false;
19
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
25
+ return finish_folding(ctx, op);
20
}
26
}
21
27
22
static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
23
-             const tcg_insn_unit *target)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
24
+ const tcg_insn_unit *target)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
25
{
31
return true;
26
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
32
}
27
tcg_debug_assert(disp == (int16_t) disp);
33
- return false;
28
@@ -XXX,XX +XXX,XX @@ static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
34
+ return finish_folding(ctx, op);
29
tcgv_vec_arg(t1), tcgv_vec_arg(t2));
35
}
30
vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
36
31
tcgv_vec_arg(v0), tcgv_vec_arg(t1));
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
32
-    break;
33
+ break;
34
35
case MO_32:
36
tcg_debug_assert(!have_isa_2_07);
37
--
38
--
38
2.34.1
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
1
We already register allocate through extended basic blocks,
1
Avoid the use of the OptContext slots.
2
optimize through extended basic blocks as well.
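
A rough illustration of what this buys for the tcg/optimize pass; the opcode
spellings below are approximate and not taken from the patch:

    mov_i64    t1, $0x1234
    brcond_i64 t2, t3, lt, $L1   /* conditional BB end: t1 stays known   */
    add_i64    t4, t0, t1        /* can still fold using the constant    */
    set_label  $L1               /* non-conditional BB end: state reset  */
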
3
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/optimize.c | 8 +++++---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
int i, nb_oargs;
14
15
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
/*
16
{
17
- * For an opcode that ends a BB, reset all temp data.
17
+ uint64_t s_mask;
18
- * We do no cross-BB optimization.
18
+
19
+ * We only optimize extended basic blocks. If the opcode ends a BB
19
if (fold_const2(ctx, op) ||
20
+ * and is not a conditional branch, reset all temp data.
20
fold_xx_to_i(ctx, op, -1) ||
21
*/
21
fold_xi_to_x(ctx, op, -1) ||
22
if (def->flags & TCG_OPF_BB_END) {
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
23
return true;
24
ctx->prev_mb = NULL;
25
+ if (!(def->flags & TCG_OPF_COND_BRANCH)) {
26
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
27
+ }
28
return;
29
}
24
}
30
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
31
--
35
--
32
2.34.1
36
2.43.0
1
With ISA v3.0, we can use ADDPCIS instead of BCL+MFLR to load NIA.
1
Avoid the use of the OptContext slots.
2
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
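
A hedged example of the resulting masks for an 8-bit load widened to a
64-bit result (values computed by hand, not taken from the patch):

    /* MO_SB (signed byte load): bits 63..7 are copies of bit 7, so
     *   s_mask = MAKE_64BIT_MASK(7, 57) == 0xffffffffffffff80
     * MO_UB (unsigned byte load): only the low byte can be nonzero, so
     *   z_mask = MAKE_64BIT_MASK(0, 8)  == 0xff                        */
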
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
9
---
5
tcg/ppc/tcg-target.c.inc | 25 ++++++++++++++++++++++---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
6
1 file changed, 22 insertions(+), 3 deletions(-)
11
1 file changed, 21 insertions(+), 5 deletions(-)
7
12
8
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/ppc/tcg-target.c.inc
15
--- a/tcg/optimize.c
11
+++ b/tcg/ppc/tcg-target.c.inc
16
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
13
#define CRNAND XO19(225)
18
return fold_masks_s(ctx, op, s_mask);
14
#define CROR XO19(449)
15
#define CRNOR XO19( 33)
16
+#define ADDPCIS XO19( 2)
17
18
#define EXTSB XO31(954)
19
#define EXTSH XO31(922)
20
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
21
tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
22
}
19
}
23
20
24
+static void tcg_out_addpcis(TCGContext *s, TCGReg dst, intptr_t imm)
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
25
+{
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
26
+ uint32_t d0, d1, d2;
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
27
+
43
+
28
+ tcg_debug_assert((imm & 0xffff) == 0);
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
29
+ tcg_debug_assert(imm == (int32_t)imm);
30
+
31
+ d2 = extract32(imm, 16, 1);
32
+ d1 = extract32(imm, 17, 5);
33
+ d0 = extract32(imm, 22, 10);
34
+ tcg_out32(s, ADDPCIS | RT(dst) | (d1 << 16) | (d0 << 6) | d2);
35
+}
45
+}
36
+
46
+
37
static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
38
{
48
+{
39
TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tb_start(TCGContext *s)
50
+ ctx->prev_mb = NULL;
41
{
51
+ return finish_folding(ctx, op);
42
/* Load TCG_REG_TB. */
43
if (USE_REG_TB) {
44
- /* bcl 20,31,$+4 (preferred form for getting nia) */
45
- tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
46
- tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
47
+ if (have_isa_3_00) {
48
+ /* lnia REG_TB */
49
+ tcg_out_addpcis(s, TCG_REG_TB, 0);
50
+ } else {
51
+ /* bcl 20,31,$+4 (preferred form for getting nia) */
52
+ tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
53
+ tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
54
+ }
55
}
56
}
52
}
57
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
58
--
75
--
59
2.34.1
76
2.43.0
New patch
1
Stores have no output operands, and so need no further work.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 11 +++++------
7
1 file changed, 5 insertions(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
14
{
15
/* Opcodes that touch guest memory stop the mb optimization. */
16
ctx->prev_mb = NULL;
17
- return false;
18
+ return true;
19
}
20
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
28
}
29
30
switch (op->opc) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
32
g_assert_not_reached();
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
37
}
38
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
41
TCGType type;
42
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
44
- fold_tcg_st(ctx, op);
45
- return false;
46
+ return fold_tcg_st(ctx, op);
47
}
48
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
56
}
57
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
59
--
60
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
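
A sketch of the calling convention this introduces, paraphrasing the caller
hunks below rather than adding anything new:

    int i = fold_setcond_zmask(ctx, op, false);
    if (i > 0) {        /* > 0: folded completely, nothing more to do    */
        return true;
    }
    if (i == 0) {       /* 0: unchanged, try the next simplification     */
        fold_setcond_tst_pow2(ctx, op, false);
    }
    /* < 0: already simplified in place; fall through to mask handling.  */
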
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/loongarch64/tcg-target.c.inc | 126 +++++++++++++++----------------
7
tcg/optimize.c | 22 ++++++++++++++--------
5
1 file changed, 61 insertions(+), 65 deletions(-)
8
1 file changed, 14 insertions(+), 8 deletions(-)
6
9
7
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
return TCG_REG_A0 + slot;
15
return finish_folding(ctx, op);
13
}
16
}
14
17
15
-#ifndef CONFIG_SOFTMMU
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
16
-#define USE_GUEST_BASE (guest_base != 0)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
17
#define TCG_GUEST_BASE_REG TCG_REG_S1
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
18
-#endif
21
{
19
22
uint64_t a_zmask, b_val;
20
#define TCG_CT_CONST_ZERO 0x100
23
TCGCond cond;
21
#define TCG_CT_CONST_S12 0x200
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
22
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
25
op->opc = xor_opc;
23
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
26
op->args[2] = arg_new_constant(ctx, 1);
24
a_bits = h->aa.align;
27
}
25
28
- return false;
26
-#ifdef CONFIG_SOFTMMU
29
+ return -1;
27
- unsigned s_bits = opc & MO_SIZE;
30
}
28
- int mem_index = get_mmuidx(oi);
31
}
29
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
30
- int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
31
- int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
32
+ if (tcg_use_softmmu) {
33
+ unsigned s_bits = opc & MO_SIZE;
34
+ int mem_index = get_mmuidx(oi);
35
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
36
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
37
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
38
39
- ldst = new_ldst_label(s);
40
- ldst->is_ld = is_ld;
41
- ldst->oi = oi;
42
- ldst->addrlo_reg = addr_reg;
43
-
32
-
44
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
33
- return false;
45
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
34
+ return 0;
46
-
35
}
47
- tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
36
48
- s->page_bits - CPU_TLB_ENTRY_BITS);
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
49
- tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
50
- tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
51
-
40
}
52
- /* Load the tlb comparator and the addend. */
41
53
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
42
- if (fold_setcond_zmask(ctx, op, false)) {
54
- tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
43
+ i = fold_setcond_zmask(ctx, op, false);
55
- is_ld ? offsetof(CPUTLBEntry, addr_read)
44
+ if (i > 0) {
56
- : offsetof(CPUTLBEntry, addr_write));
45
return true;
57
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
46
}
58
- offsetof(CPUTLBEntry, addend));
47
- fold_setcond_tst_pow2(ctx, op, false);
59
-
48
+ if (i == 0) {
60
- /*
49
+ fold_setcond_tst_pow2(ctx, op, false);
61
- * For aligned accesses, we check the first byte and include the alignment
62
- * bits within the address. For unaligned access, we check that we don't
63
- * cross pages using the address of the last byte of the access.
64
- */
65
- if (a_bits < s_bits) {
66
- unsigned a_mask = (1u << a_bits) - 1;
67
- unsigned s_mask = (1u << s_bits) - 1;
68
- tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
69
- } else {
70
- tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
71
- }
72
- tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
73
- a_bits, s->page_bits - 1);
74
-
75
- /* Compare masked address with the TLB entry. */
76
- ldst->label_ptr[0] = s->code_ptr;
77
- tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
78
-
79
- h->index = TCG_REG_TMP2;
80
-#else
81
- if (a_bits) {
82
ldst = new_ldst_label(s);
83
-
84
ldst->is_ld = is_ld;
85
ldst->oi = oi;
86
ldst->addrlo_reg = addr_reg;
87
88
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
89
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
90
+
91
+ tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
92
+ s->page_bits - CPU_TLB_ENTRY_BITS);
93
+ tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
94
+ tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
95
+
96
+ /* Load the tlb comparator and the addend. */
97
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
98
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
99
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
100
+ : offsetof(CPUTLBEntry, addr_write));
101
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
102
+ offsetof(CPUTLBEntry, addend));
103
+
104
/*
105
- * Without micro-architecture details, we don't know which of
106
- * bstrpick or andi is faster, so use bstrpick as it's not
107
- * constrained by imm field width. Not to say alignments >= 2^12
108
- * are going to happen any time soon.
109
+ * For aligned accesses, we check the first byte and include the
110
+ * alignment bits within the address. For unaligned access, we
111
+ * check that we don't cross pages using the address of the last
112
+ * byte of the access.
113
*/
114
- tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
115
+ if (a_bits < s_bits) {
116
+ unsigned a_mask = (1u << a_bits) - 1;
117
+ unsigned s_mask = (1u << s_bits) - 1;
118
+ tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
119
+ } else {
120
+ tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
121
+ }
122
+ tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
123
+ a_bits, s->page_bits - 1);
124
125
+ /* Compare masked address with the TLB entry. */
126
ldst->label_ptr[0] = s->code_ptr;
127
- tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
128
- }
129
+ tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
130
131
- h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
132
-#endif
133
+ h->index = TCG_REG_TMP2;
134
+ } else {
135
+ if (a_bits) {
136
+ ldst = new_ldst_label(s);
137
+
138
+ ldst->is_ld = is_ld;
139
+ ldst->oi = oi;
140
+ ldst->addrlo_reg = addr_reg;
141
+
142
+ /*
143
+ * Without micro-architecture details, we don't know which of
144
+ * bstrpick or andi is faster, so use bstrpick as it's not
145
+ * constrained by imm field width. Not to say alignments >= 2^12
146
+ * are going to happen any time soon.
147
+ */
148
+ tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
149
+
150
+ ldst->label_ptr[0] = s->code_ptr;
151
+ tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
152
+ }
153
+
154
+ h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
155
+ }
50
+ }
156
51
157
if (addr_type == TCG_TYPE_I32) {
52
ctx->z_mask = 1;
158
h->base = TCG_REG_TMP0;
53
return false;
159
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
160
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
161
}
56
}
162
57
163
-#if !defined(CONFIG_SOFTMMU)
58
- if (fold_setcond_zmask(ctx, op, true)) {
164
- if (USE_GUEST_BASE) {
59
+ i = fold_setcond_zmask(ctx, op, true);
165
+ if (!tcg_use_softmmu && guest_base) {
60
+ if (i > 0) {
166
tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
61
return true;
167
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
168
}
62
}
169
-#endif
63
- fold_setcond_tst_pow2(ctx, op, true);
170
64
+ if (i == 0) {
171
/* Call generated code */
65
+ fold_setcond_tst_pow2(ctx, op, true);
172
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
173
--
70
--
174
2.34.1
71
2.43.0
175
176
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
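A standalone check (not QEMU code; my_sextract64 is a local stand-in for QEMU's sextract64) of the sign mask computed above: after extracting a signed len-bit field, every bit from position len-1 upward is a copy of the sign bit, which is exactly what s_mask |= -1ull << (len - 1) records.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* local stand-in for QEMU's sextract64() */
static int64_t my_sextract64(uint64_t value, int pos, int len)
{
    return (int64_t)(value << (64 - pos - len)) >> (64 - len);
}

int main(void)
{
    const int pos = 8, len = 5;
    const uint64_t s_mask = -1ull << (len - 1);   /* bits that mirror the msb */
    const uint64_t samples[] = { 0, 0x0a00, 0x1f00, 0x1ef3, UINT64_MAX };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint64_t r = my_sextract64(samples[i], pos, len);
        uint64_t sign = -((r >> (len - 1)) & 1);  /* 0 or all-ones */

        /* every bit covered by s_mask equals the sign bit */
        assert((r & s_mask) == (sign & s_mask));
        printf("sextract64(%#" PRIx64 ", %d, %d) = %#" PRIx64 "\n",
               samples[i], pos, len, r);
    }
    return 0;
}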
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
diff view generated by jsdifflib
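A standalone illustration (not QEMU code) of the two comments touched above: an arithmetic right shift can only increase the number of leading bits that copy the sign, and a logical right shift does the same once the sign bit is known zero. Exhaustive over 8-bit values; it assumes the compiler implements >> on negative signed values as an arithmetic shift, as GCC and Clang do.

#include <assert.h>
#include <stdint.h>

/* number of leading bits of an 8-bit value that are copies of its msb */
static int sign_reps(uint8_t x)
{
    int msb = x >> 7;
    int n = 0;

    for (int i = 7; i >= 0 && ((x >> i) & 1) == msb; i--) {
        n++;
    }
    return n;
}

int main(void)
{
    for (int v = -128; v < 128; v++) {
        for (int sh = 0; sh < 8; sh++) {
            int8_t x = (int8_t)v;
            int8_t sar = (int8_t)(x >> sh);             /* arithmetic right shift */
            uint8_t shr = (uint8_t)((uint8_t)x >> sh);  /* logical right shift    */

            /* sar never loses sign repetitions */
            assert(sign_reps((uint8_t)sar) >= sign_reps((uint8_t)x));

            /* once the sign bit is known zero, neither does shr */
            if (x >= 0) {
                assert(sign_reps(shr) >= sign_reps((uint8_t)x));
            }
        }
    }
    return 0;
}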
New patch
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the bitwise AND
3
produces zero, so the test is false.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t s_mask, z_mask, sign;
20
+ uint64_t s_mask, z_mask;
21
TempOptInfo *t1, *t2;
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
33
break;
34
--
35
2.43.0
diff view generated by jsdifflib
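A standalone, exhaustive check (illustration only) of the identity behind this patch: when sign is either zero or a single bit, which is what -s_mask yields for a sign-extended s_mask, testing ~z_mask & sign is the same as testing sign != 0 && !(z_mask & sign).

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
    for (uint32_t z_mask = 0; z_mask < 256; z_mask++) {
        for (int bit = -1; bit < 8; bit++) {
            uint32_t sign = bit < 0 ? 0 : 1u << bit;   /* zero or a single bit */
            bool old_form = sign != 0 && !(z_mask & sign);
            bool new_form = (~z_mask & sign) != 0;

            assert(old_form == new_form);
        }
    }
    return 0;
}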
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
diff view generated by jsdifflib
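A small sketch (stub functions, not QEMU code) of the control-flow point behind the duplication: once the first clause of the "if (a() || b()) return true;" pattern always returns true, the statements after the if can never run, so fold_sub inlines the individual checks rather than calling fold_sub_vec.

#include <stdbool.h>
#include <stdio.h>

static bool helper_always_true(void)   /* like fold_sub_vec after this series */
{
    return true;
}

int main(void)
{
    if (helper_always_true()) {
        return 0;
    }
    /* anything placed here, like fold_sub's sub->add rewrite, would be dead */
    puts("never reached");
    return 1;
}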
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
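A quick standalone comparison (illustration only; MAKE_64BIT_MASK is copied locally) of the two constants swapped above for ld8s: the old mask covered bits 8..63, while (uint64_t)INT8_MIN covers bits 7..63, i.e. it also includes the sign bit itself, which is how I read the series' new s_mask representation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* local copy of QEMU's MAKE_64BIT_MASK(shift, length) */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    uint64_t old_ld8s = MAKE_64BIT_MASK(8, 56);  /* bits 8..63: 0xffffffffffffff00 */
    uint64_t new_ld8s = (uint64_t)INT8_MIN;      /* bits 7..63: 0xffffffffffffff80 */

    printf("old s_mask for ld8s: %#018" PRIx64 "\n", old_ld8s);
    printf("new s_mask for ld8s: %#018" PRIx64 "\n", new_ld8s);
    return 0;
}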
1
Fixes: 92c041c59b ("tcg/riscv: Add the prologue generation and register the JIT")
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
3
---
4
tcg/riscv/tcg-target.c.inc | 6 ++++--
4
tcg/optimize.c | 2 +-
5
1 file changed, 4 insertions(+), 2 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
6
6
7
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/riscv/tcg-target.c.inc
9
--- a/tcg/optimize.c
10
+++ b/tcg/riscv/tcg-target.c.inc
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
12
}
17
}
13
18
14
#if !defined(CONFIG_SOFTMMU)
19
type = ctx->type;
15
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
16
- tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
17
+ if (guest_base) {
18
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
19
+ tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
20
+ }
21
#endif
22
23
/* Call generated code */
24
--
20
--
25
2.34.1
21
2.43.0
diff view generated by jsdifflib
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Remove fold_masks as the function becomes unused.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
6
---
3
tcg/ppc/tcg-target.c.inc | 11 +++++++++--
7
tcg/optimize.c | 18 ++++++++----------
4
1 file changed, 9 insertions(+), 2 deletions(-)
8
1 file changed, 8 insertions(+), 10 deletions(-)
5
9
6
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/tcg/optimize.c
9
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/optimize.c
10
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
11
static void tcg_out_goto_tb(TCGContext *s, int which)
15
return fold_masks_zs(ctx, op, -1, s_mask);
16
}
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
-{
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
-}
22
-
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
12
{
29
{
13
uintptr_t ptr = get_jmp_target_addr(s, which);
30
+ uint64_t z_mask, s_mask;
14
+ int16_t lo;
31
+ TempOptInfo *t1, *t2;
15
32
+
16
/* Direct branch will be patched by tb_target_set_jmp_target. */
33
if (fold_const2_commutative(ctx, op) ||
17
set_jmp_insn_offset(s, which);
34
fold_xx_to_i(ctx, op, 0) ||
18
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
35
fold_xi_to_x(ctx, op, 0) ||
19
if (USE_REG_TB) {
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
20
ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr);
37
return true;
21
tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
22
+ } else if (have_isa_3_00) {
23
+ ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4;
24
+ lo = offset;
25
+ tcg_out_addpcis(s, TCG_REG_TMP1, offset - lo);
26
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
27
} else {
28
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
29
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
30
+ lo = ptr;
31
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - lo);
32
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, lo);
33
}
38
}
34
39
35
tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
36
--
53
--
37
2.34.1
54
2.43.0
diff view generated by jsdifflib
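An exhaustive 8-bit check (standalone, not QEMU code) of the two mask rules fold_xor now passes to fold_masks_zs: a bit of x ^ y can be nonzero only where one of the inputs can be, so the z_masks OR together, and x ^ y keeps at least the common count of leading sign-replicated bits, so the s_masks AND together.

#include <assert.h>
#include <stdint.h>

/* number of leading bits of an 8-bit value that are copies of its msb */
static int sign_reps(uint8_t x)
{
    int msb = x >> 7;
    int n = 0;

    for (int i = 7; i >= 0 && ((x >> i) & 1) == msb; i--) {
        n++;
    }
    return n;
}

int main(void)
{
    for (int a = 0; a < 256; a++) {
        for (int b = 0; b < 256; b++) {
            uint8_t x = (uint8_t)(a ^ b);
            int common = sign_reps(a) < sign_reps(b) ? sign_reps(a)
                                                     : sign_reps(b);

            /* a ^ b can only be 1 where a or b can be 1 */
            assert((x & ~(a | b)) == 0);
            /* xor keeps at least the common number of sign repetitions */
            assert(sign_reps(x) >= common);
        }
    }
    return 0;
}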
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
/* Propagate constants and copies, fold constant expressions. */
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
diff view generated by jsdifflib
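A sketch of the dispatch shape after this change (stub names, not QEMU code): every case, including default, now reports that it completed the op itself, and the caller only asserts that instead of running a shared finish_folding fallback.

#include <assert.h>
#include <stdbool.h>

static bool finish_folding_stub(void) { return true; }
static bool fold_add_stub(void)       { return finish_folding_stub(); }

static void optimize_one(int opc)
{
    bool done;

    switch (opc) {
    case 0:
        done = fold_add_stub();
        break;
    default:
        done = finish_folding_stub();   /* the default case finishes too */
        break;
    }
    assert(done);                       /* nothing is left half-folded */
}

int main(void)
{
    optimize_one(0);
    optimize_one(42);
    return 0;
}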
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/s390x/tcg-target.c.inc | 161 ++++++++++++++++++-------------------
7
tcg/optimize.c | 4 ++--
5
1 file changed, 79 insertions(+), 82 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
6
9
7
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/s390x/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/s390x/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
12
/* A scratch register that may be be used throughout the backend. */
15
g_assert_not_reached();
13
#define TCG_TMP0 TCG_REG_R1
14
15
-#ifndef CONFIG_SOFTMMU
16
#define TCG_GUEST_BASE_REG TCG_REG_R13
17
-#endif
18
19
/* All of the following instructions are prefixed with their instruction
20
format, and are defined as 8- or 16-bit quantities, even when the two
21
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
22
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
23
a_mask = (1 << h->aa.align) - 1;
24
25
-#ifdef CONFIG_SOFTMMU
26
- unsigned s_mask = (1 << s_bits) - 1;
27
- int mem_index = get_mmuidx(oi);
28
- int fast_off = tlb_mask_table_ofs(s, mem_index);
29
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
30
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
31
- int ofs, a_off;
32
- uint64_t tlb_mask;
33
+ if (tcg_use_softmmu) {
34
+ unsigned s_mask = (1 << s_bits) - 1;
35
+ int mem_index = get_mmuidx(oi);
36
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
37
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
38
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
39
+ int ofs, a_off;
40
+ uint64_t tlb_mask;
41
42
- ldst = new_ldst_label(s);
43
- ldst->is_ld = is_ld;
44
- ldst->oi = oi;
45
- ldst->addrlo_reg = addr_reg;
46
-
47
- tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
48
- s->page_bits - CPU_TLB_ENTRY_BITS);
49
-
50
- tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
51
- tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
52
-
53
- /*
54
- * For aligned accesses, we check the first byte and include the alignment
55
- * bits within the address. For unaligned access, we check that we don't
56
- * cross pages using the address of the last byte of the access.
57
- */
58
- a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
59
- tlb_mask = (uint64_t)s->page_mask | a_mask;
60
- if (a_off == 0) {
61
- tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
62
- } else {
63
- tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
64
- tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
65
- }
66
-
67
- if (is_ld) {
68
- ofs = offsetof(CPUTLBEntry, addr_read);
69
- } else {
70
- ofs = offsetof(CPUTLBEntry, addr_write);
71
- }
72
- if (addr_type == TCG_TYPE_I32) {
73
- ofs += HOST_BIG_ENDIAN * 4;
74
- tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
75
- } else {
76
- tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
77
- }
78
-
79
- tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
80
- ldst->label_ptr[0] = s->code_ptr++;
81
-
82
- h->index = TCG_TMP0;
83
- tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
84
- offsetof(CPUTLBEntry, addend));
85
-
86
- if (addr_type == TCG_TYPE_I32) {
87
- tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
88
- h->base = TCG_REG_NONE;
89
- } else {
90
- h->base = addr_reg;
91
- }
92
- h->disp = 0;
93
-#else
94
- if (a_mask) {
95
ldst = new_ldst_label(s);
96
ldst->is_ld = is_ld;
97
ldst->oi = oi;
98
ldst->addrlo_reg = addr_reg;
99
100
- /* We are expecting a_bits to max out at 7, much lower than TMLL. */
101
- tcg_debug_assert(a_mask <= 0xffff);
102
- tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
103
+ tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
104
+ s->page_bits - CPU_TLB_ENTRY_BITS);
105
106
- tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
107
+ tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
108
+ tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
109
+
110
+ /*
111
+ * For aligned accesses, we check the first byte and include the
112
+ * alignment bits within the address. For unaligned access, we
113
+ * check that we don't cross pages using the address of the last
114
+ * byte of the access.
115
+ */
116
+ a_off = (a_mask >= s_mask ? 0 : s_mask - a_mask);
117
+ tlb_mask = (uint64_t)s->page_mask | a_mask;
118
+ if (a_off == 0) {
119
+ tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
120
+ } else {
121
+ tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
122
+ tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
123
+ }
124
+
125
+ if (is_ld) {
126
+ ofs = offsetof(CPUTLBEntry, addr_read);
127
+ } else {
128
+ ofs = offsetof(CPUTLBEntry, addr_write);
129
+ }
130
+ if (addr_type == TCG_TYPE_I32) {
131
+ ofs += HOST_BIG_ENDIAN * 4;
132
+ tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
133
+ } else {
134
+ tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
135
+ }
136
+
137
+ tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
138
ldst->label_ptr[0] = s->code_ptr++;
139
- }
140
141
- h->base = addr_reg;
142
- if (addr_type == TCG_TYPE_I32) {
143
- tcg_out_ext32u(s, TCG_TMP0, addr_reg);
144
- h->base = TCG_TMP0;
145
- }
146
- if (guest_base < 0x80000) {
147
- h->index = TCG_REG_NONE;
148
- h->disp = guest_base;
149
- } else {
150
- h->index = TCG_GUEST_BASE_REG;
151
+ h->index = TCG_TMP0;
152
+ tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
153
+ offsetof(CPUTLBEntry, addend));
154
+
155
+ if (addr_type == TCG_TYPE_I32) {
156
+ tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
157
+ h->base = TCG_REG_NONE;
158
+ } else {
159
+ h->base = addr_reg;
160
+ }
161
h->disp = 0;
162
+ } else {
163
+ if (a_mask) {
164
+ ldst = new_ldst_label(s);
165
+ ldst->is_ld = is_ld;
166
+ ldst->oi = oi;
167
+ ldst->addrlo_reg = addr_reg;
168
+
169
+ /* We are expecting a_bits to max out at 7, much lower than TMLL. */
170
+ tcg_debug_assert(a_mask <= 0xffff);
171
+ tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
172
+
173
+ tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
174
+ ldst->label_ptr[0] = s->code_ptr++;
175
+ }
176
+
177
+ h->base = addr_reg;
178
+ if (addr_type == TCG_TYPE_I32) {
179
+ tcg_out_ext32u(s, TCG_TMP0, addr_reg);
180
+ h->base = TCG_TMP0;
181
+ }
182
+ if (guest_base < 0x80000) {
183
+ h->index = TCG_REG_NONE;
184
+ h->disp = guest_base;
185
+ } else {
186
+ h->index = TCG_GUEST_BASE_REG;
187
+ h->disp = 0;
188
+ }
189
}
16
}
190
-#endif
17
191
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
192
return ldst;
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
193
}
20
return true;
194
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
195
TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
196
CPU_TEMP_BUF_NLONGS * sizeof(long));
197
198
-#ifndef CONFIG_SOFTMMU
199
- if (guest_base >= 0x80000) {
200
+ if (!tcg_use_softmmu && guest_base >= 0x80000) {
201
tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
202
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
203
}
21
}
204
-#endif
22
205
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
206
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
207
31
208
--
32
--
209
2.34.1
33
2.43.0
210
211
diff view generated by jsdifflib
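A simplified standalone illustration (not QEMU code) of the "affected mask" idea behind the re-enabled fold_affected_mask() calls above: when every bit the operation could change already holds the required value, here bits 7..15 already replicating bit 7, an 8-to-16-bit sign extension is an identity and can fold to a plain move.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (uint32_t v = 0; v < 0x10000; v++) {
        uint16_t x = (uint16_t)v;
        /* 8-to-16-bit sign extension, written out explicitly */
        uint16_t ext = (x & 0x80) ? (uint16_t)(x | 0xff00) : (uint16_t)(x & 0x00ff);
        uint16_t sign = (x & 0x80) ? 0xff80 : 0x0000;

        if ((x & 0xff80) == sign) {   /* bits 7..15 already replicate bit 7 */
            assert(ext == x);         /* nothing is "affected": fold to mov */
        }
    }
    return 0;
}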
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
The big comment just above says functions should be sorted.
2
Add forward declarations as needed.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/mips/tcg-target.c.inc | 231 +++++++++++++++++++-------------------
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
5
1 file changed, 113 insertions(+), 118 deletions(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
6
9
7
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/mips/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/mips/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
12
#define TCG_TMP2 TCG_REG_T8
15
* 3) those that produce information about the result value.
13
#define TCG_TMP3 TCG_REG_T7
16
*/
14
17
15
-#ifndef CONFIG_SOFTMMU
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
16
#define TCG_GUEST_BASE_REG TCG_REG_S7
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
17
-#endif
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
18
#if TCG_TARGET_REG_BITS == 64
21
+
19
#define TCG_REG_TB TCG_REG_S6
22
static bool fold_add(OptContext *ctx, TCGOp *op)
20
#else
23
{
21
-#define TCG_REG_TB (qemu_build_not_reached(), TCG_REG_ZERO)
24
if (fold_const2_commutative(ctx, op) ||
22
+#define TCG_REG_TB ({ qemu_build_not_reached(); TCG_REG_ZERO; })
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
23
#endif
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
24
27
}
25
/* check if we really need so many registers :P */
28
26
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
27
a_bits = h->aa.align;
30
+{
28
a_mask = (1 << a_bits) - 1;
31
+ /* If true and false values are the same, eliminate the cmp. */
29
32
+ if (args_are_copies(op->args[2], op->args[3])) {
30
-#ifdef CONFIG_SOFTMMU
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
31
- unsigned s_mask = (1 << s_bits) - 1;
34
+ }
32
- int mem_index = get_mmuidx(oi);
35
+
33
- int fast_off = tlb_mask_table_ofs(s, mem_index);
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
34
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
37
+ uint64_t tv = arg_info(op->args[2])->val;
35
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
38
+ uint64_t fv = arg_info(op->args[3])->val;
36
- int add_off = offsetof(CPUTLBEntry, addend);
39
+
37
- int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
40
+ if (tv == -1 && fv == 0) {
38
- : offsetof(CPUTLBEntry, addr_write);
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
39
+ if (tcg_use_softmmu) {
42
+ }
40
+ unsigned s_mask = (1 << s_bits) - 1;
43
+ if (tv == 0 && fv == -1) {
41
+ int mem_index = get_mmuidx(oi);
44
+ if (TCG_TARGET_HAS_not_vec) {
42
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
45
+ op->opc = INDEX_op_not_vec;
43
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
46
+ return fold_not(ctx, op);
44
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
47
+ } else {
45
+ int add_off = offsetof(CPUTLBEntry, addend);
48
+ op->opc = INDEX_op_xor_vec;
46
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
49
+ op->args[2] = arg_new_constant(ctx, -1);
47
+ : offsetof(CPUTLBEntry, addr_write);
50
+ return fold_xor(ctx, op);
48
51
+ }
49
- ldst = new_ldst_label(s);
52
+ }
50
- ldst->is_ld = is_ld;
53
+ }
51
- ldst->oi = oi;
54
+ if (arg_is_const(op->args[2])) {
52
- ldst->addrlo_reg = addrlo;
55
+ uint64_t tv = arg_info(op->args[2])->val;
53
- ldst->addrhi_reg = addrhi;
56
+ if (tv == -1) {
54
-
57
+ op->opc = INDEX_op_or_vec;
55
- /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
58
+ op->args[2] = op->args[3];
56
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
59
+ return fold_or(ctx, op);
57
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
60
+ }
58
-
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
59
- /* Extract the TLB index from the address into TMP3. */
62
+ op->opc = INDEX_op_andc_vec;
60
- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
63
+ op->args[2] = op->args[1];
61
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
64
+ op->args[1] = op->args[3];
62
- s->page_bits - CPU_TLB_ENTRY_BITS);
65
+ return fold_andc(ctx, op);
63
- } else {
66
+ }
64
- tcg_out_dsrl(s, TCG_TMP3, addrlo,
67
+ }
65
- s->page_bits - CPU_TLB_ENTRY_BITS);
68
+ if (arg_is_const(op->args[3])) {
66
- }
69
+ uint64_t fv = arg_info(op->args[3])->val;
67
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
70
+ if (fv == 0) {
68
-
71
+ op->opc = INDEX_op_and_vec;
69
- /* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
72
+ return fold_and(ctx, op);
70
- tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
73
+ }
71
-
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
72
- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
75
+ op->opc = INDEX_op_orc_vec;
73
- /* Load the (low half) tlb comparator. */
76
+ op->args[2] = op->args[1];
74
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
77
+ op->args[1] = op->args[3];
75
- cmp_off + HOST_BIG_ENDIAN * 4);
78
+ return fold_orc(ctx, op);
76
- } else {
79
+ }
77
- tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
}
90
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
92
-{
93
- /* If true and false values are the same, eliminate the cmp. */
94
- if (args_are_copies(op->args[2], op->args[3])) {
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
78
- }
96
- }
79
-
97
-
80
- if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
81
- /* Load the tlb addend for the fast path. */
99
- uint64_t tv = arg_info(op->args[2])->val;
82
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
100
- uint64_t fv = arg_info(op->args[3])->val;
83
- }
84
-
101
-
85
- /*
102
- if (tv == -1 && fv == 0) {
86
- * Mask the page bits, keeping the alignment bits to compare against.
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
87
- * For unaligned accesses, compare against the end of the access to
88
- * verify that it does not cross a page boundary.
89
- */
90
- tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
91
- if (a_mask < s_mask) {
92
- if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
93
- tcg_out_opc_imm(s, OPC_ADDIU, TCG_TMP2, addrlo, s_mask - a_mask);
94
- } else {
95
- tcg_out_opc_imm(s, OPC_DADDIU, TCG_TMP2, addrlo, s_mask - a_mask);
96
- }
104
- }
97
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
105
- if (tv == 0 && fv == -1) {
98
- } else {
106
- if (TCG_TARGET_HAS_not_vec) {
99
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
107
- op->opc = INDEX_op_not_vec;
100
- }
108
- return fold_not(ctx, op);
101
-
109
- } else {
102
- /* Zero extend a 32-bit guest address for a 64-bit host. */
110
- op->opc = INDEX_op_xor_vec;
103
- if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
111
- op->args[2] = arg_new_constant(ctx, -1);
104
- tcg_out_ext32u(s, TCG_TMP2, addrlo);
112
- return fold_xor(ctx, op);
105
- addrlo = TCG_TMP2;
113
- }
106
- }
107
-
108
- ldst->label_ptr[0] = s->code_ptr;
109
- tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
110
-
111
- /* Load and test the high half tlb comparator. */
112
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
113
- /* delay slot */
114
- tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
115
-
116
- /* Load the tlb addend for the fast path. */
117
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
118
-
119
- ldst->label_ptr[1] = s->code_ptr;
120
- tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
121
- }
122
-
123
- /* delay slot */
124
- base = TCG_TMP3;
125
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
126
-#else
127
- if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
128
ldst = new_ldst_label(s);
129
-
130
ldst->is_ld = is_ld;
131
ldst->oi = oi;
132
ldst->addrlo_reg = addrlo;
133
ldst->addrhi_reg = addrhi;
134
135
- /* We are expecting a_bits to max out at 7, much lower than ANDI. */
136
- tcg_debug_assert(a_bits < 16);
137
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
138
+ /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
139
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
140
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
141
+
142
+ /* Extract the TLB index from the address into TMP3. */
143
+ if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
144
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addrlo,
145
+ s->page_bits - CPU_TLB_ENTRY_BITS);
146
+ } else {
147
+ tcg_out_dsrl(s, TCG_TMP3, addrlo,
148
+ s->page_bits - CPU_TLB_ENTRY_BITS);
149
+ }
150
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0);
151
+
152
+ /* Add the tlb_table pointer, creating the CPUTLBEntry address. */
153
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
154
+
155
+ if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
156
+ /* Load the (low half) tlb comparator. */
157
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
158
+ cmp_off + HOST_BIG_ENDIAN * 4);
159
+ } else {
160
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
161
+ }
162
+
163
+ if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
164
+ /* Load the tlb addend for the fast path. */
165
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
166
+ }
167
+
168
+ /*
169
+ * Mask the page bits, keeping the alignment bits to compare against.
170
+ * For unaligned accesses, compare against the end of the access to
171
+ * verify that it does not cross a page boundary.
172
+ */
173
+ tcg_out_movi(s, addr_type, TCG_TMP1, s->page_mask | a_mask);
174
+ if (a_mask < s_mask) {
175
+ tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32
176
+ || addr_type == TCG_TYPE_I32
177
+ ? OPC_ADDIU : OPC_DADDIU),
178
+ TCG_TMP2, addrlo, s_mask - a_mask);
179
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
180
+ } else {
181
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrlo);
182
+ }
183
+
184
+ /* Zero extend a 32-bit guest address for a 64-bit host. */
185
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
186
+ tcg_out_ext32u(s, TCG_TMP2, addrlo);
187
+ addrlo = TCG_TMP2;
188
+ }
189
190
ldst->label_ptr[0] = s->code_ptr;
191
- if (use_mips32r6_instructions) {
192
- tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
193
- } else {
194
- tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
195
- tcg_out_nop(s);
196
- }
114
- }
197
- }
115
- }
198
+ tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
116
- if (arg_is_const(op->args[2])) {
199
117
- uint64_t tv = arg_info(op->args[2])->val;
200
- base = addrlo;
118
- if (tv == -1) {
201
- if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
119
- op->opc = INDEX_op_or_vec;
202
- tcg_out_ext32u(s, TCG_REG_A0, base);
120
- op->args[2] = op->args[3];
203
- base = TCG_REG_A0;
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
204
- }
129
- }
205
- if (guest_base) {
130
- if (arg_is_const(op->args[3])) {
206
- if (guest_base == (int16_t)guest_base) {
131
- uint64_t fv = arg_info(op->args[3])->val;
207
- tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
132
- if (fv == 0) {
208
- } else {
133
- op->opc = INDEX_op_and_vec;
209
- tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
134
- return fold_and(ctx, op);
210
- TCG_GUEST_BASE_REG);
135
- }
211
+ /* Load and test the high half tlb comparator. */
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
212
+ if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
137
- op->opc = INDEX_op_orc_vec;
213
+ /* delay slot */
138
- op->args[2] = op->args[1];
214
+ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
139
- op->args[1] = op->args[3];
215
+
140
- return fold_orc(ctx, op);
216
+ /* Load the tlb addend for the fast path. */
141
- }
217
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
142
- }
218
+
143
- return finish_folding(ctx, op);
219
+ ldst->label_ptr[1] = s->code_ptr;
144
-}
220
+ tcg_out_opc_br(s, OPC_BNE, addrhi, TCG_TMP0);
145
-
221
+ }
146
/* Propagate constants and copies, fold constant expressions. */
222
+
147
void tcg_optimize(TCGContext *s)
223
+ /* delay slot */
148
{
224
+ base = TCG_TMP3;
225
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addrlo);
226
+ } else {
227
+ if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) {
228
+ ldst = new_ldst_label(s);
229
+
230
+ ldst->is_ld = is_ld;
231
+ ldst->oi = oi;
232
+ ldst->addrlo_reg = addrlo;
233
+ ldst->addrhi_reg = addrhi;
234
+
235
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
236
+ tcg_debug_assert(a_bits < 16);
237
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
238
+
239
+ ldst->label_ptr[0] = s->code_ptr;
240
+ if (use_mips32r6_instructions) {
241
+ tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
242
+ } else {
243
+ tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
244
+ tcg_out_nop(s);
245
+ }
246
+ }
247
+
248
+ base = addrlo;
249
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
250
+ tcg_out_ext32u(s, TCG_REG_A0, base);
251
+ base = TCG_REG_A0;
252
+ }
253
+ if (guest_base) {
254
+ if (guest_base == (int16_t)guest_base) {
255
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
256
+ } else {
257
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
258
+ TCG_GUEST_BASE_REG);
259
+ }
260
+ base = TCG_REG_A0;
261
}
262
- base = TCG_REG_A0;
263
}
264
-#endif
265
266
h->base = base;
267
return ldst;
268
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
269
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
270
}
271
272
-#ifndef CONFIG_SOFTMMU
273
- if (guest_base != (int16_t)guest_base) {
274
+ if (!tcg_use_softmmu && guest_base != (int16_t)guest_base) {
275
/*
276
* The function call abi for n32 and n64 will have loaded $25 (t9)
277
* with the address of the prologue, so we can use that instead
278
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
279
TCG_TARGET_REG_BITS == 64 ? TCG_REG_T9 : 0);
280
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
281
}
282
-#endif
283
284
if (TCG_TARGET_REG_BITS == 64) {
285
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
286
--
149
--
287
2.34.1
150
2.43.0
288
289
diff view generated by jsdifflib
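The fold_bitsel_vec code moved above rewrites bitsel with constant true/false inputs into simpler ops; a standalone exhaustive check (illustration only) of the identities it relies on, with bitsel(m,t,f) = (m & t) | (~m & f):

#include <assert.h>
#include <stdint.h>

static uint8_t bitsel(uint8_t m, uint8_t t, uint8_t f)
{
    return (m & t) | (~m & f);
}

int main(void)
{
    for (int m = 0; m < 256; m++) {
        assert(bitsel(m, 0xff, 0x00) == (uint8_t)m);         /* tv=-1, fv=0: mov m */
        assert(bitsel(m, 0x00, 0xff) == (uint8_t)~m);        /* tv=0, fv=-1: not m */
        for (int f = 0; f < 256; f++) {
            assert(bitsel(m, 0xff, f) == (uint8_t)(m | f));  /* tv=-1: or m,f      */
            assert(bitsel(m, 0x00, f) == (uint8_t)(f & ~m)); /* tv=0:  andc f,m    */
        }
    }
    return 0;
}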
1
PADDI can load 34-bit immediates and 34-bit pc-relative addresses.
1
The big comment just above says functions should be sorted.
2
2
3
Reviewed-by: Jordan Niethe <jniethe5@gmail.com>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/ppc/tcg-target.c.inc | 51 ++++++++++++++++++++++++++++++++++++++++
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
7
1 file changed, 51 insertions(+)
7
1 file changed, 30 insertions(+), 30 deletions(-)
8
8
9
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/ppc/tcg-target.c.inc
11
--- a/tcg/optimize.c
12
+++ b/tcg/ppc/tcg-target.c.inc
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
14
return true;
14
return true;
15
}
15
}
16
16
17
+/* Ensure that the prefixed instruction does not cross a 64-byte boundary. */
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
18
+static bool tcg_out_need_prefix_align(TCGContext *s)
19
+{
18
+{
20
+ return ((uintptr_t)s->code_ptr & 0x3f) == 0x3c;
19
+ /* Canonicalize the comparison to put immediate second. */
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
21
+}
24
+}
22
+
25
+
23
+static void tcg_out_prefix_align(TCGContext *s)
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
24
+{
27
+{
25
+ if (tcg_out_need_prefix_align(s)) {
28
+ /* If true and false values are the same, eliminate the cmp. */
26
+ tcg_out32(s, NOP);
29
+ if (args_are_copies(op->args[3], op->args[4])) {
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
27
+ }
31
+ }
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
28
+}
45
+}
29
+
46
+
30
+static ptrdiff_t tcg_pcrel_diff_for_prefix(TCGContext *s, const void *target)
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
31
+{
48
{
32
+ return tcg_pcrel_diff(s, target) - (tcg_out_need_prefix_align(s) ? 4 : 0);
49
uint64_t z_mask, s_mask;
33
+}
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
34
+
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
35
+/* Output Type 10 Prefix - Modified Load/Store Form (MLS:D) */
52
}
36
+static void tcg_out_mls_d(TCGContext *s, tcg_insn_unit opc, unsigned rt,
53
37
+ unsigned ra, tcg_target_long imm, bool r)
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
38
+{
55
-{
39
+ tcg_insn_unit p, i;
56
- /* Canonicalize the comparison to put immediate second. */
40
+
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
41
+ p = OPCD(1) | (2 << 24) | (r << 20) | ((imm >> 16) & 0x3ffff);
58
- op->args[3] = tcg_swap_cond(op->args[3]);
42
+ i = opc | TAI(rt, ra, imm);
59
- }
43
+
60
- return finish_folding(ctx, op);
44
+ tcg_out_prefix_align(s);
61
-}
45
+ tcg_out32(s, p);
62
-
46
+ tcg_out32(s, i);
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
47
+}
64
-{
48
+
65
- /* If true and false values are the same, eliminate the cmp. */
49
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
66
- if (args_are_copies(op->args[3], op->args[4])) {
50
TCGReg base, tcg_target_long offset);
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
51
68
- }
52
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
69
-
53
return;
70
- /* Canonicalize the comparison to put immediate second. */
54
}
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
55
72
- op->args[5] = tcg_swap_cond(op->args[5]);
56
+ /*
73
- }
57
+ * Load values up to 34 bits, and pc-relative addresses,
74
- /*
58
+ * with one prefixed insn.
75
- * Canonicalize the "false" input reg to match the destination,
59
+ */
76
- * so that the tcg backend can implement "move if true".
60
+ if (have_isa_3_10) {
77
- */
61
+ if (arg == sextract64(arg, 0, 34)) {
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
62
+ /* pli ret,value = paddi ret,0,value,0 */
79
- op->args[5] = tcg_invert_cond(op->args[5]);
63
+ tcg_out_mls_d(s, ADDI, ret, 0, arg, 0);
80
- }
64
+ return;
81
- return finish_folding(ctx, op);
65
+ }
82
-}
66
+
83
-
67
+ tmp = tcg_pcrel_diff_for_prefix(s, (void *)arg);
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
68
+ if (tmp == sextract64(tmp, 0, 34)) {
85
{
69
+ /* pla ret,value = paddi ret,0,value,1 */
86
uint64_t z_mask, s_mask, s_mask_old;
70
+ tcg_out_mls_d(s, ADDI, ret, 0, tmp, 1);
71
+ return;
72
+ }
73
+ }
74
+
75
/* Load 32-bit immediates with two insns. Note that we've already
76
eliminated bare ADDIS, so we know both insns are required. */
77
if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
78
--
87
--
79
2.34.1
88
2.43.0
diff view generated by jsdifflib
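A standalone check (illustration only) of the boundary rule tcg_out_need_prefix_align() tests for above: a prefixed instruction is two 4-byte words, and with 4-byte-aligned emission the only start offset whose second word spills into the next 64-byte block is 0x3c.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    for (uint64_t addr = 0; addr < 256; addr += 4) {
        int crosses = (addr / 64) != ((addr + 7) / 64);   /* 8-byte insn crosses a block? */

        assert(crosses == ((addr & 0x3f) == 0x3c));
    }
    return 0;
}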
1
Fix TCG_GUEST_BASE_REG to use 'TCG_REG_R30' instead of '30'.
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/ppc/tcg-target.c.inc | 284 ++++++++++++++++++++-------------------
7
include/fpu/softfloat.h | 6 ++++
7
1 file changed, 143 insertions(+), 141 deletions(-)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
8
9
fpu/softfloat-parts.c.inc | 7 +++--
9
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
10
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/ppc/tcg-target.c.inc
14
--- a/include/fpu/softfloat.h
12
+++ b/tcg/ppc/tcg-target.c.inc
15
+++ b/include/fpu/softfloat.h
13
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
14
17
float16 float16_sub(float16, float16, float_status *status);
15
#define have_isel (cpuinfo & CPUINFO_ISEL)
18
float16 float16_mul(float16, float16, float_status *status);
16
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
17
-#ifndef CONFIG_SOFTMMU
20
+float16 float16_muladd_scalbn(float16, float16, float16,
18
-#define TCG_GUEST_BASE_REG 30
21
+ int, int, float_status *status);
19
-#endif
22
float16 float16_div(float16, float16, float_status *status);
20
+#define TCG_GUEST_BASE_REG TCG_REG_R30
23
float16 float16_scalbn(float16, int, float_status *status);
21
24
float16 float16_min(float16, float16, float_status *status);
22
#ifdef CONFIG_DEBUG_TCG
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
23
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
26
float32 float32_div(float32, float32, float_status *status);
24
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
27
float32 float32_rem(float32, float32, float_status *status);
25
s_bits == MO_128);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
26
a_bits = h->aa.align;
29
+float32 float32_muladd_scalbn(float32, float32, float32,
27
30
+ int, int, float_status *status);
28
-#ifdef CONFIG_SOFTMMU
31
float32 float32_sqrt(float32, float_status *status);
29
- int mem_index = get_mmuidx(oi);
32
float32 float32_exp2(float32, float_status *status);
30
- int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
33
float32 float32_log2(float32, float_status *status);
31
- : offsetof(CPUTLBEntry, addr_write);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
32
- int fast_off = tlb_mask_table_ofs(s, mem_index);
35
float64 float64_div(float64, float64, float_status *status);
33
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
36
float64 float64_rem(float64, float64, float_status *status);
34
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
35
+ if (tcg_use_softmmu) {
38
+float64 float64_muladd_scalbn(float64, float64, float64,
36
+ int mem_index = get_mmuidx(oi);
39
+ int, int, float_status *status);
37
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
40
float64 float64_sqrt(float64, float_status *status);
38
+ : offsetof(CPUTLBEntry, addr_write);
41
float64 float64_log2(float64, float_status *status);
39
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
42
FloatRelation float64_compare(float64, float64, float_status *status);
40
+ int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
41
+ int table_off = fast_off + offsetof(CPUTLBDescFast, table);
44
index XXXXXXX..XXXXXXX 100644
42
45
--- a/fpu/softfloat.c
43
- ldst = new_ldst_label(s);
46
+++ b/fpu/softfloat.c
44
- ldst->is_ld = is_ld;
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
45
- ldst->oi = oi;
48
#define parts_mul(A, B, S) \
46
- ldst->addrlo_reg = addrlo;
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
47
- ldst->addrhi_reg = addrhi;
50
48
-
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
49
- /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
52
- FloatParts64 *c, int flags,
50
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
53
- float_status *s);
51
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
52
-
55
- FloatParts128 *c, int flags,
53
- /* Extract the page index, shifted into place for tlb index. */
56
- float_status *s);
54
- if (TCG_TARGET_REG_BITS == 32) {
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
55
- tcg_out_shri32(s, TCG_REG_R0, addrlo,
58
+ FloatParts64 *c, int scale,
56
- s->page_bits - CPU_TLB_ENTRY_BITS);
59
+ int flags, float_status *s);
57
- } else {
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
58
- tcg_out_shri64(s, TCG_REG_R0, addrlo,
61
+ FloatParts128 *c, int scale,
59
- s->page_bits - CPU_TLB_ENTRY_BITS);
62
+ int flags, float_status *s);
60
- }
63
61
- tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
64
-#define parts_muladd(A, B, C, Z, S) \
62
-
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
63
- /*
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
64
- * Load the (low part) TLB comparator into TMP2.
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
65
- * For 64-bit host, always load the entire 64-bit slot for simplicity.
68
66
- * We will ignore the high bits with tcg_out_cmp(..., addr_type).
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
67
- */
70
float_status *s);
68
- if (TCG_TARGET_REG_BITS == 64) {
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
69
- if (cmp_off == 0) {
72
* Fused multiply-add
70
- tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
73
*/
71
- } else {
74
72
- tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
73
- tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
76
- int flags, float_status *status)
74
- }
77
+float16 QEMU_FLATTEN
75
- } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
76
- tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
79
+ int scale, int flags, float_status *status)
77
- } else {
80
{
78
- tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
81
FloatParts64 pa, pb, pc, *pr;
79
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
82
80
- cmp_off + 4 * HOST_BIG_ENDIAN);
83
float16_unpack_canonical(&pa, a, status);
81
- }
84
float16_unpack_canonical(&pb, b, status);
82
-
85
float16_unpack_canonical(&pc, c, status);
83
- /*
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
84
- * Load the TLB addend for use on the fast path.
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
85
- * Do this asap to minimize any load use delay.
88
86
- */
89
return float16_round_pack_canonical(pr, status);
87
- if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
90
}
88
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
91
89
- offsetof(CPUTLBEntry, addend));
92
-static float32 QEMU_SOFTFLOAT_ATTR
90
- }
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
91
-
94
- float_status *status)
92
- /* Clear the non-page, non-alignment bits from the address in R0. */
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
93
- if (TCG_TARGET_REG_BITS == 32) {
96
+ int flags, float_status *status)
94
- /*
97
+{
95
- * We don't support unaligned accesses on 32-bits.
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
96
- * Preserve the bottom bits and thus trigger a comparison
99
+}
97
- * failure on unaligned accesses.
98
- */
99
- if (a_bits < s_bits) {
100
- a_bits = s_bits;
101
- }
102
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
103
- (32 - a_bits) & 31, 31 - s->page_bits);
104
- } else {
105
- TCGReg t = addrlo;
106
-
107
- /*
108
- * If the access is unaligned, we need to make sure we fail if we
109
- * cross a page boundary. The trick is to add the access size-1
110
- * to the address before masking the low bits. That will make the
111
- * address overflow to the next page if we cross a page boundary,
112
- * which will then force a mismatch of the TLB compare.
113
- */
114
- if (a_bits < s_bits) {
115
- unsigned a_mask = (1 << a_bits) - 1;
116
- unsigned s_mask = (1 << s_bits) - 1;
117
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
118
- t = TCG_REG_R0;
119
- }
120
-
121
- /* Mask the address for the requested alignment. */
122
- if (addr_type == TCG_TYPE_I32) {
123
- tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
124
- (32 - a_bits) & 31, 31 - s->page_bits);
125
- } else if (a_bits == 0) {
126
- tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
127
- } else {
128
- tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
129
- 64 - s->page_bits, s->page_bits - a_bits);
130
- tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
131
- }
132
- }
133
-
134
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
135
- /* Low part comparison into cr7. */
136
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
137
- 0, 7, TCG_TYPE_I32);
138
-
139
- /* Load the high part TLB comparator into TMP2. */
140
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
141
- cmp_off + 4 * !HOST_BIG_ENDIAN);
142
-
143
- /* Load addend, deferred for this case. */
144
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
145
- offsetof(CPUTLBEntry, addend));
146
-
147
- /* High part comparison into cr6. */
148
- tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
149
-
150
- /* Combine comparisons into cr7. */
151
- tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
152
- } else {
153
- /* Full comparison into cr7. */
154
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 7, addr_type);
155
- }
156
-
157
- /* Load a pointer into the current opcode w/conditional branch-link. */
158
- ldst->label_ptr[0] = s->code_ptr;
159
- tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
160
-
161
- h->base = TCG_REG_TMP1;
162
-#else
163
- if (a_bits) {
164
ldst = new_ldst_label(s);
165
ldst->is_ld = is_ld;
166
ldst->oi = oi;
167
ldst->addrlo_reg = addrlo;
168
ldst->addrhi_reg = addrhi;
169
170
- /* We are expecting a_bits to max out at 7, much lower than ANDI. */
171
- tcg_debug_assert(a_bits < 16);
172
- tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
173
+ /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
174
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
175
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
176
177
+ /* Extract the page index, shifted into place for tlb index. */
178
+ if (TCG_TARGET_REG_BITS == 32) {
179
+ tcg_out_shri32(s, TCG_REG_R0, addrlo,
180
+ s->page_bits - CPU_TLB_ENTRY_BITS);
181
+ } else {
182
+ tcg_out_shri64(s, TCG_REG_R0, addrlo,
183
+ s->page_bits - CPU_TLB_ENTRY_BITS);
184
+ }
185
+ tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
186
+
100
+
187
+ /*
101
+float32 QEMU_SOFTFLOAT_ATTR
188
+ * Load the (low part) TLB comparator into TMP2.
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
189
+ * For 64-bit host, always load the entire 64-bit slot for simplicity.
103
+ int scale, int flags, float_status *status)
190
+ * We will ignore the high bits with tcg_out_cmp(..., addr_type).
104
{
191
+ */
105
FloatParts64 pa, pb, pc, *pr;
192
+ if (TCG_TARGET_REG_BITS == 64) {
106
193
+ if (cmp_off == 0) {
107
float32_unpack_canonical(&pa, a, status);
194
+ tcg_out32(s, LDUX | TAB(TCG_REG_TMP2,
108
float32_unpack_canonical(&pb, b, status);
195
+ TCG_REG_TMP1, TCG_REG_TMP2));
109
float32_unpack_canonical(&pc, c, status);
196
+ } else {
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
197
+ tcg_out32(s, ADD | TAB(TCG_REG_TMP1,
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
198
+ TCG_REG_TMP1, TCG_REG_TMP2));
112
199
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2,
113
return float32_round_pack_canonical(pr, status);
200
+ TCG_REG_TMP1, cmp_off);
114
}
201
+ }
115
202
+ } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
116
-static float64 QEMU_SOFTFLOAT_ATTR
203
+ tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2,
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
204
+ TCG_REG_TMP1, TCG_REG_TMP2));
118
- float_status *status)
205
+ } else {
119
+float64 QEMU_SOFTFLOAT_ATTR
206
+ tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
207
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
121
+ int scale, int flags, float_status *status)
208
+ cmp_off + 4 * HOST_BIG_ENDIAN);
122
{
209
+ }
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
210
+
182
+
211
+ /*
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
212
+ * Load the TLB addend for use on the fast path.
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
213
+ * Do this asap to minimize any load use delay.
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
214
+ */
186
xnp = *parts_mul(&xnp, &xp, status);
215
+ if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
216
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
217
+ offsetof(CPUTLBEntry, addend));
218
+ }
219
+
220
+ /* Clear the non-page, non-alignment bits from the address in R0. */
221
+ if (TCG_TARGET_REG_BITS == 32) {
222
+ /*
223
+ * We don't support unaligned accesses on 32-bits.
224
+ * Preserve the bottom bits and thus trigger a comparison
225
+ * failure on unaligned accesses.
226
+ */
227
+ if (a_bits < s_bits) {
228
+ a_bits = s_bits;
229
+ }
230
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
231
+ (32 - a_bits) & 31, 31 - s->page_bits);
232
+ } else {
233
+ TCGReg t = addrlo;
234
+
235
+ /*
236
+ * If the access is unaligned, we need to make sure we fail if we
237
+ * cross a page boundary. The trick is to add the access size-1
238
+ * to the address before masking the low bits. That will make the
239
+ * address overflow to the next page if we cross a page boundary,
240
+ * which will then force a mismatch of the TLB compare.
241
+ */
242
+ if (a_bits < s_bits) {
243
+ unsigned a_mask = (1 << a_bits) - 1;
244
+ unsigned s_mask = (1 << s_bits) - 1;
245
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
246
+ t = TCG_REG_R0;
247
+ }
248
+
249
+ /* Mask the address for the requested alignment. */
250
+ if (addr_type == TCG_TYPE_I32) {
251
+ tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
252
+ (32 - a_bits) & 31, 31 - s->page_bits);
253
+ } else if (a_bits == 0) {
254
+ tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
255
+ } else {
256
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
257
+ 64 - s->page_bits, s->page_bits - a_bits);
258
+ tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
259
+ }
260
+ }
261
+
262
+ if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
263
+ /* Low part comparison into cr7. */
264
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
265
+ 0, 7, TCG_TYPE_I32);
266
+
267
+ /* Load the high part TLB comparator into TMP2. */
268
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
269
+ cmp_off + 4 * !HOST_BIG_ENDIAN);
270
+
271
+ /* Load addend, deferred for this case. */
272
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
273
+ offsetof(CPUTLBEntry, addend));
274
+
275
+ /* High part comparison into cr6. */
276
+ tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
277
+ 0, 6, TCG_TYPE_I32);
278
+
279
+ /* Combine comparisons into cr7. */
280
+ tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
281
+ } else {
282
+ /* Full comparison into cr7. */
283
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
284
+ 0, 7, addr_type);
285
+ }
286
+
287
+ /* Load a pointer into the current opcode w/conditional branch-link. */
288
ldst->label_ptr[0] = s->code_ptr;
289
- tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
290
- }
291
+ tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
292
293
- h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
294
-#endif
295
+ h->base = TCG_REG_TMP1;
296
+ } else {
297
+ if (a_bits) {
298
+ ldst = new_ldst_label(s);
299
+ ldst->is_ld = is_ld;
300
+ ldst->oi = oi;
301
+ ldst->addrlo_reg = addrlo;
302
+ ldst->addrhi_reg = addrhi;
303
+
304
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
305
+ tcg_debug_assert(a_bits < 16);
306
+ tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
307
+
308
+ ldst->label_ptr[0] = s->code_ptr;
309
+ tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
310
+ }
311
+
312
+ h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
313
+ }
314
315
if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
316
/* Zero-extend the guest address for use in the host address. */
317
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
318
}
187
}
319
tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
188
320
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
321
-#ifndef CONFIG_SOFTMMU
190
index XXXXXXX..XXXXXXX 100644
322
- if (guest_base) {
191
--- a/fpu/softfloat-parts.c.inc
323
+ if (!tcg_use_softmmu && guest_base) {
192
+++ b/fpu/softfloat-parts.c.inc
324
tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
325
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
326
}
212
}
327
-#endif
213
+ a->exp += scale;
328
214
finish_sign:
329
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
215
if (flags & float_muladd_negate_result) {
330
tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
216
a->sign ^= 1;
331
--
217
--
332
2.34.1
218
2.43.0
333
219
334
220
diff view generated by jsdifflib
Begin disconnecting CONFIG_SOFTMMU from !CONFIG_USER_ONLY.
Introduce a variable which can be set at startup to select
one method or another for user-only.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h |  8 ++++++--
 tcg/tcg-op-ldst.c | 14 +++++++-------
 tcg/tcg.c         |  9 ++++++---
 3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
     int nb_ops;
     TCGType addr_type;     /* TCG_TYPE_I32 or TCG_TYPE_I64 */
 
-#ifdef CONFIG_SOFTMMU
     int page_mask;
     uint8_t page_bits;
     uint8_t tlb_dyn_max_bits;
-#endif
     uint8_t insn_start_words;
     TCGBar guest_mo;
 
@@ -XXX,XX +XXX,XX @@ static inline bool temp_readonly(TCGTemp *ts)
     return ts->kind >= TEMP_FIXED;
 }
 
+#ifdef CONFIG_USER_ONLY
+extern bool tcg_use_softmmu;
+#else
+#define tcg_use_softmmu true
+#endif
+
 extern __thread TCGContext *tcg_ctx;
 extern const void *tcg_code_gen_epilogue;
 extern uintptr_t tcg_splitwx_diff;
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@
 
 static void check_max_alignment(unsigned a_bits)
 {
-#if defined(CONFIG_SOFTMMU)
     /*
      * The requested alignment cannot overlap the TLB flags.
      * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
      */
-    tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
-#endif
+    if (tcg_use_softmmu) {
+        tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
+    }
 }
 
 static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
  */
 static bool use_two_i64_for_i128(MemOp mop)
 {
-#ifdef CONFIG_SOFTMMU
     /* Two softmmu tlb lookups is larger than one function call. */
-    return false;
-#else
+    if (tcg_use_softmmu) {
+        return false;
+    }
+
     /*
      * For user-only, two 64-bit operations may well be smaller than a call.
      * Determine if that would be legal for the requested atomicity.
@@ -XXX,XX +XXX,XX @@ static bool use_two_i64_for_i128(MemOp mop)
     default:
         g_assert_not_reached();
     }
-#endif
 }
 
 static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                            MemOp host_atom, bool allow_two_ops)
     __attribute__((unused));
 
+#ifdef CONFIG_USER_ONLY
+bool tcg_use_softmmu;
+#endif
+
 TCGContext tcg_init_ctx;
 __thread TCGContext *tcg_ctx;
 
@@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
     return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
 }
 
-#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
-static int tlb_mask_table_ofs(TCGContext *s, int which)
+static int __attribute__((unused))
+tlb_mask_table_ofs(TCGContext *s, int which)
 {
     return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
             sizeof(CPUNegativeOffsetState));
 }
-#endif
 
 /* Signal overflow, starting over with fewer guest insns. */
 static G_NORETURN
--
2.34.1

Use the scalbn interface instead of float_muladd_halve_result.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/helper-a64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
         (float16_is_infinity(b) && float16_is_zero(a))) {
         return float16_one_point_five;
     }
-    return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
+    return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
 }
 
 float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
         (float32_is_infinity(b) && float32_is_zero(a))) {
         return float32_one_point_five;
     }
-    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
+    return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
 }
 
 float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
         (float64_is_infinity(b) && float64_is_zero(a))) {
         return float64_one_point_five;
     }
-    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
+    return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
 }
 
 /* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
--
2.43.0
diff view generated by jsdifflib
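As a minimal sketch of the interface change above (assuming QEMU's softfloat headers; frsqrts_step_f64 is an invented name, not an existing helper): float*_muladd_scalbn() scales the fused result by 2^scale, so passing scale = -1 halves (a * b) + c in the same single rounding that float_muladd_halve_result used to request.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

/* One FRSQRTS step, (3 - a * b) / 2, spelled with the scalbn interface. */
static float64 frsqrts_step_f64(float64 a, float64 b, float_status *fpst)
{
    /* Negate one operand so the fused op computes 3 - a * b. */
    a = float64_chs(a);
    /* scale = -1: the fused result is multiplied by 2^-1, i.e. halved. */
    return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
}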
1
Use the scalbn interface instead of float_muladd_halve_result.
2
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/arm/tcg-target.c.inc | 203 +++++++++++++++++++--------------------
6
target/sparc/helper.h | 4 +-
5
1 file changed, 97 insertions(+), 106 deletions(-)
7
target/sparc/fop_helper.c | 8 ++--
6
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
7
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
8
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/arm/tcg-target.c.inc
13
--- a/target/sparc/helper.h
10
+++ b/tcg/arm/tcg-target.c.inc
14
+++ b/target/sparc/helper.h
11
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
12
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
13
#define TCG_REG_TMP TCG_REG_R12
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
14
#define TCG_VEC_TMP TCG_REG_Q15
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
15
-#ifndef CONFIG_SOFTMMU
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
16
#define TCG_REG_GUEST_BASE TCG_REG_R11
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
17
-#endif
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
18
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
19
typedef enum {
23
20
COND_EQ = 0x0,
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
21
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
22
* r0-r3 will be overwritten when reading the tlb entry (system-mode only);
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
23
* r14 will be overwritten by the BLNE branching to the slow path.
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
24
*/
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
25
-#ifdef CONFIG_SOFTMMU
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
26
#define ALL_QLDST_REGS \
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
27
- (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
28
- (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
32
29
- (1 << TCG_REG_R14)))
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
30
-#else
34
index XXXXXXX..XXXXXXX 100644
31
-#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
35
--- a/target/sparc/fop_helper.c
32
-#endif
36
+++ b/target/sparc/fop_helper.c
33
+ (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
34
38
}
35
/*
39
36
* ARM immediates for ALU instructions are made of an unsigned 8-bit
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
37
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
41
- float32 s2, float32 s3, uint32_t op)
38
MemOp opc = get_memop(oi);
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
39
unsigned a_mask;
43
{
40
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
41
-#ifdef CONFIG_SOFTMMU
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
42
- *h = (HostAddress){
46
check_ieee_exceptions(env, GETPC());
43
- .cond = COND_AL,
47
return ret;
44
- .base = addrlo,
48
}
45
- .index = TCG_REG_R1,
49
46
- .index_scratch = true,
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
47
- };
51
- float64 s2, float64 s3, uint32_t op)
48
-#else
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
49
- *h = (HostAddress){
53
{
50
- .cond = COND_AL,
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
51
- .base = addrlo,
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
52
- .index = guest_base ? TCG_REG_GUEST_BASE : -1,
56
check_ieee_exceptions(env, GETPC());
53
- .index_scratch = false,
57
return ret;
54
- };
58
}
55
-#endif
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
56
+ if (tcg_use_softmmu) {
60
index XXXXXXX..XXXXXXX 100644
57
+ *h = (HostAddress){
61
--- a/target/sparc/translate.c
58
+ .cond = COND_AL,
62
+++ b/target/sparc/translate.c
59
+ .base = addrlo,
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
60
+ .index = TCG_REG_R1,
64
61
+ .index_scratch = true,
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
62
+ };
66
{
63
+ } else {
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
64
+ *h = (HostAddress){
68
+ TCGv_i32 z = tcg_constant_i32(0);
65
+ .cond = COND_AL,
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
66
+ .base = addrlo,
70
}
67
+ .index = guest_base ? TCG_REG_GUEST_BASE : -1,
71
68
+ .index_scratch = false,
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
69
+ };
73
{
70
+ }
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
71
75
+ TCGv_i32 z = tcg_constant_i32(0);
72
h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
73
a_mask = (1 << h->aa.align) - 1;
77
}
74
78
75
-#ifdef CONFIG_SOFTMMU
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
76
- int mem_index = get_mmuidx(oi);
80
{
77
- int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
81
- int op = float_muladd_negate_c;
78
- : offsetof(CPUTLBEntry, addr_write);
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
79
- int fast_off = tlb_mask_table_ofs(s, mem_index);
83
+ TCGv_i32 z = tcg_constant_i32(0);
80
- unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
81
- TCGReg t_addr;
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
82
+ if (tcg_use_softmmu) {
86
}
83
+ int mem_index = get_mmuidx(oi);
87
84
+ int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
85
+ : offsetof(CPUTLBEntry, addr_write);
89
{
86
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
90
- int op = float_muladd_negate_c;
87
+ unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
88
+ TCGReg t_addr;
92
+ TCGv_i32 z = tcg_constant_i32(0);
89
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
90
- ldst = new_ldst_label(s);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
91
- ldst->is_ld = is_ld;
95
}
92
- ldst->oi = oi;
96
93
- ldst->addrlo_reg = addrlo;
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
94
- ldst->addrhi_reg = addrhi;
98
{
95
+ ldst = new_ldst_label(s);
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
96
+ ldst->is_ld = is_ld;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
97
+ ldst->oi = oi;
101
+ TCGv_i32 z = tcg_constant_i32(0);
98
+ ldst->addrlo_reg = addrlo;
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
99
+ ldst->addrhi_reg = addrhi;
103
+ float_muladd_negate_result);
100
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
101
- /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
105
}
102
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
106
103
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
104
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
108
{
105
+ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
106
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
107
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
111
+ TCGv_i32 z = tcg_constant_i32(0);
108
+ tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
109
113
+ float_muladd_negate_result);
110
- /* Extract the tlb index from the address into R0. */
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
111
- tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
115
}
112
- SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
116
113
+ /* Extract the tlb index from the address into R0. */
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
114
+ tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
118
{
115
+ SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));
119
- int op = float_muladd_negate_result;
116
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
117
- /*
121
+ TCGv_i32 z = tcg_constant_i32(0);
118
- * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
119
- * Load the tlb comparator into R2/R3 and the fast path addend into R1.
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
120
- */
124
}
121
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
125
122
- if (cmp_off == 0) {
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
123
- if (s->addr_type == TCG_TYPE_I32) {
127
{
124
- tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
128
- int op = float_muladd_negate_result;
125
+ /*
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
126
+ * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
130
+ TCGv_i32 z = tcg_constant_i32(0);
127
+ * Load the tlb comparator into R2/R3 and the fast path addend into R1.
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
128
+ */
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
129
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
133
}
130
+ if (cmp_off == 0) {
134
131
+ if (s->addr_type == TCG_TYPE_I32) {
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
132
+ tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
133
+ TCG_REG_R1, TCG_REG_R0);
137
{
134
+ } else {
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
135
+ tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
139
- int op = float_muladd_halve_result;
136
+ TCG_REG_R1, TCG_REG_R0);
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
137
+ }
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
138
} else {
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
139
- tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
143
+ TCGv_i32 op = tcg_constant_i32(0);
140
+ tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
141
+ TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
145
}
142
+ if (s->addr_type == TCG_TYPE_I32) {
146
143
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
144
+ } else {
148
{
145
+ tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
146
+ }
150
- int op = float_muladd_halve_result;
147
}
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
148
- } else {
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
149
- tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
150
- TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
154
+ TCGv_i32 op = tcg_constant_i32(0);
151
- if (s->addr_type == TCG_TYPE_I32) {
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
152
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
156
}
153
+
157
154
+ /* Load the tlb addend. */
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
155
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
156
+ offsetof(CPUTLBEntry, addend));
160
{
157
+
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
158
+ /*
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
159
+ * Check alignment, check comparators.
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
160
+ * Do this in 2-4 insns. Use MOVW for v7, if possible,
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
161
+ * to reduce the number of sequential conditional instructions.
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
162
+ * Almost all guests have at least 4k pages, which means that we need
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
163
+ * to clear at least 9 bits even for an 8-byte memory, which means it
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
164
+ * isn't worth checking for an immediate operand for BIC.
168
}
165
+ *
169
166
+ * For unaligned accesses, test the page of the last unit of alignment.
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
167
+ * This leaves the least significant alignment bits unchanged, and of
171
{
168
+ * course must be zero.
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
169
+ */
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
170
+ t_addr = addrlo;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
171
+ if (a_mask < s_mask) {
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
172
+ t_addr = TCG_REG_R0;
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
173
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
174
+ addrlo, s_mask - a_mask);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
175
+ }
179
}
176
+ if (use_armv7_instructions && s->page_bits <= 16) {
180
177
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
178
+ tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
179
+ t_addr, TCG_REG_TMP, 0);
183
{
180
+ tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
181
+ TCG_REG_R2, TCG_REG_TMP, 0);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
182
} else {
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
183
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
184
+ if (a_mask) {
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
185
+ tcg_debug_assert(a_mask <= 0xff);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
186
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
187
+ }
191
}
188
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
192
189
+ SHIFT_IMM_LSR(s->page_bits));
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
190
+ tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
194
{
191
+ 0, TCG_REG_R2, TCG_REG_TMP,
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
192
+ SHIFT_IMM_LSL(s->page_bits));
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
193
}
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
194
- }
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
195
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
196
- /* Load the tlb addend. */
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
197
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
198
- offsetof(CPUTLBEntry, addend));
202
}
199
-
203
200
- /*
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
201
- * Check alignment, check comparators.
202
- * Do this in 2-4 insns. Use MOVW for v7, if possible,
203
- * to reduce the number of sequential conditional instructions.
204
- * Almost all guests have at least 4k pages, which means that we need
205
- * to clear at least 9 bits even for an 8-byte memory, which means it
206
- * isn't worth checking for an immediate operand for BIC.
207
- *
208
- * For unaligned accesses, test the page of the last unit of alignment.
209
- * This leaves the least significant alignment bits unchanged, and of
210
- * course must be zero.
211
- */
212
- t_addr = addrlo;
213
- if (a_mask < s_mask) {
214
- t_addr = TCG_REG_R0;
215
- tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
216
- addrlo, s_mask - a_mask);
217
- }
218
- if (use_armv7_instructions && s->page_bits <= 16) {
219
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
220
- tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
221
- t_addr, TCG_REG_TMP, 0);
222
- tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
223
- } else {
224
- if (a_mask) {
225
- tcg_debug_assert(a_mask <= 0xff);
226
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
227
+ if (s->addr_type != TCG_TYPE_I32) {
228
+ tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
229
}
230
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
231
- SHIFT_IMM_LSR(s->page_bits));
232
- tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
233
- 0, TCG_REG_R2, TCG_REG_TMP,
234
- SHIFT_IMM_LSL(s->page_bits));
235
- }
236
-
237
- if (s->addr_type != TCG_TYPE_I32) {
238
- tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
239
- }
240
-#else
241
- if (a_mask) {
242
+ } else if (a_mask) {
243
ldst = new_ldst_label(s);
244
ldst->is_ld = is_ld;
245
ldst->oi = oi;
246
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
247
/* tst addr, #mask */
248
tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
249
}
250
-#endif
251
252
return ldst;
253
}
254
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
255
256
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
257
258
-#ifndef CONFIG_SOFTMMU
259
- if (guest_base) {
260
+ if (!tcg_use_softmmu && guest_base) {
261
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
262
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
263
}
264
-#endif
265
266
tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
267
268
--
205
--
269
2.34.1
206
2.43.0
270
207
271
208
diff view generated by jsdifflib
1
All uses have been converted to float*_muladd_scalbn.
2
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/aarch64/tcg-target.c.inc | 177 +++++++++++++++++------------------
6
include/fpu/softfloat.h | 3 ---
5
1 file changed, 88 insertions(+), 89 deletions(-)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
6
10
7
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
8
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/aarch64/tcg-target.c.inc
13
--- a/include/fpu/softfloat.h
10
+++ b/tcg/aarch64/tcg-target.c.inc
14
+++ b/include/fpu/softfloat.h
11
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
12
#define TCG_REG_TMP2 TCG_REG_X30
16
| Using these differs from negating an input or output before calling
13
#define TCG_VEC_TMP0 TCG_REG_V31
17
| the muladd function in that this means that a NaN doesn't have its
14
18
| sign bit inverted before it is propagated.
15
-#ifndef CONFIG_SOFTMMU
19
-| We also support halving the result before rounding, as a special
16
#define TCG_REG_GUEST_BASE TCG_REG_X28
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
17
-#endif
21
*----------------------------------------------------------------------------*/
18
22
enum {
19
static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
23
float_muladd_negate_c = 1,
20
{
24
float_muladd_negate_product = 2,
21
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
25
float_muladd_negate_result = 4,
22
s_bits == MO_128);
26
- float_muladd_halve_result = 8,
23
a_mask = (1 << h->aa.align) - 1;
27
};
24
28
25
-#ifdef CONFIG_SOFTMMU
29
/*----------------------------------------------------------------------------
26
- unsigned s_mask = (1u << s_bits) - 1;
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
27
- unsigned mem_index = get_mmuidx(oi);
31
index XXXXXXX..XXXXXXX 100644
28
- TCGReg addr_adj;
32
--- a/fpu/softfloat.c
29
- TCGType mask_type;
33
+++ b/fpu/softfloat.c
30
- uint64_t compare_mask;
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
31
+ if (tcg_use_softmmu) {
35
if (unlikely(!can_use_fpu(s))) {
32
+ unsigned s_mask = (1u << s_bits) - 1;
36
goto soft;
33
+ unsigned mem_index = get_mmuidx(oi);
37
}
34
+ TCGReg addr_adj;
38
- if (unlikely(flags & float_muladd_halve_result)) {
35
+ TCGType mask_type;
39
- goto soft;
36
+ uint64_t compare_mask;
37
38
- ldst = new_ldst_label(s);
39
- ldst->is_ld = is_ld;
40
- ldst->oi = oi;
41
- ldst->addrlo_reg = addr_reg;
42
-
43
- mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
44
- ? TCG_TYPE_I64 : TCG_TYPE_I32);
45
-
46
- /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
47
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
48
- QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
49
- tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
50
- tlb_mask_table_ofs(s, mem_index), 1, 0);
51
-
52
- /* Extract the TLB index from the address into X0. */
53
- tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
54
- TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
55
- s->page_bits - CPU_TLB_ENTRY_BITS);
56
-
57
- /* Add the tlb_table pointer, forming the CPUTLBEntry address in TMP1. */
58
- tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
59
-
60
- /* Load the tlb comparator into TMP0, and the fast path addend into TMP1. */
61
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
62
- tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1,
63
- is_ld ? offsetof(CPUTLBEntry, addr_read)
64
- : offsetof(CPUTLBEntry, addr_write));
65
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
66
- offsetof(CPUTLBEntry, addend));
67
-
68
- /*
69
- * For aligned accesses, we check the first byte and include the alignment
70
- * bits within the address. For unaligned access, we check that we don't
71
- * cross pages using the address of the last byte of the access.
72
- */
73
- if (a_mask >= s_mask) {
74
- addr_adj = addr_reg;
75
- } else {
76
- addr_adj = TCG_REG_TMP2;
77
- tcg_out_insn(s, 3401, ADDI, addr_type,
78
- addr_adj, addr_reg, s_mask - a_mask);
79
- }
40
- }
80
- compare_mask = (uint64_t)s->page_mask | a_mask;
41
81
-
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
82
- /* Store the page mask part of the address into TMP2. */
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
83
- tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
84
- addr_adj, compare_mask);
45
if (unlikely(!can_use_fpu(s))) {
85
-
46
goto soft;
86
- /* Perform the address comparison. */
47
}
87
- tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
48
- if (unlikely(flags & float_muladd_halve_result)) {
88
-
49
- goto soft;
89
- /* If not equal, we jump to the slow path. */
90
- ldst->label_ptr[0] = s->code_ptr;
91
- tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
92
-
93
- h->base = TCG_REG_TMP1;
94
- h->index = addr_reg;
95
- h->index_ext = addr_type;
96
-#else
97
- if (a_mask) {
98
ldst = new_ldst_label(s);
99
-
100
ldst->is_ld = is_ld;
101
ldst->oi = oi;
102
ldst->addrlo_reg = addr_reg;
103
104
- /* tst addr, #mask */
105
- tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
106
+ mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32
107
+ ? TCG_TYPE_I64 : TCG_TYPE_I32);
108
109
- /* b.ne slow_path */
110
+ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
111
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
112
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
113
+ tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
114
+ tlb_mask_table_ofs(s, mem_index), 1, 0);
115
+
116
+ /* Extract the TLB index from the address into X0. */
117
+ tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
118
+ TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
119
+ s->page_bits - CPU_TLB_ENTRY_BITS);
120
+
121
+ /* Add the tlb_table pointer, forming the CPUTLBEntry address. */
122
+ tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
123
+
124
+ /* Load the tlb comparator into TMP0, and the fast path addend. */
125
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
126
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1,
127
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
128
+ : offsetof(CPUTLBEntry, addr_write));
129
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
130
+ offsetof(CPUTLBEntry, addend));
131
+
132
+ /*
133
+ * For aligned accesses, we check the first byte and include
134
+ * the alignment bits within the address. For unaligned access,
135
+ * we check that we don't cross pages using the address of the
136
+ * last byte of the access.
137
+ */
138
+ if (a_mask >= s_mask) {
139
+ addr_adj = addr_reg;
140
+ } else {
141
+ addr_adj = TCG_REG_TMP2;
142
+ tcg_out_insn(s, 3401, ADDI, addr_type,
143
+ addr_adj, addr_reg, s_mask - a_mask);
144
+ }
145
+ compare_mask = (uint64_t)s->page_mask | a_mask;
146
+
147
+ /* Store the page mask part of the address into TMP2. */
148
+ tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
149
+ addr_adj, compare_mask);
150
+
151
+ /* Perform the address comparison. */
152
+ tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
153
+
154
+ /* If not equal, we jump to the slow path. */
155
ldst->label_ptr[0] = s->code_ptr;
156
tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
157
- }
50
- }
158
51
159
- if (guest_base || addr_type == TCG_TYPE_I32) {
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
160
- h->base = TCG_REG_GUEST_BASE;
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
161
+ h->base = TCG_REG_TMP1;
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
162
h->index = addr_reg;
55
index XXXXXXX..XXXXXXX 100644
163
h->index_ext = addr_type;
56
--- a/fpu/softfloat-parts.c.inc
164
} else {
57
+++ b/fpu/softfloat-parts.c.inc
165
- h->base = addr_reg;
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
166
- h->index = TCG_REG_XZR;
59
a->exp = p_widen.exp;
167
- h->index_ext = TCG_TYPE_I64;
60
168
+ if (a_mask) {
61
return_normal:
169
+ ldst = new_ldst_label(s);
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
170
+
63
- if (flags & float_muladd_halve_result) {
171
+ ldst->is_ld = is_ld;
64
- a->exp -= 1;
172
+ ldst->oi = oi;
65
- }
173
+ ldst->addrlo_reg = addr_reg;
66
a->exp += scale;
174
+
67
finish_sign:
175
+ /* tst addr, #mask */
68
if (flags & float_muladd_negate_result) {
176
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
177
+
178
+ /* b.ne slow_path */
179
+ ldst->label_ptr[0] = s->code_ptr;
180
+ tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
181
+ }
182
+
183
+ if (guest_base || addr_type == TCG_TYPE_I32) {
184
+ h->base = TCG_REG_GUEST_BASE;
185
+ h->index = addr_reg;
186
+ h->index_ext = addr_type;
187
+ } else {
188
+ h->base = addr_reg;
189
+ h->index = TCG_REG_XZR;
190
+ h->index_ext = TCG_TYPE_I64;
191
+ }
192
}
193
-#endif
194
195
return ldst;
196
}
197
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
198
tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
199
CPU_TEMP_BUF_NLONGS * sizeof(long));
200
201
-#if !defined(CONFIG_SOFTMMU)
202
- /*
203
- * Note that XZR cannot be encoded in the address base register slot,
204
- * as that actually encodes SP. Depending on the guest, we may need
205
- * to zero-extend the guest address via the address index register slot,
206
- * therefore we need to load even a zero guest base into a register.
207
- */
208
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
209
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
210
-#endif
211
+ if (!tcg_use_softmmu) {
212
+ /*
213
+ * Note that XZR cannot be encoded in the address base register slot,
214
+ * as that actually encodes SP. Depending on the guest, we may need
215
+ * to zero-extend the guest address via the address index register slot,
216
+ * therefore we need to load even a zero guest base into a register.
217
+ */
218
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
219
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
220
+ }
221
222
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
223
tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);
224
--
69
--
225
2.34.1
70
2.43.0
226
71
227
72
diff view generated by jsdifflib
The prefixed instructions have a pc-relative form to use here.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
     if (USE_REG_TB) {
         rel = R_PPC_ADDR16;
         add = ppc_tbrel_diff(s, NULL);
+    } else if (have_isa_3_10) {
+        if (type == TCG_TYPE_V64) {
+            tcg_out_8ls_d(s, PLXSD, ret & 31, 0, 0, 1);
+            new_pool_label(s, val, R_PPC64_PCREL34, s->code_ptr - 2, 0);
+        } else {
+            tcg_out_8ls_d(s, PLXV, ret & 31, 0, 0, 1);
+            new_pool_l2(s, R_PPC64_PCREL34, s->code_ptr - 2, 0, val, val);
+        }
+        return;
     } else if (have_isa_3_00) {
         tcg_out_addpcis(s, TCG_REG_TMP1, 0);
         rel = R_PPC_REL14;
--
2.34.1

This rounding mode is used by Hexagon.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat-types.h | 2 ++
 fpu/softfloat-parts.c.inc     | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat-types.h
+++ b/include/fpu/softfloat-types.h
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
     float_round_to_odd = 5,
     /* Not an IEEE rounding mode: round to closest odd, overflow to inf */
     float_round_to_odd_inf = 6,
+    /* Not an IEEE rounding mode: round to nearest even, overflow to max */
+    float_round_nearest_even_max = 7,
 } FloatRoundMode;
 
 /*
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
     int exp, flags = 0;
 
     switch (s->float_rounding_mode) {
+    case float_round_nearest_even_max:
+        overflow_norm = true;
+        /* fall through */
     case float_round_nearest_even:
         if (N > 64 && frac_lsb == 0) {
             inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
--
2.43.0
diff view generated by jsdifflib
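A rough illustration of the new rounding mode (a sketch only, assuming the softfloat headers above; saturating_mul is an invented name): with float_round_nearest_even_max selected, a result that would overflow is returned as the largest finite normal rather than as infinity, which is what the Hexagon helpers later in the series expect.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"
#include "fpu/softfloat-helpers.h"

static float32 saturating_mul(float32 a, float32 b, float_status *st)
{
    /* Round to nearest even, but saturate to the maximum normal on overflow. */
    set_float_rounding_mode(float_round_nearest_even_max, st);
    return float32_mul(a, b, st);
}

For two large finite inputs the product should come back as 0x7f7fffff (FLT_MAX) instead of +inf.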
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
         tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
         return;
     }
+    if (have_isa_3_00) {
+        tcg_out_addpcis(s, TCG_REG_TMP2, 0);
+        new_pool_label(s, arg, R_PPC_REL14, s->code_ptr, 0);
+        tcg_out32(s, LD | TAI(ret, TCG_REG_TMP2, 0));
+        return;
+    }
 
     tmp = arg >> 31 >> 1;
     tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
     if (USE_REG_TB) {
         rel = R_PPC_ADDR16;
         add = ppc_tbrel_diff(s, NULL);
+    } else if (have_isa_3_00) {
+        tcg_out_addpcis(s, TCG_REG_TMP1, 0);
+        rel = R_PPC_REL14;
+        add = 0;
     } else {
         rel = R_PPC_ADDR32;
         add = 0;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
     if (USE_REG_TB) {
         tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
         load_insn |= RA(TCG_REG_TB);
+    } else if (have_isa_3_00) {
+        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
     } else {
         tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
         tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
--
2.34.1

Certain Hexagon instructions suppress changes to the result
when the product of fma() is a true zero.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat.h   | 5 +++++
 fpu/softfloat.c           | 3 +++
 fpu/softfloat-parts.c.inc | 4 +++-
 3 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
 | Using these differs from negating an input or output before calling
 | the muladd function in that this means that a NaN doesn't have its
 | sign bit inverted before it is propagated.
+|
+| With float_muladd_suppress_add_product_zero, if A or B is zero
+| such that the product is a true zero, then return C without addition.
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
 *----------------------------------------------------------------------------*/
 enum {
     float_muladd_negate_c = 1,
     float_muladd_negate_product = 2,
     float_muladd_negate_result = 4,
+    float_muladd_suppress_add_product_zero = 8,
 };
 
 /*----------------------------------------------------------------------------
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
     if (unlikely(!can_use_fpu(s))) {
         goto soft;
     }
+    if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
+        goto soft;
+    }
 
     float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
     if (unlikely(!f32_is_zon3(ua, ub, uc))) {
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
         goto return_normal;
     }
     if (c->cls == float_class_zero) {
-        if (a->sign != c->sign) {
+        if (flags & float_muladd_suppress_add_product_zero) {
+            a->sign = c->sign;
+        } else if (a->sign != c->sign) {
             goto return_sub_zero;
         }
         goto return_zero;
--
2.43.0
diff view generated by jsdifflib
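A small, assumption-laden sketch of the behaviour the new flag selects (the demo name is invented; QEMU softfloat headers assumed): when the product a * b is a true zero, the addend c is returned unchanged, so the sign of a zero c survives, whereas the normal rule folds +0 + -0 to +0 under round-to-nearest.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

static void demo_suppress_add_product_zero(void)
{
    float_status st = { };
    float32 pzero = float32_zero;                        /* +0.0f */
    float32 mzero = float32_set_sign(float32_zero, 1);   /* -0.0f */
    float32 two = int32_to_float32(2, &st);

    /* Plain muladd: (+0 * 2) + (-0) folds to +0 under nearest-even. */
    float32 r1 = float32_muladd(pzero, two, mzero, 0, &st);

    /* With the flag, the true-zero product leaves c untouched: -0. */
    float32 r2 = float32_muladd(pzero, two, mzero,
                                float_muladd_suppress_add_product_zero, &st);

    g_assert(!float32_is_neg(r1));
    g_assert(float32_is_neg(r2));
}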
New patch

There are no special cases for this instruction.
Remove internal_mpyf as unused.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.h   | 1 -
 target/hexagon/fma_emu.c   | 8 --------
 target/hexagon/op_helper.c | 2 +-
 3 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.h
+++ b/target/hexagon/fma_emu.h
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
 float32 infinite_float32(uint8_t sign);
 float32 internal_fmafx(float32 a, float32 b, float32 c,
                        int scale, float_status *fp_status);
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status);
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
     return accum_round_float32(result, fp_status);
 }
 
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
-{
-    if (float32_is_zero(a) || float32_is_zero(b)) {
-        return float32_mul(a, b, fp_status);
-    }
-    return internal_fmafx(a, b, float32_zero, 0, fp_status);
-}
-
 float64 internal_mpyhh(float64 a, float64 b,
                        unsigned long long int accumulated,
                        float_status *fp_status)
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
 {
     float32 RdV;
     arch_fpop_start(env);
-    RdV = internal_mpyf(RsV, RtV, &env->fp_status);
+    RdV = float32_mul(RsV, RtV, &env->fp_status);
     arch_fpop_end(env);
     return RdV;
 }
--
2.43.0
diff view generated by jsdifflib
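For context, a brief sketch (illustrative only; sfmpy_like is an invented name) of why the wrapper could be dropped: internal_mpyf() only routed zero operands to float32_mul() and everything else to the fused path with a zero addend, and a plain float32_mul() covers both cases, including the correctly signed zero for 0 * finite.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

/* Single-precision multiply as HELPER(sfmpy) now performs it. */
static float32 sfmpy_like(float32 a, float32 b, float_status *st)
{
    /* No zero special case is needed: IEEE multiply already returns
     * a zero of the correct sign when either operand is zero. */
    return float32_mul(a, b, st);
}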
New patch

There are no special cases for this instruction.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
                       float32 RsV, float32 RtV)
 {
     arch_fpop_start(env);
-    RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
+    RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
     arch_fpop_end(env);
     return RxV;
 }
--
2.43.0
diff view generated by jsdifflib
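A short sketch of the call that replaces internal_fmafx() above (invented helper names, softfloat headers assumed): float32_muladd() computes a * b + c as a fused operation, rounding once, whereas a separate multiply and add would round twice.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

/* Two roundings: the product is rounded before the addition. */
static float32 mul_then_add(float32 a, float32 b, float32 c, float_status *st)
{
    return float32_add(float32_mul(a, b, st), c, st);
}

/* One rounding: the infinitely precise a * b + c is rounded once,
 * which is the semantics Hexagon's sffma requires. */
static float32 fused(float32 a, float32 b, float32 c, float_status *st)
{
    return float32_muladd(a, b, c, 0, st);
}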
New patch

There are no special cases for this instruction. Since hexagon
always uses default-nan mode, explicitly negating the first
input is unnecessary. Use float_muladd_negate_product instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
 float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
                       float32 RsV, float32 RtV)
 {
-    float32 neg_RsV;
     arch_fpop_start(env);
-    neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
-    RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
+    RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
+                         &env->fp_status);
     arch_fpop_end(env);
     return RxV;
 }
--
2.43.0
diff view generated by jsdifflib
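A sketch of the flag used above (invented helper names; softfloat headers assumed): float_muladd_negate_product negates a * b before the addition, so the call computes c - a * b in one rounding, the same arithmetic the removed float32_set_sign() dance produced by hand; dropping the manual negation is safe here because default-NaN mode makes the sign of a negated NaN input unobservable.

#include "qemu/osdep.h"
#include "fpu/softfloat.h"

/* Fused multiply-subtract: c - a * b with a single rounding. */
static float32 fms_like(float32 a, float32 b, float32 c, float_status *st)
{
    return float32_muladd(a, b, c, float_muladd_negate_product, st);
}

/* The older spelling: flip the sign of one operand, then plain muladd. */
static float32 fms_by_hand(float32 a, float32 b, float32 c, float_status *st)
{
    float32 neg_a = float32_set_sign(a, float32_is_neg(a) ? 0 : 1);
    return float32_muladd(neg_a, b, c, 0, st);
}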
New patch

This instruction has a special case that 0 * x + c returns c
without the normal sign folding that comes with 0 + -0.
Use the new float_muladd_suppress_add_product_zero to
describe this.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/op_helper.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/op_helper.c
+++ b/target/hexagon/op_helper.c
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
 float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
                          float32 RsV, float32 RtV, float32 PuV)
 {
-    size4s_t tmp;
     arch_fpop_start(env);
-    RxV = check_nan(RxV, RxV, &env->fp_status);
-    RxV = check_nan(RxV, RsV, &env->fp_status);
-    RxV = check_nan(RxV, RtV, &env->fp_status);
-    tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
-    if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
-        RxV = tmp;
-    }
+    RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
+                                float_muladd_suppress_add_product_zero,
+                                &env->fp_status);
     arch_fpop_end(env);
     return RxV;
 }
--
2.43.0
There are multiple special cases for this instruction.
(1) The saturate to normal maximum instead of overflow to infinity is
    handled by the new float_round_nearest_even_max rounding mode.
(2) The 0 * n + c special case is handled by the new
    float_muladd_suppress_add_product_zero flag.
(3) The Inf - Inf -> 0 special case can be detected after the fact
    by examining float_flag_invalid_isi.
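
In outline, the replacement helper added below does the following (negate
parameter omitted; float_round_nearest_even_max and
float_muladd_suppress_add_product_zero come from earlier patches in this
series, and sffma_lib_outline is an invented name for this sketch):

    static float32 sffma_lib_outline(CPUHexagonState *env, float32 RxV,
                                     float32 RsV, float32 RtV)
    {
        int flags;

        arch_fpop_start(env);
        /* (1) saturate to the normal maximum instead of overflowing */
        set_float_rounding_mode(float_round_nearest_even_max,
                                &env->fp_status);
        /* (2) 0 * n + c returns c unchanged */
        RxV = float32_muladd(RsV, RtV, RxV,
                             float_muladd_suppress_add_product_zero,
                             &env->fp_status);
        /* (3) detect Inf - Inf after the fact; flags are suppressed */
        flags = get_float_exception_flags(&env->fp_status);
        if (flags) {
            set_float_exception_flags(0, &env->fp_status);
            if (flags & float_flag_invalid_isi) {
                RxV = 0;
            }
        }
        arch_fpop_end(env);
        return RxV;
    }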
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target.c.inc | 13 +++++++++++++
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
4
1 file changed, 13 insertions(+)
13
1 file changed, 26 insertions(+), 79 deletions(-)
5
14
6
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
7
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/ppc/tcg-target.c.inc
17
--- a/target/hexagon/op_helper.c
9
+++ b/tcg/ppc/tcg-target.c.inc
18
+++ b/target/hexagon/op_helper.c
10
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
11
return;
20
return RxV;
12
}
21
}
13
22
14
+ /* Load addresses within 2GB with 2 insns. */
23
-static bool is_zero_prod(float32 a, float32 b)
15
+ if (have_isa_3_00) {
24
-{
16
+ intptr_t hi = tcg_pcrel_diff(s, (void *)arg) - 4;
25
- return ((float32_is_zero(a) && is_finite(b)) ||
17
+ int16_t lo = hi;
26
- (float32_is_zero(b) && is_finite(a)));
27
-}
28
-
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
30
-{
31
- float32 ret = dst;
32
- if (float32_is_any_nan(x)) {
33
- if (extract32(x, 22, 1) == 0) {
34
- float_raise(float_flag_invalid, fp_status);
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
43
{
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
45
return RxV;
46
}
47
48
-static bool is_inf_prod(int32_t a, int32_t b)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
50
+ float32 RsV, float32 RtV, int negate)
51
{
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
18
+
56
+
19
+ hi -= lo;
57
+ arch_fpop_start(env);
20
+ if (hi == (int32_t)hi) {
58
+
21
+ tcg_out_addpcis(s, TCG_REG_TMP2, hi);
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
22
+ tcg_out32(s, ADDI | TAI(ret, TCG_REG_TMP2, lo));
60
+ RxV = float32_muladd(RsV, RtV, RxV,
23
+ return;
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
63
+
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
24
+ }
72
+ }
25
+ }
73
+ }
26
+
74
+
27
/* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
75
+ arch_fpop_end(env);
28
if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
76
+ return RxV;
29
tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
77
}
78
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
80
float32 RsV, float32 RtV)
81
{
82
- bool infinp;
83
- bool infminusinf;
84
- float32 tmp;
85
-
86
- arch_fpop_start(env);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
111
}
112
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
114
float32 RsV, float32 RtV)
115
{
116
- bool infinp;
117
- bool infminusinf;
118
- float32 tmp;
119
-
120
- arch_fpop_start(env);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
146
}
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
30
--
149
--
31
2.34.1
150
2.43.0
The function is now unused.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op.c | 16 ++++++++--------
6
target/hexagon/fma_emu.h | 2 -
4
1 file changed, 8 insertions(+), 8 deletions(-)
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
5
9
6
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
7
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/tcg-op.c
12
--- a/target/hexagon/fma_emu.h
9
+++ b/tcg/tcg-op.c
13
+++ b/target/hexagon/fma_emu.h
10
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
11
tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2);
15
}
12
} else if (TCG_TARGET_HAS_div2_i32) {
16
int32_t float32_getexp(float32 f32);
13
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
17
float32 infinite_float32(uint8_t sign);
14
- tcg_gen_movi_i32(t0, 0);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
15
- tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2);
19
- int scale, float_status *fp_status);
16
+ TCGv_i32 zero = tcg_constant_i32(0);
20
float64 internal_mpyhh(float64 a, float64 b,
17
+ tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, zero, arg2);
21
unsigned long long int accumulated,
18
tcg_temp_free_i32(t0);
22
float_status *fp_status);
19
} else {
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
20
gen_helper_divu_i32(ret, arg1, arg2);
24
index XXXXXXX..XXXXXXX 100644
21
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
25
--- a/target/hexagon/fma_emu.c
22
tcg_temp_free_i32(t0);
26
+++ b/target/hexagon/fma_emu.c
23
} else if (TCG_TARGET_HAS_div2_i32) {
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
24
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
28
return -1;
25
- tcg_gen_movi_i32(t0, 0);
29
}
26
- tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2);
30
27
+ TCGv_i32 zero = tcg_constant_i32(0);
31
-static uint64_t float32_getmant(float32 f32)
28
+ tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, zero, arg2);
32
-{
29
tcg_temp_free_i32(t0);
33
- Float a = { .i = f32 };
30
} else {
34
- if (float32_is_normal(f32)) {
31
gen_helper_remu_i32(ret, arg1, arg2);
35
- return a.mant | 1ULL << 23;
32
@@ -XXX,XX +XXX,XX @@ void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
36
- }
33
tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2);
37
- if (float32_is_zero(f32)) {
34
} else if (TCG_TARGET_HAS_div2_i64) {
38
- return 0;
35
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
39
- }
36
- tcg_gen_movi_i64(t0, 0);
40
- if (float32_is_denormal(f32)) {
37
- tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2);
41
- return a.mant;
38
+ TCGv_i64 zero = tcg_constant_i64(0);
42
- }
39
+ tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, zero, arg2);
43
- return ~0ULL;
40
tcg_temp_free_i64(t0);
44
-}
41
} else {
45
-
42
gen_helper_divu_i64(ret, arg1, arg2);
46
int32_t float32_getexp(float32 f32)
43
@@ -XXX,XX +XXX,XX @@ void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
47
{
44
tcg_temp_free_i64(t0);
48
Float a = { .i = f32 };
45
} else if (TCG_TARGET_HAS_div2_i64) {
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
46
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
50
}
47
- tcg_gen_movi_i64(t0, 0);
51
48
- tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2);
52
/* Return a maximum finite value with the requested sign */
49
+ TCGv_i64 zero = tcg_constant_i64(0);
53
-static float32 maxfinite_float32(uint8_t sign)
50
+ tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, zero, arg2);
54
-{
51
tcg_temp_free_i64(t0);
55
- if (sign) {
52
} else {
56
- return make_float32(SF_MINUS_MAXF);
53
gen_helper_remu_i64(ret, arg1, arg2);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
54
--
219
--
55
2.34.1
220
2.43.0
1
From: Mike Frysinger <vapier@gentoo.org>
1
This massive macro is now only used once.
2
Expand it for use only by float64.
2
3
3
Use of the API was removed a while back, but the define wasn't.
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Message-Id: <20231015010046.16020-1-vapier@gentoo.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
include/tcg/tcg-op.h | 2 --
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
11
1 file changed, 2 deletions(-)
8
1 file changed, 127 insertions(+), 128 deletions(-)
12
9
13
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-op.h
12
--- a/target/hexagon/fma_emu.c
16
+++ b/include/tcg/tcg-op.h
13
+++ b/target/hexagon/fma_emu.c
17
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
18
typedef TCGv_i32 TCGv;
15
}
19
#define tcg_temp_new() tcg_temp_new_i32()
16
20
#define tcg_global_mem_new tcg_global_mem_new_i32
17
/* Return a maximum finite value with the requested sign */
21
-#define tcg_temp_free tcg_temp_free_i32
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
22
#define tcgv_tl_temp tcgv_i32_temp
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
23
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
20
-{ \
24
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
25
@@ -XXX,XX +XXX,XX @@ typedef TCGv_i32 TCGv;
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
26
typedef TCGv_i64 TCGv;
23
- /* result zero */ \
27
#define tcg_temp_new() tcg_temp_new_i64()
24
- switch (fp_status->float_rounding_mode) { \
28
#define tcg_global_mem_new tcg_global_mem_new_i64
25
- case float_round_down: \
29
-#define tcg_temp_free tcg_temp_free_i64
26
- return zero_##SUFFIX(1); \
30
#define tcgv_tl_temp tcgv_i64_temp
27
- default: \
31
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
28
- return zero_##SUFFIX(0); \
32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
271
}
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
33
--
278
--
34
2.34.1
279
2.43.0
35
36
This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.
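
For illustration only (not part of the patch): extract32 reads a field from
the raw representation and is independent of host byte order, unlike the C
bit-field layout of the union being removed. sf_exp_sketch is an invented
name; extract32 comes from qemu/bitops.h.

    #include "qemu/bitops.h"

    /* IEEE single precision: sign[31], exponent[30:23], mantissa[22:0]. */
    static inline uint32_t sf_exp_sketch(uint32_t bits)
    {
        return extract32(bits, 23, 8);   /* same result on any host */
    }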
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target.c.inc | 3 +++
7
target/hexagon/fma_emu.c | 16 +++-------------
4
1 file changed, 3 insertions(+)
8
1 file changed, 3 insertions(+), 13 deletions(-)
5
9
6
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
7
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
9
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
10
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
14
@@ -XXX,XX +XXX,XX @@ typedef union {
11
if (USE_REG_TB) {
15
};
12
ptrdiff_t offset = ppc_tbrel_diff(s, (void *)ptr);
16
} Double;
13
tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
17
14
+ } else if (have_isa_3_10) {
18
-typedef union {
15
+ ptrdiff_t offset = tcg_pcrel_diff_for_prefix(s, (void *)ptr);
19
- float f;
16
+ tcg_out_8ls_d(s, PLD, TCG_REG_TMP1, 0, offset, 1);
20
- uint32_t i;
17
} else if (have_isa_3_00) {
21
- struct {
18
ptrdiff_t offset = tcg_pcrel_diff(s, (void *)ptr) - 4;
22
- uint32_t mant:23;
19
lo = offset;
23
- uint32_t exp:8;
24
- uint32_t sign:1;
25
- };
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
32
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
20
--
47
--
21
2.34.1
48
2.43.0
1
From: Jordan Niethe <jniethe5@gmail.com>
1
This structure, with bitfields, is incorrect for big-endian.
2
Use extract64 and deposit64 instead.
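
For illustration only: the same idea for the double-precision fields, using
extract64/deposit64 from qemu/bitops.h, which are byte-order independent;
df_mant_sketch and df_pack_sketch are invented names.

    #include "qemu/bitops.h"

    /* IEEE double precision: sign[63], exponent[62:52], mantissa[51:0]. */
    static inline uint64_t df_mant_sketch(uint64_t bits)
    {
        return extract64(bits, 0, 52);
    }

    static inline uint64_t df_pack_sketch(uint64_t sign, uint64_t exp,
                                          uint64_t mant)
    {
        uint64_t r = deposit64(mant, 52, 11, exp);
        return deposit64(r, 63, 1, sign);
    }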
2
3
3
Direct branch patching was disabled when using TCG_REG_TB in commit
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
736a1588c1 ("tcg/ppc: Fix race in goto_tb implementation").
5
6
The issue with direct branch patching with TCG_REG_TB is the lack of
7
synchronization between the new TCG_REG_TB being established and the
8
direct branch being patched in.
9
10
If each translation block is responsible for establishing its own
11
TCG_REG_TB then there can be no synchronization issue.
12
13
Make each translation block begin by setting up its own TCG_REG_TB.
14
Use the preferred 'bcl 20,31,$+4' sequence.
15
16
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
17
[rth: Split out tcg_out_tb_start, power9 addpcis]
18
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
19
---
6
---
20
tcg/ppc/tcg-target.c.inc | 48 ++++++++++++++--------------------------
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
21
1 file changed, 17 insertions(+), 31 deletions(-)
8
1 file changed, 16 insertions(+), 30 deletions(-)
22
9
23
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
26
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
14
@@ -XXX,XX +XXX,XX @@
28
15
29
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
16
#define WAY_BIG_EXP 4096
30
tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
17
31
- if (USE_REG_TB) {
18
-typedef union {
32
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
19
- double f;
33
- }
20
- uint64_t i;
34
tcg_out32(s, BCCTR | BO_ALWAYS);
21
- struct {
35
22
- uint64_t mant:52;
36
/* Epilogue */
23
- uint64_t exp:11;
37
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
24
- uint64_t sign:1;
38
25
- };
39
static void tcg_out_tb_start(TCGContext *s)
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
40
{
29
{
41
- /* nothing to do */
30
- Double a = { .i = f64 };
42
+ /* Load TCG_REG_TB. */
31
+ uint64_t mant = extract64(f64, 0, 52);
43
+ if (USE_REG_TB) {
32
if (float64_is_normal(f64)) {
44
+ /* bcl 20,31,$+4 (preferred form for getting nia) */
33
- return a.mant | 1ULL << 52;
45
+ tcg_out32(s, BC | BO_ALWAYS | BI(7, CR_SO) | 0x4 | LK);
34
+ return mant | 1ULL << 52;
46
+ tcg_out32(s, MFSPR | RT(TCG_REG_TB) | LR);
35
}
47
+ tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, -4));
36
if (float64_is_zero(f64)) {
48
+ }
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
49
}
44
}
50
45
51
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
46
int32_t float64_getexp(float64 f64)
52
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
53
{
47
{
54
uintptr_t ptr = get_jmp_target_addr(s, which);
48
- Double a = { .i = f64 };
55
49
+ int exp = extract64(f64, 52, 11);
56
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
50
if (float64_is_normal(f64)) {
57
+ set_jmp_insn_offset(s, which);
51
- return a.exp;
58
+ tcg_out32(s, NOP);
52
+ return exp;
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
64
+ uint64_t ret;
59
+
65
+
60
+ /* When branch is out of range, fall through to indirect. */
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
61
if (USE_REG_TB) {
67
&& ((a.guard | a.round | a.sticky) == 0)) {
62
ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
68
/* result zero */
63
- tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
64
-
70
}
65
- /* TODO: Use direct branches when possible. */
66
- set_jmp_insn_offset(s, which);
67
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
68
-
69
- tcg_out32(s, BCCTR | BO_ALWAYS);
70
-
71
- /* For the unlinked case, need to reset TCG_REG_TB. */
72
- set_jmp_reset_offset(s, which);
73
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
74
- -tcg_current_code_size(s));
75
+ tcg_out_mem_long(s, LD, LDX, TCG_REG_TMP1, TCG_REG_TB, offset);
76
} else {
77
- /* Direct branch will be patched by tb_target_set_jmp_target. */
78
- set_jmp_insn_offset(s, which);
79
- tcg_out32(s, NOP);
80
-
81
- /* When branch is out of range, fall through to indirect. */
82
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
83
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
84
- tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
85
- tcg_out32(s, BCCTR | BO_ALWAYS);
86
- set_jmp_reset_offset(s, which);
87
}
71
}
88
+
72
/* Underflow? */
89
+ tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
90
+ tcg_out32(s, BCCTR | BO_ALWAYS);
74
+ ret = int128_getlo(a.mant);
91
+ set_jmp_reset_offset(s, which);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
92
}
97
}
93
98
94
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
99
float64 internal_mpyhh(float64 a, float64 b,
95
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
96
intptr_t diff = addr - jmp_rx;
97
tcg_insn_unit insn;
98
99
- if (USE_REG_TB) {
100
- return;
101
- }
102
-
103
if (in_range_b(diff)) {
104
insn = B | (diff & 0x3fffffc);
105
} else {
106
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
107
switch (opc) {
108
case INDEX_op_goto_ptr:
109
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
110
- if (USE_REG_TB) {
111
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
112
- }
113
tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
114
tcg_out32(s, BCCTR | BO_ALWAYS);
115
break;
116
--
100
--
117
2.34.1
101
2.43.0
No need to open-code 64x64->128-bit multiplication.
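
For illustration only: host-utils already provides the full 64x64->128
multiply, so the hand-rolled 32-bit partial products can be replaced by one
call. mul_6464_sketch is an invented name; mulu64 and int128_make128 come
from qemu/host-utils.h and qemu/int128.h.

    #include "qemu/host-utils.h"
    #include "qemu/int128.h"

    static Int128 mul_6464_sketch(uint64_t a, uint64_t b)
    {
        uint64_t lo, hi;

        mulu64(&lo, &hi, a, b);          /* unsigned 64 x 64 -> 128 */
        return int128_make128(lo, hi);
    }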
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/riscv/tcg-target.c.inc | 185 +++++++++++++++++++------------------
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
4
1 file changed, 94 insertions(+), 91 deletions(-)
7
1 file changed, 3 insertions(+), 29 deletions(-)
5
8
6
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
7
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/riscv/tcg-target.c.inc
11
--- a/target/hexagon/fma_emu.c
9
+++ b/tcg/riscv/tcg-target.c.inc
12
+++ b/target/hexagon/fma_emu.c
10
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
11
aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
14
return -1;
12
a_mask = (1u << aa.align) - 1;
15
}
13
16
14
-#ifdef CONFIG_SOFTMMU
17
-static uint32_t int128_getw0(Int128 x)
15
- unsigned s_bits = opc & MO_SIZE;
18
-{
16
- unsigned s_mask = (1u << s_bits) - 1;
19
- return int128_getlo(x);
17
- int mem_index = get_mmuidx(oi);
20
-}
18
- int fast_ofs = tlb_mask_table_ofs(s, mem_index);
19
- int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
20
- int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
21
- int compare_mask;
22
- TCGReg addr_adj;
23
+ if (tcg_use_softmmu) {
24
+ unsigned s_bits = opc & MO_SIZE;
25
+ unsigned s_mask = (1u << s_bits) - 1;
26
+ int mem_index = get_mmuidx(oi);
27
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
28
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
29
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
30
+ int compare_mask;
31
+ TCGReg addr_adj;
32
33
- ldst = new_ldst_label(s);
34
- ldst->is_ld = is_ld;
35
- ldst->oi = oi;
36
- ldst->addrlo_reg = addr_reg;
37
-
21
-
38
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
22
-static uint32_t int128_getw1(Int128 x)
39
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
23
-{
24
- return int128_getlo(x) >> 32;
25
-}
40
-
26
-
41
- tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
42
- s->page_bits - CPU_TLB_ENTRY_BITS);
28
{
43
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
29
- Int128 a, b;
44
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
45
-
39
-
46
- /*
40
- pp1s = pp1a + pp1b;
47
- * For aligned accesses, we check the first byte and include the alignment
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
48
- * bits within the address. For unaligned access, we check that we don't
42
- pp2 += (1ULL << 32);
49
- * cross pages using the address of the last byte of the access.
50
- */
51
- addr_adj = addr_reg;
52
- if (a_mask < s_mask) {
53
- addr_adj = TCG_REG_TMP0;
54
- tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
55
- addr_adj, addr_reg, s_mask - a_mask);
56
- }
43
- }
57
- compare_mask = s->page_mask | a_mask;
44
- uint64_t ret_low = pp0 + (pp1s << 32);
58
- if (compare_mask == sextreg(compare_mask, 0, 12)) {
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
59
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
46
- pp2 += 1;
60
- } else {
61
- tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
62
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
63
- }
47
- }
64
-
48
-
65
- /* Load the tlb comparator and the addend. */
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
66
- QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
50
+ mulu64(&l, &h, ai, bi);
67
- tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
51
+ return int128_make128(l, h);
68
- is_ld ? offsetof(CPUTLBEntry, addr_read)
69
- : offsetof(CPUTLBEntry, addr_write));
70
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
71
- offsetof(CPUTLBEntry, addend));
72
-
73
- /* Compare masked address with the TLB entry. */
74
- ldst->label_ptr[0] = s->code_ptr;
75
- tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
76
-
77
- /* TLB Hit - translate address using addend. */
78
- if (addr_type != TCG_TYPE_I32) {
79
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
80
- } else if (have_zba) {
81
- tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
82
- } else {
83
- tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
84
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2);
85
- }
86
- *pbase = TCG_REG_TMP0;
87
-#else
88
- TCGReg base;
89
-
90
- if (a_mask) {
91
ldst = new_ldst_label(s);
92
ldst->is_ld = is_ld;
93
ldst->oi = oi;
94
ldst->addrlo_reg = addr_reg;
95
96
- /* We are expecting alignment max 7, so we can always use andi. */
97
- tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
98
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
99
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
100
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
101
102
- ldst->label_ptr[0] = s->code_ptr;
103
- tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
104
- }
105
+ tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
106
+ s->page_bits - CPU_TLB_ENTRY_BITS);
107
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
108
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
109
110
- if (guest_base != 0) {
111
- base = TCG_REG_TMP0;
112
- if (addr_type != TCG_TYPE_I32) {
113
- tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
114
- } else if (have_zba) {
115
- tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
116
- } else {
117
- tcg_out_ext32u(s, base, addr_reg);
118
- tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
119
+ /*
120
+ * For aligned accesses, we check the first byte and include the
121
+ * alignment bits within the address. For unaligned access, we
122
+ * check that we don't cross pages using the address of the last
123
+ * byte of the access.
124
+ */
125
+ addr_adj = addr_reg;
126
+ if (a_mask < s_mask) {
127
+ addr_adj = TCG_REG_TMP0;
128
+ tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
129
+ addr_adj, addr_reg, s_mask - a_mask);
130
}
131
- } else if (addr_type != TCG_TYPE_I32) {
132
- base = addr_reg;
133
+ compare_mask = s->page_mask | a_mask;
134
+ if (compare_mask == sextreg(compare_mask, 0, 12)) {
135
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
136
+ } else {
137
+ tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
138
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
139
+ }
140
+
141
+ /* Load the tlb comparator and the addend. */
142
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
143
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
144
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
145
+ : offsetof(CPUTLBEntry, addr_write));
146
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
147
+ offsetof(CPUTLBEntry, addend));
148
+
149
+ /* Compare masked address with the TLB entry. */
150
+ ldst->label_ptr[0] = s->code_ptr;
151
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
152
+
153
+ /* TLB Hit - translate address using addend. */
154
+ if (addr_type != TCG_TYPE_I32) {
155
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
156
+ } else if (have_zba) {
157
+ tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
158
+ addr_reg, TCG_REG_TMP2);
159
+ } else {
160
+ tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
161
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
162
+ TCG_REG_TMP0, TCG_REG_TMP2);
163
+ }
164
+ *pbase = TCG_REG_TMP0;
165
} else {
166
- base = TCG_REG_TMP0;
167
- tcg_out_ext32u(s, base, addr_reg);
168
+ TCGReg base;
169
+
170
+ if (a_mask) {
171
+ ldst = new_ldst_label(s);
172
+ ldst->is_ld = is_ld;
173
+ ldst->oi = oi;
174
+ ldst->addrlo_reg = addr_reg;
175
+
176
+ /* We are expecting alignment max 7, so we can always use andi. */
177
+ tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
178
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
179
+
180
+ ldst->label_ptr[0] = s->code_ptr;
181
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
182
+ }
183
+
184
+ if (guest_base != 0) {
185
+ base = TCG_REG_TMP0;
186
+ if (addr_type != TCG_TYPE_I32) {
187
+ tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
188
+ TCG_GUEST_BASE_REG);
189
+ } else if (have_zba) {
190
+ tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
191
+ TCG_GUEST_BASE_REG);
192
+ } else {
193
+ tcg_out_ext32u(s, base, addr_reg);
194
+ tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
195
+ }
196
+ } else if (addr_type != TCG_TYPE_I32) {
197
+ base = addr_reg;
198
+ } else {
199
+ base = TCG_REG_TMP0;
200
+ tcg_out_ext32u(s, base, addr_reg);
201
+ }
202
+ *pbase = base;
203
}
204
- *pbase = base;
205
-#endif
206
207
return ldst;
208
}
52
}
209
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
53
210
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
211
}
212
213
-#if !defined(CONFIG_SOFTMMU)
214
- if (guest_base) {
215
+ if (!tcg_use_softmmu && guest_base) {
216
tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
217
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
218
}
219
-#endif
220
221
/* Call generated code */
222
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
223
--
55
--
224
2.34.1
56
2.43.0
1
This appears to slightly improve performance on power9/10.
1
Initialize x with accumulated via direct assignment,
2
rather than multiplying by 1.
2
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
6
---
5
tcg/ppc/tcg-target.c.inc | 2 +-
7
target/hexagon/fma_emu.c | 2 +-
6
1 file changed, 1 insertion(+), 1 deletion(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
7
9
8
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
13
#define TCG_VEC_TMP2 TCG_REG_V1
15
float64_is_infinity(b)) {
14
16
return float64_mul(a, b, fp_status);
15
#define TCG_REG_TB TCG_REG_R31
17
}
16
-#define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
18
- x.mant = int128_mul_6464(accumulated, 1);
17
+#define USE_REG_TB (TCG_TARGET_REG_BITS == 64 && !have_isa_3_00)
19
+ x.mant = int128_make64(accumulated);
18
20
x.sticky = sticky;
19
/* Shorthand for size of a pointer. Avoid promotion to unsigned. */
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
20
#define SZP ((int)sizeof(void *))
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
21
--
23
--
22
2.34.1
24
2.43.0
1
Provide a define to allow !tcg_use_softmmu code paths to
1
Convert all targets simultaneously, as the gen_intermediate_code
2
compile in system mode, but require elimination.
2
function disappears from the target. While there are possible
3
workarounds, they're larger than simply performing the conversion.
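
As a sketch of the shape of the conversion (the 'foo' target, its
foo_tr_ops, foo_translate_init and DisasContext are placeholders, not code
from this series): the former global gen_intermediate_code entry point
becomes a per-target TCGCPUOps hook that wraps translator_loop.

    /* target/foo/translate.c */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc)
    {
        DisasContext dc = { };

        translator_loop(cs, tb, max_insns, pc, host_pc,
                        &foo_tr_ops, &dc.base);
    }

    /* target/foo/cpu.c */
    static const TCGCPUOps foo_tcg_ops = {
        .initialize     = foo_translate_init,
        .translate_code = foo_translate_code,
        /* ... */
    };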
3
4
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/tcg.c | 4 ++++
8
include/exec/translator.h | 14 --------------
8
1 file changed, 4 insertions(+)
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
10
target/alpha/cpu.h | 2 ++
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
9
71
10
diff --git a/tcg/tcg.c b/tcg/tcg.c
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
11
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg.c
74
--- a/include/exec/translator.h
13
+++ b/tcg/tcg.c
75
+++ b/include/exec/translator.h
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
76
@@ -XXX,XX +XXX,XX @@
15
static int tcg_out_ldst_finalize(TCGContext *s);
77
#include "qemu/bswap.h"
78
#include "exec/vaddr.h"
79
80
-/**
81
- * gen_intermediate_code
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@
#include "cpu.h"

void mips_tcg_init(void);
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
int print_insn_or1k(bfd_vma addr, disassemble_info *info);

#ifndef CONFIG_USER_ONLY
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;

/*****************************************************************************/
void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
+
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc);

diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);

#include "exec/cpu-all.h"
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,

/* translate.c */
void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void s390x_restore_state_to_opc(CPUState *cs,
const TranslationBlock *tb,
const uint64_t *data);
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
uintptr_t retaddr);

void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

#if !defined(CONFIG_USER_ONLY)
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,

/* translate.c */
void sparc_tcg_init(void);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

/* fop_helper.c */
target_ulong cpu_get_fsr(CPUSPARCState *);
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)

void cpu_state_reset(CPUTriCoreState *s);
void tricore_tcg_init(void);
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);

static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
uint64_t *cs_base, uint32_t *flags)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,

void xtensa_collect_sr_names(const XtensaConfig *config);
void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc);
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
void xtensa_breakpoint_handler(CPUState *cs);
void xtensa_register_core(XtensaConfigList *node);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)

if (!tcg_target_initialized) {
/* Check mandatory TCGCPUOps handlers */
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
#ifndef CONFIG_USER_ONLY
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
+ assert(tcg_ops->cpu_exec_halt);
+ assert(tcg_ops->cpu_exec_interrupt);
#endif /* !CONFIG_USER_ONLY */
- cpu->cc->tcg_ops->initialize();
+ assert(tcg_ops->translate_code);
+ tcg_ops->initialize();
tcg_target_initialized = true;
}

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,

tcg_func_start(tcg_ctx);

- tcg_ctx->cpu = env_cpu(env);
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+ CPUState *cs = env_cpu(env);
+ tcg_ctx->cpu = cs;
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
+
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/*
* Overflow of code_gen_buffer, or the current slice of it.
*
- * TODO: We don't need to re-do gen_intermediate_code, nor
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
* should we re-do the tcg optimization currently hidden
* inside tcg_gen_code. All that should be required is to
* flush the TBs, allocate a new TB, re-initialize it per
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {

static const TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
+ .translate_code = alpha_translate_code,
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
.restore_state_to_opc = alpha_restore_state_to_opc,

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
.tb_stop = alpha_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#ifdef CONFIG_TCG
static const TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)

static const TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
.tb_stop = arm_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {

static const TCGCPUOps avr_tcg_ops = {
.initialize = avr_cpu_tcg_init,
+ .translate_code = avr_cpu_translate_code,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
*
* - translate()
* - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
* - restore_state_to_opc()
*
*/
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
.tb_stop = avr_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)

static const TCGCPUOps hexagon_tcg_ops = {
.initialize = hexagon_translate_init,
+ .translate_code = hexagon_translate_code,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
};
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
.tb_stop = hexagon_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {

static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
#endif
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)

static const TCGCPUOps x86_tcg_ops = {
.initialize = tcg_x86_init,
+ .translate_code = x86_translate_code,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
.cpu_exec_enter = x86_cpu_exec_enter,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
.tb_stop = i386_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)

static const TCGCPUOps loongarch_tcg_ops = {
.initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
.tb_stop = loongarch_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {

static const TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
+ .translate_code = m68k_translate_code,
.restore_state_to_opc = m68k_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
.tb_stop = m68k_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {

static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
.restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0
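
For readers skimming the conversion: every target in this series follows the same two-step pattern that the hunks above repeat per architecture. A minimal sketch of the resulting shape, using a hypothetical "foo" target (foo_translate_code, foo_tr_ops, foo_tcg_ops and the other foo_* names below are illustrative placeholders, not code from this series):

/* target/foo/translate.c -- "foo" is a made-up target used only for illustration */
void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = { };

    /* Same body the target's gen_intermediate_code() had before the rename. */
    translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &ctx.base);
}

/* target/foo/cpu.c -- the per-target entry point is now a TCGCPUOps hook */
static const TCGCPUOps foo_tcg_ops = {
    .initialize           = foo_translate_init,
    .translate_code       = foo_translate_code,
    .restore_state_to_opc = foo_restore_state_to_opc,
};

accel/tcg then reaches the translator through cpu->cc->tcg_ops->translate_code(), as in the translate-all.c hunk, and tcg_exec_realizefn() asserts that every target has registered the hook.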