The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
17 | #include <inttypes.h>
| ^~~~~~~~~~~~
compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@

#include <stdint.h>
#include <stdbool.h>
-#include <inttypes.h>
#include <minilib.h>

#ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
int i;
bool ok = true;

- ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
- ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+ ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+ ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);

/* Run through the unsigned tests first */
for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
ok = do_signed_reads(true);
}

- ml_printf("Test data read: %"PRId32"\n", test_read_count);
- ml_printf("Test data write: %"PRId32"\n", test_write_count);
+ ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+ ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
return ok ? 0 : -1;
}
--
2.43.0

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)

static TCGv_i32 gen_cpu_index(void)
{
+ /*
+ * Optimize when we run with a single vcpu. All values using cpu_index,
+ * including scoreboard index, will be optimized out.
+ * User-mode calls tb_flush when setting this flag. In system-mode, all
+ * vcpus are created before generating code.
+ */
+ if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+ return tcg_constant_i32(current_cpu->cpu_index);
+ }
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0

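As an illustration only (not part of the patch above): once gen_cpu_index() can emit tcg_constant_i32(cpu_index), every per-vCPU address derived from it becomes a constant for the TCG optimizer. The following rough C analogy uses made-up names to show the same effect at the compiler level, a constant index lets the whole address computation fold away:

    #include <stdint.h>

    /* Hypothetical per-vCPU counters, analogous to a plugin scoreboard. */
    static uint64_t counters[8];

    /* Run-time index: the address must be computed on every access. */
    static inline uint64_t *slot_dynamic(unsigned cpu_index)
    {
        return &counters[cpu_index];
    }

    /* Constant index (the single-vCPU case): the computation folds to a
     * fixed address, which is what now happens to scoreboard accesses. */
    enum { SINGLE_CPU_INDEX = 0 };
    static inline uint64_t *slot_constant(void)
    {
        return &counters[SINGLE_CPU_INDEX];
    }
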
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}

+static void finish_bb(OptContext *ctx)
+{
+ /* We only optimize memory barriers across basic blocks. */
+ ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+ finish_bb(ctx);
+ /* We only optimize across extended basic blocks. */
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+ remove_mem_copy_all(ctx);
+}
+
static void finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;

- /*
- * We only optimize extended basic blocks. If the opcode ends a BB
- * and is not a conditional branch, reset all temp data.
- */
- if (def->flags & TCG_OPF_BB_END) {
- ctx->prev_mb = NULL;
- if (!(def->flags & TCG_OPF_COND_BRANCH)) {
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
- remove_mem_copy_all(ctx);
- }
- return;
- }
-
nb_oargs = def->nb_oargs;
for (i = 0; i < nb_oargs; i++) {
TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
if (i > 0) {
op->opc = INDEX_op_br;
op->args[0] = op->args[3];
+ finish_ebb(ctx);
+ } else {
+ finish_bb(ctx);
}
- return false;
+ return true;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
}
op->opc = INDEX_op_br;
op->args[0] = label;
- break;
+ finish_ebb(ctx);
+ return true;
}
- return false;
+
+ finish_bb(ctx);
+ return true;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64_VEC(xor):
done = fold_xor(&ctx, op);
break;
+ case INDEX_op_set_label:
+ case INDEX_op_br:
+ case INDEX_op_exit_tb:
+ case INDEX_op_goto_tb:
+ case INDEX_op_goto_ptr:
+ finish_ebb(&ctx);
+ done = true;
+ break;
default:
break;
}
--
2.43.0

There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

/* In flight values from optimization. */
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
uint64_t s_mask; /* mask of clrsb(value) bits */
TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
- uint64_t a_mask = ctx->a_mask;
uint64_t z_mask = ctx->z_mask;
uint64_t s_mask = ctx->s_mask;

@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
* type changing opcodes.
*/
if (ctx->type == TCG_TYPE_I32) {
- a_mask = (int32_t)a_mask;
z_mask = (int32_t)z_mask;
s_mask |= MAKE_64BIT_MASK(32, 32);
ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
if (z_mask == 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
}
+ return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input. Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+ if (ctx->type == TCG_TYPE_I32) {
+ a_mask = (uint32_t)a_mask;
+ }
if (a_mask == 0) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer affected bits from it.
*/
- if (arg_is_const(op->args[2])) {
- ctx->a_mask = z1 & ~z2;
+ if (arg_is_const(op->args[2]) &&
+ fold_affected_mask(ctx, op, z1 & ~z2)) {
+ return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
*/
if (arg_is_const(op->args[2])) {
uint64_t z2 = ~arg_info(op->args[2])->z_mask;
- ctx->a_mask = z1 & ~z2;
+ if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+ return true;
+ }
z1 &= z2;
}
ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)

z_mask_old = arg_info(op->args[1])->z_mask;
z_mask = extract64(z_mask_old, pos, len);
- if (pos == 0) {
- ctx->a_mask = z_mask_old ^ z_mask;
+ if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+ return true;
}
ctx->z_mask = z_mask;
ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = s_mask;
- if (!type_change) {
- ctx->a_mask = s_mask & ~s_mask_old;
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = smask_from_zmask(z_mask);
- if (!type_change) {
- ctx->a_mask = z_mask_old ^ z_mask;
+ if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+ return true;
}
return fold_masks(ctx, op);
}
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
ctx->s_mask = s_mask;

- if (pos == 0) {
- ctx->a_mask = s_mask & ~s_mask_old;
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ return true;
}

return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
}

/* Assume all bits affected, no bits known zero, no sign reps. */
- ctx.a_mask = -1;
ctx.z_mask = -1;
ctx.s_mask = 0;

--
2.43.0

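A worked example of the "affected" mask test introduced above, offered as an illustration rather than as part of the patch: for an AND whose second operand is constant, a_mask = z1 & ~z2 is zero exactly when the constant keeps every bit that the first operand can possibly have set, so the AND cannot change the value and folds to a copy. A minimal standalone check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* arg1 is known to fit in 8 bits: z1 lists the bits that may be 1. */
        uint64_t z1 = 0xff;
        /* arg2 is the constant 0xff, so its z_mask is the value itself. */
        uint64_t z2 = 0xff;
        /* No bit of arg1 can be cleared by the AND: the op is a copy. */
        uint64_t a_mask = z1 & ~z2;
        assert(a_mask == 0);
        return 0;
    }
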
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
{
uint64_t z_mask = ctx->z_mask;
uint64_t s_mask = ctx->s_mask;
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
+ TCGTemp *ts;
+ TempOptInfo *ti;
+
+ /* Only single-output opcodes are supported here. */
+ tcg_debug_assert(def->nb_oargs == 1);

/*
* 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
if (ctx->type == TCG_TYPE_I32) {
z_mask = (int32_t)z_mask;
s_mask |= MAKE_64BIT_MASK(32, 32);
- ctx->z_mask = z_mask;
- ctx->s_mask = s_mask;
}

if (z_mask == 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
}
- return false;
+
+ ts = arg_temp(op->args[0]);
+ reset_ts(ctx, ts);
+
+ ti = ts_info(ts);
+ ti->z_mask = z_mask;
+ ti->s_mask = s_mask;
+ return true;
}

/*
--
2.43.0

Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
return fold_const2(ctx, op);
}

-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+ uint64_t z_mask, uint64_t s_mask)
{
- uint64_t z_mask = ctx->z_mask;
- uint64_t s_mask = ctx->s_mask;
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGTemp *ts;
TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
return true;
}

+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+ return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
/*
* An "affected" mask bit is 0 if and only if the result is identical
* to the first input. Thus if the entire mask is 0, the operation
--
2.43.0

Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
* Record "zero" and "sign" masks for the single output of @op.
* See TempOptInfo definition of z_mask and s_mask.
* If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
*/
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

ti = ts_info(ts);
ti->z_mask = z_mask;
- ti->s_mask = s_mask;
+ ti->s_mask = s_mask | smask_from_zmask(z_mask);
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
- s_mask = smask_from_zmask(z_mask);

+ s_mask = 0;
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
case TCG_BSWAP_OZ:
break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
default:
/* The high bits are undefined: force all bits above the sign to 1. */
z_mask |= sign << 1;
- s_mask = 0;
break;
}
ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
g_assert_not_reached();
}
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
default:
g_assert_not_reached();
}
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
return true;
}
ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);

return fold_masks(ctx, op);
}
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
}

ctx->z_mask = z_mask;
- ctx->s_mask = smask_from_zmask(z_mask);
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
return true;
}
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
int width = 8 * memop_size(mop);

if (width < 64) {
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
- if (!(mop & MO_SIGN)) {
+ if (mop & MO_SIGN) {
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+ } else {
ctx->z_mask = MAKE_64BIT_MASK(0, width);
- ctx->s_mask <<= 1;
}
}

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
fold_setcond_tst_pow2(ctx, op, false);

ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
}

ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
return false;

do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
break;
CASE_OP_32_64(ld8u):
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
break;
CASE_OP_32_64(ld16s):
ctx->s_mask = MAKE_64BIT_MASK(16, 48);
break;
CASE_OP_32_64(ld16u):
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
break;
case INDEX_op_ld32s_i64:
ctx->s_mask = MAKE_64BIT_MASK(32, 32);
break;
case INDEX_op_ld32u_i64:
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
break;
default:
g_assert_not_reached();
--
2.43.0

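For illustration only: the extra information that fold_masks_zs can now derive from z_mask comes from the existing smask_from_zmask() helper. A small standalone example of that deduction, with the helper logic reproduced from tcg/optimize.c (as it stands at this point in the series), so a value known to fit in 8 bits is recognized as having at least 55 redundant sign bits:

    #include <assert.h>
    #include <stdint.h>

    /* Same logic as smask_from_zmask() in tcg/optimize.c.  Note that
     * __builtin_clzll(0) is undefined, unlike QEMU's clz64(); the
     * example only uses a nonzero zmask. */
    static uint64_t smask_from_zmask(uint64_t zmask)
    {
        int rep = __builtin_clzll(zmask);   /* known-zero top bits */
        if (rep == 0) {
            return 0;                       /* msb may be set: no sign info */
        }
        rep -= 1;
        return ~(~0ull >> rep);
    }

    int main(void)
    {
        /* A value known to fit in 8 bits has >= 55 redundant sign bits. */
        assert(smask_from_zmask(0xff) == ~(~0ull >> 55));
        return 0;
    }
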
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
lead to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, canonicalization
is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
uint64_t val;
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
} TempOptInfo;

typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {

/* In flight values from optimization. */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
- uint64_t s_mask; /* mask of clrsb(value) bits */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
TCGType type;
} OptContext;

-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
- int rep = clrsb64(value);
- return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
- /*
- * Only the 0 bits are significant for zmask, thus the msb itself
- * must be zero, else we have no sign information.
- */
- int rep = clz64(zmask);
- if (rep == 0) {
- return 0;
- }
- rep -= 1;
- return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
- /* Only the 1 bits are significant for smask */
- return smask_from_zmask(~smask);
-}
-
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->is_const = true;
ti->val = ts->val;
ti->z_mask = ts->val;
- ti->s_mask = smask_from_value(ts->val);
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
} else {
ti->is_const = false;
ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
*/
if (i == 0) {
ts_info(ts)->z_mask = ctx->z_mask;
- ts_info(ts)->s_mask = ctx->s_mask;
}
}
}
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
* The passed s_mask may be augmented by z_mask.
*/
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
- uint64_t z_mask, uint64_t s_mask)
+ uint64_t z_mask, int64_t s_mask)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGTemp *ts;
TempOptInfo *ti;
+ int rep;

/* Only single-output opcodes are supported here. */
tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
*/
if (ctx->type == TCG_TYPE_I32) {
z_mask = (int32_t)z_mask;
- s_mask |= MAKE_64BIT_MASK(32, 32);
+ s_mask |= INT32_MIN;
}

if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

ti = ts_info(ts);
ti->z_mask = z_mask;
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+ /* Canonicalize s_mask and incorporate data from z_mask. */
+ rep = clz64(~s_mask);
+ rep = MAX(rep, clz64(z_mask));
+ rep = MAX(rep - 1, 0);
+ ti->s_mask = INT64_MIN >> rep;
+
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = s_mask;
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
ctx->s_mask = s_mask;

- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
- ctx->s_mask = smask_from_smask(s_mask);

return fold_masks(ctx, op);
}
--
2.43.0

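A worked example of the representation change described above, as an illustration and not part of the patch. It relies on the GCC/Clang __builtin_clrsbll builtin and on arithmetic right shift of negative values, exactly as the patch itself does with INT64_MIN >> clrsb64(val):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t val = -128;                 /* 0xffffffffffffff80 */
        int rep = __builtin_clrsbll(val);   /* 56 redundant sign bits */

        /* Old form: left-aligned mask of the redundant sign bits only. */
        uint64_t old_smask = ~(~0ull >> rep);
        /* New form: every bit that equals the msb, msb included. */
        uint64_t new_smask = (uint64_t)(INT64_MIN >> rep);

        assert(old_smask == 0xffffffffffffff00ull);   /* bits 63..8 */
        assert(new_smask == 0xffffffffffffff80ull);   /* bits 63..7 */

        /* The new form shifts exactly like the value itself; the old,
         * sign-bit-excluded form would come out off by one. */
        int64_t val2 = val >> 1;
        int64_t shifted_mask = (int64_t)new_smask >> 1;
        assert((uint64_t)shifted_mask
               == (uint64_t)(INT64_MIN >> __builtin_clrsbll(val2)));
        return 0;
    }
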
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
remove_mem_copy_all(ctx);
}

-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
ts_info(ts)->z_mask = ctx->z_mask;
}
}
+ return true;
}

/*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
op->args[4] = arg_new_constant(ctx, bl);
op->args[5] = arg_new_constant(ctx, bh);
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
--
2.43.0

Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
return ts_info(arg_temp(arg));
}

+static inline bool ti_is_const(TempOptInfo *ti)
+{
+ return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+ return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+ return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
static inline bool ts_is_const(TCGTemp *ts)
{
- return ts_info(ts)->is_const;
+ return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
- TempOptInfo *ti = ts_info(ts);
- return ti->is_const && ti->val == val;
+ return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
--
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)

static bool fold_and(OptContext *ctx, TCGOp *op)
{
- uint64_t z1, z2;
+ uint64_t z1, z2, z_mask, s_mask;
+ TempOptInfo *t1, *t2;

if (fold_const2_commutative(ctx, op) ||
fold_xi_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
return true;
}

- z1 = arg_info(op->args[1])->z_mask;
- z2 = arg_info(op->args[2])->z_mask;
- ctx->z_mask = z1 & z2;
-
- /*
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
- * Bitwise operations preserve the relative quantity of the repetitions.
- */
- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+ z1 = t1->z_mask;
+ z2 = t2->z_mask;

/*
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer affected bits from it.
*/
- if (arg_is_const(op->args[2]) &&
- fold_affected_mask(ctx, op, z1 & ~z2)) {
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
return true;
}

- return fold_masks(ctx, op);
+ z_mask = z1 & z2;
+
+ /*
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
+ * Bitwise operations preserve the relative quantity of the repetitions.
+ */
+ s_mask = t1->s_mask & t2->s_mask;
+
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
--
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
Avoid double inversion of the value of second const operand.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
- uint64_t z1;
+ uint64_t z_mask, s_mask;
+ TempOptInfo *t1, *t2;

if (fold_const2(ctx, op) ||
fold_xx_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
return true;
}

- z1 = arg_info(op->args[1])->z_mask;
+ t1 = arg_info(op->args[1]);
+ t2 = arg_info(op->args[2]);
+ z_mask = t1->z_mask;

/*
* Known-zeros does not imply known-ones. Therefore unless
* arg2 is constant, we can't infer anything from it.
*/
- if (arg_is_const(op->args[2])) {
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+ if (ti_is_const(t2)) {
+ uint64_t v2 = ti_const_val(t2);
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
return true;
}
- z1 &= z2;
+ z_mask &= ~v2;
}
- ctx->z_mask = z1;

- ctx->s_mask = arg_info(op->args[1])->s_mask
- & arg_info(op->args[2])->s_mask;
- return fold_masks(ctx, op);
+ s_mask = t1->s_mask & t2->s_mask;
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
--
2.43.0

New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
diff view generated by jsdifflib
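The BSWAP_OS claim ("the value and therefore s_mask is explicitly sign-extended") can be checked in isolation: after a 16-bit swap whose result is sign-extended, bits 15..63 always repeat the sign bit. In the sketch below, my_bswap16 is a local helper written only to keep the example self-contained; it is not QEMU's byte-swap API.

/* Illustration only -- not QEMU code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t my_bswap16(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
    for (uint32_t v = 0; v < 0x10000; v++) {
        int64_t r = (int16_t)my_bswap16((uint16_t)v); /* swap, then sign-extend */
        uint64_t top = (uint64_t)r >> 15;             /* bits 15..63 */

        /* Bits 15..63 are all 0 or all 1: they repeat the sign bit. */
        assert(top == 0 || top == (UINT64_C(1) << 49) - 1);
    }
    printf("sign-extended bswap16: bits 15..63 always repeat the sign\n");
    return 0;
}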
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
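For count-zeros with a fallback operand, the result is either a count in [0, 32] or the fallback value itself, so its possibly-set bits are covered by (32 | 31) combined with the fallback's own z_mask. The standalone sketch below models that with a local clz32_with_fallback helper (not a QEMU function) and a handful of sample inputs; the s_mask half of the patch is not modeled here.

/* Illustration only -- not QEMU code.  clz32_with_fallback is local. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t clz32_with_fallback(uint32_t x, uint32_t f)
{
    uint32_t n = 0;

    if (x == 0) {
        return f;               /* op2 fallback for a zero input */
    }
    while (!(x & 0x80000000u)) {
        x <<= 1;
        n++;
    }
    return n;                   /* count in [0, 31] for a nonzero input */
}

int main(void)
{
    const uint64_t count_z_mask = 32 | 31;  /* covers counts 0..32 */
    const uint32_t samples[] = { 0, 1, 2, 0x8000u, 0x80000000u, 0xffffffffu };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        for (uint32_t f = 0; f < 64; f++) {
            uint64_t r = clz32_with_fallback(samples[i], f);
            uint64_t z_fallback = f;        /* constant fallback: z_mask == value */

            /* Result covered by max-count mask OR the fallback's z_mask. */
            assert((r & ~(count_z_mask | z_fallback)) == 0);
        }
    }
    printf("clz-with-fallback bounded by (32 | 31) | z_mask(fallback)\n");
    return 0;
}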
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
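The constant z_mask chosen for ctpop is just the observation that a 64-bit population count lies in [0, 64], and every such count fits under 64 | 63. A trivial standalone check (illustration only):

/* Illustration only -- not QEMU code. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    for (unsigned count = 0; count <= 64; count++) {
        /* Every possible 64-bit population count fits under 64 | 63. */
        assert((count & ~(64u | 63u)) == 0);
    }
    printf("all possible ctpop results fit under z_mask = 64 | 63\n");
    return 0;
}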
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to an and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
diff view generated by jsdifflib
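The new z_mask computation is the usual containment property of deposit: depositing a field bounded by z2 into a base bounded by z1 yields a result bounded by deposit(z1, ofs, len, z2). The standalone check below re-implements the operation locally as my_deposit64 (only so the example compiles on its own; it is not QEMU's deposit64) and brute-forces the bound over 4-bit values.

/* Illustration only -- not QEMU code.  my_deposit64 is a local stand-in. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t my_deposit64(uint64_t base, int ofs, int len, uint64_t field)
{
    uint64_t mask = ((UINT64_C(1) << len) - 1) << ofs;  /* len < 64 here */

    return (base & ~mask) | ((field << ofs) & mask);
}

int main(void)
{
    for (int ofs = 0; ofs < 4; ofs++) {
        for (int len = 1; ofs + len <= 4; len++) {
            for (unsigned z1 = 0; z1 < 16; z1++) {
                for (unsigned z2 = 0; z2 < 16; z2++) {
                    uint64_t zr = my_deposit64(z1, ofs, len, z2);

                    for (unsigned a = 0; a < 16; a++) {
                        if (a & ~z1) {
                            continue;       /* a not compatible with z1 */
                        }
                        for (unsigned b = 0; b < 16; b++) {
                            if (b & ~z2) {
                                continue;   /* b not compatible with z2 */
                            }
                            /* deposit(z1, ofs, len, z2) bounds the result. */
                            assert((my_deposit64(a, ofs, len, b) & ~zr) == 0);
                        }
                    }
                }
            }
        }
    }
    printf("deposit z_mask bound holds for all 4-bit cases\n");
    return 0;
}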
1
While there are no specific 16-bit rotate instructions, there
1
The input which overlaps the sign bit of the output can
2
are double-word shifts, which can perform the same operation.
2
have its input s_mask propagated to the output s_mask.
3
3
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/i386/tcg-target.c.inc | 18 +++++++++++++++++-
7
tcg/optimize.c | 14 ++++++++++++--
9
1 file changed, 17 insertions(+), 1 deletion(-)
8
1 file changed, 12 insertions(+), 2 deletions(-)
10
9
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
12
--- a/tcg/optimize.c
14
+++ b/tcg/i386/tcg-target.c.inc
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
16
case INDEX_op_rotlv_vec:
15
TempOptInfo *t2 = arg_info(op->args[2]);
17
case INDEX_op_rotrv_vec:
16
int ofs = op->args[3];
18
switch (vece) {
17
int len = op->args[4];
19
+ case MO_16:
18
+ int width;
20
+ return have_avx512vbmi2 ? -1 : 0;
19
TCGOpcode and_opc;
21
case MO_32:
20
- uint64_t z_mask;
22
case MO_64:
21
+ uint64_t z_mask, s_mask;
23
return have_avx512vl ? 1 : have_avx2 ? -1 : 0;
22
24
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotli(TCGType type, unsigned vece,
23
if (ti_is_const(t1) && ti_is_const(t2)) {
25
return;
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
26
}
39
}
27
40
28
+ if (have_avx512vbmi2) {
41
+ /* The s_mask from the top portion of the deposit is still valid. */
29
+ vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece,
42
+ if (ofs + len == width) {
30
+ tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v1), imm);
43
+ s_mask = t2->s_mask << ofs;
31
+ return;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
32
+ }
46
+ }
33
+
47
+
34
t = tcg_temp_new_vec(type);
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
35
tcg_gen_shli_vec(vece, t, v1, imm);
49
- return fold_masks_z(ctx, op, z_mask);
36
tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
37
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotls(TCGType type, unsigned vece,
51
}
38
static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
52
39
TCGv_vec v1, TCGv_vec sh, bool right)
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
40
{
41
- TCGv_vec t = tcg_temp_new_vec(type);
42
+ TCGv_vec t;
43
44
+ if (have_avx512vbmi2) {
45
+ vec_gen_4(right ? INDEX_op_x86_vpshrdv_vec : INDEX_op_x86_vpshldv_vec,
46
+ type, vece, tcgv_vec_arg(v0), tcgv_vec_arg(v1),
47
+ tcgv_vec_arg(v1), tcgv_vec_arg(sh));
48
+ return;
49
+ }
50
+
51
+ t = tcg_temp_new_vec(type);
52
tcg_gen_dupi_vec(vece, t, 8 << vece);
53
tcg_gen_sub_vec(vece, t, t, sh);
54
if (right) {
55
--
54
--
56
2.25.1
55
2.43.0
57
58
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
diff view generated by jsdifflib
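The s1 & s2 rule used here for eqv follows from the comment already quoted in fold_and: bitwise operations preserve sign repetitions. A standalone brute force over 8-bit values, using a local redundant-sign-bit counter clrsb8 (not a compiler builtin and not QEMU code), confirms that eqv keeps at least as many redundant sign bits as the weaker operand.

/* Illustration only -- not QEMU code.  clrsb8 is a local helper. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Number of bits below the msb of an 8-bit value that repeat it. */
static int clrsb8(uint8_t v)
{
    int sign = v >> 7;
    int n = 0;

    for (int bit = 6; bit >= 0 && ((v >> bit) & 1) == sign; bit--) {
        n++;
    }
    return n;
}

int main(void)
{
    for (unsigned a = 0; a < 256; a++) {
        for (unsigned b = 0; b < 256; b++) {
            int ca = clrsb8((uint8_t)a);
            int cb = clrsb8((uint8_t)b);
            int cr = clrsb8((uint8_t)~(a ^ b));     /* eqv */

            /* eqv keeps at least min(ca, cb) redundant sign bits. */
            assert(cr >= (ca < cb ? ca : cb));
        }
    }
    printf("eqv preserves the weaker operand's sign repetitions\n");
    return 0;
}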
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
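Both facts used in fold_extract -- the extracted z_mask bound and the pos == 0 no-op case handed to fold_affected_mask -- can be brute-forced in a few lines. my_extract64 below is a local stand-in written for this illustration, not QEMU's extract64.

/* Illustration only -- not QEMU code.  my_extract64 is a local stand-in. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t my_extract64(uint64_t value, int pos, int len)
{
    return (value >> pos) & ((UINT64_C(1) << len) - 1);    /* len < 64 here */
}

int main(void)
{
    for (int pos = 0; pos < 4; pos++) {
        for (int len = 1; pos + len <= 4; len++) {
            for (unsigned z_old = 0; z_old < 16; z_old++) {
                uint64_t z_new = my_extract64(z_old, pos, len);

                for (unsigned v = 0; v < 16; v++) {
                    if (v & ~z_old) {
                        continue;           /* v not compatible with z_old */
                    }
                    uint64_t r = my_extract64(v, pos, len);

                    /* The extracted z_mask bounds the result. */
                    assert((r & ~z_new) == 0);
                    /*
                     * pos == 0 and no possibly-set bit dropped: the extract
                     * is a no-op, the fold_affected_mask case above.
                     */
                    if (pos == 0 && (z_old ^ z_new) == 0) {
                        assert(r == v);
                    }
                }
            }
        }
    }
    printf("extract z_mask facts hold for all 4-bit cases\n");
    return 0;
}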
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
diff view generated by jsdifflib
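Replacing the manual "if (z_mask & sign) z_mask |= sign" pattern with a plain signed cast is sound because sign-extending the mask covers sign-extending any compatible value, and because the result's bits from the old sign position upward all repeat the sign bit (the INT8_MIN added to s_mask). The standalone check below is illustration only, not QEMU code, and covers the ext8s case.

/* Illustration only -- not QEMU code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (unsigned z = 0; z < 256; z++) {
        uint64_t z_ext = (uint64_t)(int64_t)(int8_t)z;      /* z_mask = (int8_t)z_mask */

        for (unsigned v = 0; v < 256; v++) {
            if (v & ~z) {
                continue;                   /* v not compatible with z */
            }
            uint64_t v_ext = (uint64_t)(int64_t)(int8_t)v;  /* ext8s of v */
            uint64_t top = v_ext >> 7;                      /* bits 7..63 */

            /* Sign-extending the mask covers sign-extending the value. */
            assert((v_ext & ~z_ext) == 0);
            /* Bits 7..63 all repeat the sign bit: the INT8_MIN in s_mask. */
            assert(top == 0 || top == (UINT64_C(1) << 57) - 1);
        }
    }
    printf("ext8s z_mask/s_mask facts hold for all 8-bit inputs\n");
    return 0;
}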
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
diff view generated by jsdifflib
1
There are some operation sizes in some subsets of AVX512 that
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
are missing from previous iterations of AVX. Detect them.
3
2
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
include/qemu/cpuid.h | 20 +++++++++++++++++---
6
tcg/optimize.c | 19 +++++++++++--------
9
tcg/i386/tcg-target.h | 4 ++++
7
1 file changed, 11 insertions(+), 8 deletions(-)
10
tcg/i386/tcg-target.c.inc | 24 ++++++++++++++++++++++--
11
3 files changed, 43 insertions(+), 5 deletions(-)
12
8
13
diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/include/qemu/cpuid.h
11
--- a/tcg/optimize.c
16
+++ b/include/qemu/cpuid.h
12
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
18
#ifndef bit_AVX2
14
19
#define bit_AVX2 (1 << 5)
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
20
#endif
21
-#ifndef bit_AVX512F
22
-#define bit_AVX512F (1 << 16)
23
-#endif
24
#ifndef bit_BMI2
25
#define bit_BMI2 (1 << 8)
26
#endif
27
+#ifndef bit_AVX512F
28
+#define bit_AVX512F (1 << 16)
29
+#endif
30
+#ifndef bit_AVX512DQ
31
+#define bit_AVX512DQ (1 << 17)
32
+#endif
33
+#ifndef bit_AVX512BW
34
+#define bit_AVX512BW (1 << 30)
35
+#endif
36
+#ifndef bit_AVX512VL
37
+#define bit_AVX512VL (1u << 31)
38
+#endif
39
+
40
+/* Leaf 7, %ecx */
41
+#ifndef bit_AVX512VBMI2
42
+#define bit_AVX512VBMI2 (1 << 6)
43
+#endif
44
45
/* Leaf 0x80000001, %ecx */
46
#ifndef bit_LZCNT
47
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
48
index XXXXXXX..XXXXXXX 100644
49
--- a/tcg/i386/tcg-target.h
50
+++ b/tcg/i386/tcg-target.h
51
@@ -XXX,XX +XXX,XX @@ extern bool have_bmi1;
52
extern bool have_popcnt;
53
extern bool have_avx1;
54
extern bool have_avx2;
55
+extern bool have_avx512bw;
56
+extern bool have_avx512dq;
57
+extern bool have_avx512vbmi2;
58
+extern bool have_avx512vl;
59
extern bool have_movbe;
60
61
/* optional instructions */
62
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
63
index XXXXXXX..XXXXXXX 100644
64
--- a/tcg/i386/tcg-target.c.inc
65
+++ b/tcg/i386/tcg-target.c.inc
66
@@ -XXX,XX +XXX,XX @@ bool have_bmi1;
67
bool have_popcnt;
68
bool have_avx1;
69
bool have_avx2;
70
+bool have_avx512bw;
71
+bool have_avx512dq;
72
+bool have_avx512vbmi2;
73
+bool have_avx512vl;
74
bool have_movbe;
75
76
#ifdef CONFIG_CPUID_H
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
78
static void tcg_target_init(TCGContext *s)
79
{
16
{
80
#ifdef CONFIG_CPUID_H
17
+ uint64_t z_mask, s_mask;
81
- unsigned a, b, c, d, b7 = 0;
18
+ TempOptInfo *tt, *ft;
82
+ unsigned a, b, c, d, b7 = 0, c7 = 0;
19
int i;
83
unsigned max = __get_cpuid_max(0, 0);
20
84
21
/* If true and false values are the same, eliminate the cmp. */
85
if (max >= 7) {
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
86
/* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
87
- __cpuid_count(7, 0, a, b7, c, d);
88
+ __cpuid_count(7, 0, a, b7, c7, d);
89
have_bmi1 = (b7 & bit_BMI) != 0;
90
have_bmi2 = (b7 & bit_BMI2) != 0;
91
}
24
}
92
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
25
93
if ((xcrl & 6) == 6) {
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
94
have_avx1 = (c & bit_AVX) != 0;
27
- | arg_info(op->args[4])->z_mask;
95
have_avx2 = (b7 & bit_AVX2) != 0;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
96
+
29
- & arg_info(op->args[4])->s_mask;
97
+ /*
30
+ tt = arg_info(op->args[3]);
98
+ * There are interesting instructions in AVX512, so long
31
+ ft = arg_info(op->args[4]);
99
+ * as we have AVX512VL, which indicates support for EVEX
32
+ z_mask = tt->z_mask | ft->z_mask;
100
+ * on sizes smaller than 512 bits. We are required to
33
+ s_mask = tt->s_mask & ft->s_mask;
101
+ * check that OPMASK and all extended ZMM state are enabled
34
102
+ * even if we're not using them -- the insns will fault.
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
103
+ */
36
- uint64_t tv = arg_info(op->args[3])->val;
104
+ if ((xcrl & 0xe0) == 0xe0
37
- uint64_t fv = arg_info(op->args[4])->val;
105
+ && (b7 & bit_AVX512F)
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
106
+ && (b7 & bit_AVX512VL)) {
39
+ uint64_t tv = ti_const_val(tt);
107
+ have_avx512vl = true;
40
+ uint64_t fv = ti_const_val(ft);
108
+ have_avx512bw = (b7 & bit_AVX512BW) != 0;
41
TCGOpcode opc, negopc = 0;
109
+ have_avx512dq = (b7 & bit_AVX512DQ) != 0;
42
TCGCond cond = op->args[5];
110
+ have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0;
43
111
+ }
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
112
}
45
}
113
}
46
}
114
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
115
--
54
--
116
2.25.1
55
2.43.0
117
118
diff view generated by jsdifflib
1
We've had placeholders for these opcodes for a while,
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
and should have support on ppc, s390x and avx512 hosts.
3
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
3
---
9
include/tcg/tcg-opc.h | 3 +++
4
tcg/optimize.c | 6 +++---
10
include/tcg/tcg.h | 3 +++
5
1 file changed, 3 insertions(+), 3 deletions(-)
11
tcg/aarch64/tcg-target.h | 3 +++
12
tcg/arm/tcg-target.h | 3 +++
13
tcg/i386/tcg-target.h | 3 +++
14
tcg/ppc/tcg-target.h | 3 +++
15
tcg/s390x/tcg-target.h | 3 +++
16
tcg/optimize.c | 12 ++++++------
17
tcg/tcg-op-vec.c | 27 ++++++++++++++++++---------
18
tcg/tcg.c | 6 ++++++
19
10 files changed, 51 insertions(+), 15 deletions(-)
20
6
21
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/include/tcg/tcg-opc.h
24
+++ b/include/tcg/tcg-opc.h
25
@@ -XXX,XX +XXX,XX @@ DEF(or_vec, 1, 2, 0, IMPLVEC)
26
DEF(xor_vec, 1, 2, 0, IMPLVEC)
27
DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
28
DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
29
+DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
30
+DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
31
+DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
32
DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
33
34
DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
35
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/include/tcg/tcg.h
38
+++ b/include/tcg/tcg.h
39
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
40
#define TCG_TARGET_HAS_not_vec 0
41
#define TCG_TARGET_HAS_andc_vec 0
42
#define TCG_TARGET_HAS_orc_vec 0
43
+#define TCG_TARGET_HAS_nand_vec 0
44
+#define TCG_TARGET_HAS_nor_vec 0
45
+#define TCG_TARGET_HAS_eqv_vec 0
46
#define TCG_TARGET_HAS_roti_vec 0
47
#define TCG_TARGET_HAS_rots_vec 0
48
#define TCG_TARGET_HAS_rotv_vec 0
49
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
50
index XXXXXXX..XXXXXXX 100644
51
--- a/tcg/aarch64/tcg-target.h
52
+++ b/tcg/aarch64/tcg-target.h
53
@@ -XXX,XX +XXX,XX @@ typedef enum {
54
55
#define TCG_TARGET_HAS_andc_vec 1
56
#define TCG_TARGET_HAS_orc_vec 1
57
+#define TCG_TARGET_HAS_nand_vec 0
58
+#define TCG_TARGET_HAS_nor_vec 0
59
+#define TCG_TARGET_HAS_eqv_vec 0
60
#define TCG_TARGET_HAS_not_vec 1
61
#define TCG_TARGET_HAS_neg_vec 1
62
#define TCG_TARGET_HAS_abs_vec 1
63
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/arm/tcg-target.h
66
+++ b/tcg/arm/tcg-target.h
67
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
68
69
#define TCG_TARGET_HAS_andc_vec 1
70
#define TCG_TARGET_HAS_orc_vec 1
71
+#define TCG_TARGET_HAS_nand_vec 0
72
+#define TCG_TARGET_HAS_nor_vec 0
73
+#define TCG_TARGET_HAS_eqv_vec 0
74
#define TCG_TARGET_HAS_not_vec 1
75
#define TCG_TARGET_HAS_neg_vec 1
76
#define TCG_TARGET_HAS_abs_vec 1
77
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
78
index XXXXXXX..XXXXXXX 100644
79
--- a/tcg/i386/tcg-target.h
80
+++ b/tcg/i386/tcg-target.h
81
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
82
83
#define TCG_TARGET_HAS_andc_vec 1
84
#define TCG_TARGET_HAS_orc_vec 0
85
+#define TCG_TARGET_HAS_nand_vec 0
86
+#define TCG_TARGET_HAS_nor_vec 0
87
+#define TCG_TARGET_HAS_eqv_vec 0
88
#define TCG_TARGET_HAS_not_vec 0
89
#define TCG_TARGET_HAS_neg_vec 0
90
#define TCG_TARGET_HAS_abs_vec 1
91
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
92
index XXXXXXX..XXXXXXX 100644
93
--- a/tcg/ppc/tcg-target.h
94
+++ b/tcg/ppc/tcg-target.h
95
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
96
97
#define TCG_TARGET_HAS_andc_vec 1
98
#define TCG_TARGET_HAS_orc_vec have_isa_2_07
99
+#define TCG_TARGET_HAS_nand_vec 0
100
+#define TCG_TARGET_HAS_nor_vec 0
101
+#define TCG_TARGET_HAS_eqv_vec 0
102
#define TCG_TARGET_HAS_not_vec 1
103
#define TCG_TARGET_HAS_neg_vec have_isa_3_00
104
#define TCG_TARGET_HAS_abs_vec 0
105
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/tcg/s390x/tcg-target.h
108
+++ b/tcg/s390x/tcg-target.h
109
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
110
111
#define TCG_TARGET_HAS_andc_vec 1
112
#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
113
+#define TCG_TARGET_HAS_nand_vec 0
114
+#define TCG_TARGET_HAS_nor_vec 0
115
+#define TCG_TARGET_HAS_eqv_vec 0
116
#define TCG_TARGET_HAS_not_vec 1
117
#define TCG_TARGET_HAS_neg_vec 1
118
#define TCG_TARGET_HAS_abs_vec 1
119
diff --git a/tcg/optimize.c b/tcg/optimize.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
120
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
121
--- a/tcg/optimize.c
9
--- a/tcg/optimize.c
122
+++ b/tcg/optimize.c
10
+++ b/tcg/optimize.c
123
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
124
CASE_OP_32_64_VEC(orc):
12
fold_xi_to_x(ctx, op, 1)) {
125
return x | ~y;
13
return true;
126
14
}
127
- CASE_OP_32_64(eqv):
15
- return false;
128
+ CASE_OP_32_64_VEC(eqv):
16
+ return finish_folding(ctx, op);
129
return ~(x ^ y);
130
131
- CASE_OP_32_64(nand):
132
+ CASE_OP_32_64_VEC(nand):
133
return ~(x & y);
134
135
- CASE_OP_32_64(nor):
136
+ CASE_OP_32_64_VEC(nor):
137
return ~(x | y);
138
139
case INDEX_op_clz_i32:
140
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
141
case INDEX_op_dup2_vec:
142
done = fold_dup2(&ctx, op);
143
break;
144
- CASE_OP_32_64(eqv):
145
+ CASE_OP_32_64_VEC(eqv):
146
done = fold_eqv(&ctx, op);
147
break;
148
CASE_OP_32_64(extract):
149
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
150
CASE_OP_32_64(mulu2):
151
done = fold_multiply2(&ctx, op);
152
break;
153
- CASE_OP_32_64(nand):
154
+ CASE_OP_32_64_VEC(nand):
155
done = fold_nand(&ctx, op);
156
break;
157
CASE_OP_32_64(neg):
158
done = fold_neg(&ctx, op);
159
break;
160
- CASE_OP_32_64(nor):
161
+ CASE_OP_32_64_VEC(nor):
162
done = fold_nor(&ctx, op);
163
break;
164
CASE_OP_32_64_VEC(not):
165
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
166
index XXXXXXX..XXXXXXX 100644
167
--- a/tcg/tcg-op-vec.c
168
+++ b/tcg/tcg-op-vec.c
169
@@ -XXX,XX +XXX,XX @@ void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
170
171
void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
172
{
173
- /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend supports it. */
174
- tcg_gen_and_vec(0, r, a, b);
175
- tcg_gen_not_vec(0, r, r);
176
+ if (TCG_TARGET_HAS_nand_vec) {
177
+ vec_gen_op3(INDEX_op_nand_vec, 0, r, a, b);
178
+ } else {
179
+ tcg_gen_and_vec(0, r, a, b);
180
+ tcg_gen_not_vec(0, r, r);
181
+ }
182
}
17
}
183
18
184
void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
185
{
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
186
- /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend supports it. */
21
fold_xi_to_i(ctx, op, 0)) {
187
- tcg_gen_or_vec(0, r, a, b);
22
return true;
188
- tcg_gen_not_vec(0, r, r);
23
}
189
+ if (TCG_TARGET_HAS_nor_vec) {
24
- return false;
190
+ vec_gen_op3(INDEX_op_nor_vec, 0, r, a, b);
25
+ return finish_folding(ctx, op);
191
+ } else {
192
+ tcg_gen_or_vec(0, r, a, b);
193
+ tcg_gen_not_vec(0, r, r);
194
+ }
195
}
26
}
196
27
197
void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
198
{
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
199
- /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend supports it. */
30
tcg_opt_gen_movi(ctx, op2, rh, h);
200
- tcg_gen_xor_vec(0, r, a, b);
31
return true;
201
- tcg_gen_not_vec(0, r, r);
32
}
202
+ if (TCG_TARGET_HAS_eqv_vec) {
33
- return false;
203
+ vec_gen_op3(INDEX_op_eqv_vec, 0, r, a, b);
34
+ return finish_folding(ctx, op);
204
+ } else {
205
+ tcg_gen_xor_vec(0, r, a, b);
206
+ tcg_gen_not_vec(0, r, r);
207
+ }
208
}
35
}
209
36
210
static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
211
diff --git a/tcg/tcg.c b/tcg/tcg.c
212
index XXXXXXX..XXXXXXX 100644
213
--- a/tcg/tcg.c
214
+++ b/tcg/tcg.c
215
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
216
return have_vec && TCG_TARGET_HAS_andc_vec;
217
case INDEX_op_orc_vec:
218
return have_vec && TCG_TARGET_HAS_orc_vec;
219
+ case INDEX_op_nand_vec:
220
+ return have_vec && TCG_TARGET_HAS_nand_vec;
221
+ case INDEX_op_nor_vec:
222
+ return have_vec && TCG_TARGET_HAS_nor_vec;
223
+ case INDEX_op_eqv_vec:
224
+ return have_vec && TCG_TARGET_HAS_eqv_vec;
225
case INDEX_op_mul_vec:
226
return have_vec && TCG_TARGET_HAS_mul_vec;
227
case INDEX_op_shli_vec:
228
--
38
--
229
2.25.1
39
2.43.0
230
231
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
diff view generated by jsdifflib
1
While the host may prefer to treat 32-bit addresses as signed,
1
Avoid the use of the OptContext slots.
2
there are edge cases of guests that cannot be implemented with
3
addresses 0x7fff_ffff and 0x8000_0000 being non-consecutive.
4
2
5
Therefore, default to guest_base_signed_addr32 false, and allow
3
Be careful not to call fold_masks_zs when the memory operation
6
probe_guest_base to determine whether it is possible to set it
4
is wide enough to require multiple outputs, so split into two
7
to true. A tcg backend which sets TCG_TARGET_SIGNED_ADDR32 will
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
8
have to cope with either setting for user-only.
9
6
10
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
11
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
9
---
14
include/exec/cpu-all.h | 16 ++++++++++++++++
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
15
include/exec/cpu_ldst.h | 3 ++-
11
1 file changed, 21 insertions(+), 5 deletions(-)
16
bsd-user/main.c | 4 ++++
17
linux-user/main.c | 3 +++
18
4 files changed, 25 insertions(+), 1 deletion(-)
19
12
20
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
21
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/cpu-all.h
15
--- a/tcg/optimize.c
23
+++ b/include/exec/cpu-all.h
16
+++ b/tcg/optimize.c
24
@@ -XXX,XX +XXX,XX @@ static inline void tswap64s(uint64_t *s)
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
25
18
return fold_masks_s(ctx, op, s_mask);
26
#if defined(CONFIG_USER_ONLY)
19
}
27
#include "exec/user/abitypes.h"
20
28
+#include "tcg-target-sa32.h"
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
29
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
30
/* On some host systems the guest address space is reserved on the host.
23
{
31
* This allows the guest address space to be offset to a convenient location.
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
32
@@ -XXX,XX +XXX,XX @@ extern uintptr_t guest_base;
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
33
extern bool have_guest_base;
26
MemOp mop = get_memop(oi);
34
extern unsigned long reserved_va;
27
int width = 8 * memop_size(mop);
35
28
+ uint64_t z_mask = -1, s_mask = 0;
36
+#if TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32
29
37
+extern bool guest_base_signed_addr32;
30
if (width < 64) {
38
+#else
31
if (mop & MO_SIGN) {
39
+#define guest_base_signed_addr32 false
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
40
+#endif
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
41
+
43
+
42
+static inline void set_guest_base_signed_addr32(void)
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
43
+{
44
+#ifdef guest_base_signed_addr32
45
+ qemu_build_not_reached();
46
+#else
47
+ guest_base_signed_addr32 = true;
48
+#endif
49
+}
45
+}
50
+
46
+
51
/*
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
52
* Limit the guest addresses as best we can.
48
+{
53
*
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
54
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
50
+ ctx->prev_mb = NULL;
55
index XXXXXXX..XXXXXXX 100644
51
+ return finish_folding(ctx, op);
56
--- a/include/exec/cpu_ldst.h
57
+++ b/include/exec/cpu_ldst.h
58
@@ -XXX,XX +XXX,XX @@ static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
59
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
60
static inline void *g2h_untagged(abi_ptr x)
61
{
62
- return (void *)((uintptr_t)(x) + guest_base);
63
+ uintptr_t hx = guest_base_signed_addr32 ? (int32_t)x : (uintptr_t)x;
64
+ return (void *)(guest_base + hx);
65
}
52
}
66
53
67
static inline void *g2h(CPUState *cs, abi_ptr x)
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
68
diff --git a/bsd-user/main.c b/bsd-user/main.c
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
69
index XXXXXXX..XXXXXXX 100644
56
break;
70
--- a/bsd-user/main.c
57
case INDEX_op_qemu_ld_a32_i32:
71
+++ b/bsd-user/main.c
58
case INDEX_op_qemu_ld_a64_i32:
72
@@ -XXX,XX +XXX,XX @@
59
+ done = fold_qemu_ld_1reg(&ctx, op);
73
int singlestep;
60
+ break;
74
uintptr_t guest_base;
61
case INDEX_op_qemu_ld_a32_i64:
75
bool have_guest_base;
62
case INDEX_op_qemu_ld_a64_i64:
76
+#ifndef guest_base_signed_addr32
63
+ if (TCG_TARGET_REG_BITS == 64) {
77
+bool guest_base_signed_addr32;
64
+ done = fold_qemu_ld_1reg(&ctx, op);
78
+#endif
65
+ break;
79
+
66
+ }
80
/*
67
+ QEMU_FALLTHROUGH;
81
* When running 32-on-64 we should make sure we can fit all of the possible
68
case INDEX_op_qemu_ld_a32_i128:
82
* guest address space into a contiguous chunk of virtual host memory.
69
case INDEX_op_qemu_ld_a64_i128:
83
diff --git a/linux-user/main.c b/linux-user/main.c
70
- done = fold_qemu_ld(&ctx, op);
84
index XXXXXXX..XXXXXXX 100644
71
+ done = fold_qemu_ld_2reg(&ctx, op);
85
--- a/linux-user/main.c
72
break;
86
+++ b/linux-user/main.c
73
case INDEX_op_qemu_st8_a32_i32:
87
@@ -XXX,XX +XXX,XX @@ static const char *seed_optarg;
74
case INDEX_op_qemu_st8_a64_i32:
88
unsigned long mmap_min_addr;
89
uintptr_t guest_base;
90
bool have_guest_base;
91
+#ifndef guest_base_signed_addr32
92
+bool guest_base_signed_addr32;
93
+#endif
94
95
/*
96
* Used to implement backwards-compatibility for the `-strace`, and
97
--
75
--
98
2.25.1
76
2.43.0
99
100
diff view generated by jsdifflib
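The per-width masks chosen for loads are easiest to see for width 8: a signed byte load sign-extends, so bits 7..63 repeat the sign bit (the MAKE_64BIT_MASK(7, 57) case of the formula above), while an unsigned byte load leaves everything above bit 7 clear (MAKE_64BIT_MASK(0, 8)). In the standalone sketch below, mask64 is a local stand-in for that macro so the example is self-contained; nothing else here is QEMU code.

/* Illustration only -- not QEMU code.  mask64 is a local stand-in. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mask64(int shift, int length)
{
    uint64_t m = length < 64 ? (UINT64_C(1) << length) - 1 : ~UINT64_C(0);

    return m << shift;
}

int main(void)
{
    const uint64_t s_mask = mask64(7, 57);  /* width 8, MO_SIGN */
    const uint64_t z_mask = mask64(0, 8);   /* width 8, unsigned */

    for (unsigned byte = 0; byte < 256; byte++) {
        uint64_t s = (uint64_t)(int64_t)(int8_t)byte;   /* signed byte load */
        uint64_t u = (uint64_t)(uint8_t)byte;           /* unsigned byte load */

        /* All bits covered by s_mask are copies of the sign bit. */
        assert((s & s_mask) == 0 || (s & s_mask) == s_mask);
        /* No bit outside z_mask can be set. */
        assert((u & ~z_mask) == 0);
    }
    printf("width-8 load masks behave as advertised\n");
    return 0;
}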
1
Create a new function to combine a CPUTLBEntry addend
1
Stores have no output operands, and so need no further work.
2
with the guest address to form a host address.
3
2
4
Reviewed-by: WANG Xuerui <git@xen0n.name>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
accel/tcg/cputlb.c | 24 ++++++++++++++----------
6
tcg/optimize.c | 11 +++++------
11
1 file changed, 14 insertions(+), 10 deletions(-)
7
1 file changed, 5 insertions(+), 6 deletions(-)
12
8
13
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/accel/tcg/cputlb.c
11
--- a/tcg/optimize.c
16
+++ b/accel/tcg/cputlb.c
12
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
18
return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
14
{
15
/* Opcodes that touch guest memory stop the mb optimization. */
16
ctx->prev_mb = NULL;
17
- return false;
18
+ return true;
19
}
19
}
20
20
21
+static inline uintptr_t g2h_tlbe(const CPUTLBEntry *tlb, target_ulong gaddr)
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
+{
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
+ return tlb->addend + (uintptr_t)gaddr;
23
24
+}
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
+
25
remove_mem_copy_all(ctx);
26
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
26
- return false;
27
size_t max_entries)
27
+ return true;
28
{
29
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
30
31
if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
32
TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
33
- addr &= TARGET_PAGE_MASK;
34
- addr += tlb_entry->addend;
35
+ addr = g2h_tlbe(tlb_entry, addr & TARGET_PAGE_MASK);
36
if ((addr - start) < length) {
37
#if TCG_OVERSIZED_GUEST
38
tlb_entry->addr_write |= TLB_NOTDIRTY;
39
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
40
return -1;
41
}
28
}
42
29
43
- p = (void *)((uintptr_t)addr + entry->addend);
30
switch (op->opc) {
44
+ p = (void *)g2h_tlbe(entry, addr);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
45
if (hostp) {
32
g_assert_not_reached();
46
*hostp = p;
47
}
33
}
48
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
37
}
38
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
41
TCGType type;
42
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
44
- fold_tcg_st(ctx, op);
45
- return false;
46
+ return fold_tcg_st(ctx, op);
49
}
47
}
50
48
51
/* Everything else is RAM. */
49
src = arg_temp(op->args[0]);
52
- *phost = (void *)((uintptr_t)addr + entry->addend);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
53
+ *phost = (void *)g2h_tlbe(entry, addr);
51
last = ofs + tcg_type_size(type) - 1;
54
return flags;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
55
}
56
}
56
57
57
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
58
data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
59
} else {
60
data->is_io = false;
61
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
62
+ data->v.ram.hostaddr = (void *)g2h_tlbe(tlbe, addr);
63
}
64
return true;
65
} else {
66
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
67
goto stop_the_world;
68
}
69
70
- hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
71
+ hostaddr = (void *)g2h_tlbe(tlbe, addr);
72
73
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
74
notdirty_write(env_cpu(env), addr, size,
75
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
76
access_type, op ^ (need_swap * MO_BSWAP));
77
}
78
79
- haddr = (void *)((uintptr_t)addr + entry->addend);
80
+ haddr = (void *)g2h_tlbe(entry, addr);
81
82
/*
83
* Keep these two load_memop separate to ensure that the compiler
84
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
85
return res & MAKE_64BIT_MASK(0, size * 8);
86
}
87
88
- haddr = (void *)((uintptr_t)addr + entry->addend);
89
+ haddr = (void *)g2h_tlbe(entry, addr);
90
return load_memop(haddr, op);
91
}
92
93
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
94
notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
95
}
96
97
- haddr = (void *)((uintptr_t)addr + entry->addend);
98
+ haddr = (void *)g2h_tlbe(entry, addr);
99
100
/*
101
* Keep these two store_memop separate to ensure that the compiler
102
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
103
return;
104
}
105
106
- haddr = (void *)((uintptr_t)addr + entry->addend);
107
+ haddr = (void *)g2h_tlbe(entry, addr);
108
store_memop(haddr, val, op);
109
}
110
111
--
59
--
112
2.25.1
60
2.43.0
113
114
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
diff view generated by jsdifflib
1
When using reserved_va, which is the default for a 64-bit host
1
Change return from bool to int; distinguish between
2
and a 32-bit guest, set guest_base_signed_addr32 if requested
2
complete folding, simplification, and no change.
3
by TCG_TARGET_SIGNED_ADDR32, and the executable layout allows.
4
3
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
include/exec/cpu-all.h | 4 ---
7
tcg/optimize.c | 22 ++++++++++++++--------
10
linux-user/elfload.c | 62 ++++++++++++++++++++++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
11
2 files changed, 50 insertions(+), 16 deletions(-)
12
9
13
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/cpu-all.h
12
--- a/tcg/optimize.c
16
+++ b/include/exec/cpu-all.h
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ extern const TargetPageBits target_page;
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
18
#define PAGE_RESET 0x0040
15
return finish_folding(ctx, op);
19
/* For linux-user, indicates that the page is MAP_ANON. */
16
}
20
#define PAGE_ANON 0x0080
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
21
-
32
-
22
-#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
33
- return false;
23
-/* FIXME: Code that sets/uses this is broken and needs to go away. */
34
+ return 0;
24
#define PAGE_RESERVED 0x0100
35
}
25
-#endif
36
26
/* Target-specific bits that will be used via page_get_flags(). */
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
27
#define PAGE_TARGET_1 0x0200
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
28
#define PAGE_TARGET_2 0x0400
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
29
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/linux-user/elfload.c
32
+++ b/linux-user/elfload.c
33
@@ -XXX,XX +XXX,XX @@ static void pgb_dynamic(const char *image_name, long align)
34
static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
35
abi_ulong guest_hiaddr, long align)
36
{
37
- int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
38
+ int flags = (MAP_ANONYMOUS | MAP_PRIVATE |
39
+ MAP_NORESERVE | MAP_FIXED_NOREPLACE);
40
+ unsigned long local_rva = reserved_va;
41
+ bool protect_wrap = false;
42
void *addr, *test;
43
44
- if (guest_hiaddr > reserved_va) {
45
+ if (guest_hiaddr > local_rva) {
46
error_report("%s: requires more than reserved virtual "
47
"address space (0x%" PRIx64 " > 0x%lx)",
48
- image_name, (uint64_t)guest_hiaddr, reserved_va);
49
+ image_name, (uint64_t)guest_hiaddr, local_rva);
50
exit(EXIT_FAILURE);
51
}
40
}
52
41
53
- /* Widen the "image" to the entire reserved address space. */
42
- if (fold_setcond_zmask(ctx, op, false)) {
54
- pgb_static(image_name, 0, reserved_va, align);
43
+ i = fold_setcond_zmask(ctx, op, false);
55
+ if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
44
+ if (i > 0) {
56
+ if (guest_loaddr < 0x80000000u && guest_hiaddr > 0x80000000u) {
45
return true;
57
+ /*
46
}
58
+ * The executable itself wraps on signed addresses.
47
- fold_setcond_tst_pow2(ctx, op, false);
59
+ * Without per-page translation, we must keep the
48
+ if (i == 0) {
60
+ * guest address 0x7fff_ffff adjacent to 0x8000_0000
49
+ fold_setcond_tst_pow2(ctx, op, false);
61
+ * consecutive in host memory: unsigned addresses.
62
+ */
63
+ } else {
64
+ set_guest_base_signed_addr32();
65
+ if (local_rva <= 0x80000000u) {
66
+ /* No guest addresses are "negative": win! */
67
+ } else {
68
+ /* Begin by allocating the entire address space. */
69
+ local_rva = 0xfffffffful + 1;
70
+ protect_wrap = true;
71
+ }
72
+ }
73
+ }
50
+ }
74
51
75
- /* osdep.h defines this as 0 if it's missing */
52
ctx->z_mask = 1;
76
- flags |= MAP_FIXED_NOREPLACE;
53
return false;
77
+ /* Widen the "image" to the entire reserved address space. */
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
78
+ pgb_static(image_name, 0, local_rva, align);
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
79
+ assert(guest_base != 0);
80
81
/* Reserve the memory on the host. */
82
- assert(guest_base != 0);
83
test = g2h_untagged(0);
84
- addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
85
+ addr = mmap(test, local_rva, PROT_NONE, flags, -1, 0);
86
if (addr == MAP_FAILED || addr != test) {
87
+ /*
88
+ * If protect_wrap, we could try again with the original reserved_va
89
+ * setting, but the edge case of low ulimit vm setting on a 64-bit
90
+ * host is probably useless.
91
+ */
92
error_report("Unable to reserve 0x%lx bytes of virtual address "
93
- "space at %p (%s) for use as guest address space (check your"
94
- "virtual memory ulimit setting, min_mmap_addr or reserve less "
95
- "using -R option)", reserved_va, test, strerror(errno));
96
+ "space at %p (%s) for use as guest address space "
97
+ "(check your virtual memory ulimit setting, "
98
+ "min_mmap_addr or reserve less using -R option)",
99
+ local_rva, test, strerror(errno));
100
exit(EXIT_FAILURE);
101
}
56
}
102
57
103
+ if (protect_wrap) {
58
- if (fold_setcond_zmask(ctx, op, true)) {
104
+ /*
59
+ i = fold_setcond_zmask(ctx, op, true);
105
+ * Prevent the page just before 0x80000000 from being allocated.
60
+ if (i > 0) {
106
+ * This prevents a single guest object/allocation from crossing
61
return true;
107
+ * the signed wrap, and thus being discontiguous in host memory.
62
}
108
+ */
63
- fold_setcond_tst_pow2(ctx, op, true);
109
+ page_set_flags(0x7fffffff & TARGET_PAGE_MASK, 0x80000000u,
64
+ if (i == 0) {
110
+ PAGE_RESERVED);
65
+ fold_setcond_tst_pow2(ctx, op, true);
111
+ /* Adjust guest_base so that 0 is in the middle of the reservation. */
112
+ guest_base += 0x80000000ul;
113
+ }
66
+ }
114
+
67
115
qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
116
__func__, addr, reserved_va);
69
ctx->s_mask = -1;
117
}
118
--
70
--
119
2.25.1
71
2.43.0
120
121
New patch
1
Avoid the use of the OptContext slots.
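
For readers following the conversion: fold_masks_z() is assumed here to be a
thin wrapper over fold_masks_zs(), mirroring the fold_masks_s() helper quoted
later in this series.  A minimal sketch, not necessarily the exact helper:

    /* Sketch: record only a known-zeros mask, then finish the fold. */
    static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
    {
        return fold_masks_zs(ctx, op, z_mask, 0);
    }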
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
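
The ti_is_const()/ti_const_val() accessors used below are assumed to be plain
TempOptInfo field reads; a sketch of the expected shape, not a definition:

    /* Sketch: constant-ness and value taken straight from TempOptInfo. */
    static inline bool ti_is_const(TempOptInfo *ti)
    {
        return ti->is_const;
    }

    static inline uint64_t ti_const_val(TempOptInfo *ti)
    {
        return ti->val;
    }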
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
1
The condition for UMIN/UMAX availability is about to change;
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
use the canonical version.
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
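
A worked example, borrowing the ld8s masks that appear later in this series
(values chosen purely for illustration):

    uint64_t s_mask = INT8_MIN;   /* bits 63..7 are known sign copies */
    uint64_t sign   = -s_mask;    /* 0x80: the lowest repeated-sign bit */
    /* old: sign && !(z_mask & sign)      new: ~z_mask & sign */
    /* Both fire exactly when bit 7 of z_mask is clear, i.e. the sign bit
       is known zero; with s_mask == 0, sign == 0 and the new test is
       false, matching the old sign != 0 guard. */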
3
4
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/i386/tcg-target.c.inc | 8 ++++----
8
tcg/optimize.c | 5 ++---
9
1 file changed, 4 insertions(+), 4 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
10
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
13
--- a/tcg/optimize.c
14
+++ b/tcg/i386/tcg-target.c.inc
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
fixup = NEED_SWAP | NEED_INV;
16
17
break;
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
case TCG_COND_LEU:
18
{
19
- if (vece <= MO_32) {
19
- uint64_t s_mask, z_mask, sign;
20
+ if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
20
+ uint64_t s_mask, z_mask;
21
fixup = NEED_UMIN;
21
TempOptInfo *t1, *t2;
22
} else {
22
23
fixup = NEED_BIAS | NEED_INV;
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
24
}
32
}
25
break;
33
break;
26
case TCG_COND_GTU:
27
- if (vece <= MO_32) {
28
+ if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
29
fixup = NEED_UMIN | NEED_INV;
30
} else {
31
fixup = NEED_BIAS;
32
}
33
break;
34
case TCG_COND_GEU:
35
- if (vece <= MO_32) {
36
+ if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
37
fixup = NEED_UMAX;
38
} else {
39
fixup = NEED_BIAS | NEED_SWAP | NEED_INV;
40
}
41
break;
42
case TCG_COND_LTU:
43
- if (vece <= MO_32) {
44
+ if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
45
fixup = NEED_UMAX | NEED_INV;
46
} else {
47
fixup = NEED_BIAS | NEED_SWAP;
48
--
34
--
49
2.25.1
35
2.43.0
50
51
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
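
With fold_sub_vec() now ending in finish_folding(), which always returns
true, the old early-return in fold_sub() would bypass the sub-to-add
canonicalization further down; hence the duplication.  Sketch of the
pre-patch shape being removed:

    if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
        return true;    /* now always taken, skipping the code below */
    }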
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
New patch
1
Avoid the use of the OptContext slots.
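
A note on the constants below, inferred from the sextract change earlier in
this series (an explanatory assumption, not a definition): the parameterized
s_mask form includes the sign bit itself.  For an ld8s result:

    s_mask = INT8_MIN;   /* 0xffffffffffffff80: bits 63..7 are sign copies */
    /* versus the old slot value MAKE_64BIT_MASK(8, 56), i.e. bits 63..8 */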
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
type = ctx->type;
20
--
21
2.43.0
1
All 32-bit LoongArch operations sign-extend the output, so we are easily
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
able to keep TCG_TYPE_I32 values sign-extended in host registers.
2
Remove fold_masks as the function becomes unused.
3
3
4
Cc: WANG Xuerui <git@xen0n.name>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/loongarch64/tcg-target-sa32.h | 2 +-
7
tcg/optimize.c | 18 ++++++++----------
9
tcg/loongarch64/tcg-target.c.inc | 15 ++++++---------
8
1 file changed, 8 insertions(+), 10 deletions(-)
10
2 files changed, 7 insertions(+), 10 deletions(-)
11
9
12
diff --git a/tcg/loongarch64/tcg-target-sa32.h b/tcg/loongarch64/tcg-target-sa32.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target-sa32.h
12
--- a/tcg/optimize.c
15
+++ b/tcg/loongarch64/tcg-target-sa32.h
13
+++ b/tcg/optimize.c
16
@@ -1 +1 @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
17
-#define TCG_TARGET_SIGNED_ADDR32 0
15
return fold_masks_zs(ctx, op, -1, s_mask);
18
+#define TCG_TARGET_SIGNED_ADDR32 1
19
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/loongarch64/tcg-target.c.inc
22
+++ b/tcg/loongarch64/tcg-target.c.inc
23
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
24
return tcg_out_fail_alignment(s, l);
25
}
16
}
26
17
27
-#endif /* CONFIG_SOFTMMU */
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
-{
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
-}
28
-
22
-
29
/*
23
/*
30
* `ext32u` the address register into the temp register given,
24
* An "affected" mask bit is 0 if and only if the result is identical
31
* if target is 32-bit, no-op otherwise.
25
* to the first input. Thus if the entire mask is 0, the operation
32
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
33
static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
27
34
TCGReg addr, TCGReg tmp)
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
35
{
29
{
36
- if (TARGET_LONG_BITS == 32) {
30
+ uint64_t z_mask, s_mask;
37
+ if (TARGET_LONG_BITS == 32 && !guest_base_signed_addr32) {
31
+ TempOptInfo *t1, *t2;
38
tcg_out_ext32u(s, tmp, addr);
32
+
39
return tmp;
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
40
}
38
}
41
return addr;
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
50
}
43
+#endif /* CONFIG_SOFTMMU */
51
44
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
45
static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
46
TCGReg rk, MemOp opc, TCGType type)
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
48
tcg_insn_unit *label_ptr[1];
49
#else
50
unsigned a_bits;
51
-#endif
52
TCGReg base;
53
+#endif
54
55
data_regl = *args++;
56
addr_regl = *args++;
57
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
58
59
#if defined(CONFIG_SOFTMMU)
60
tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
61
- base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
62
- tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
63
+ tcg_out_qemu_ld_indexed(s, data_regl, addr_regl, TCG_REG_TMP2, opc, type);
64
add_qemu_ldst_label(s, 1, oi, type,
65
data_regl, addr_regl,
66
s->code_ptr, label_ptr);
67
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
68
tcg_insn_unit *label_ptr[1];
69
#else
70
unsigned a_bits;
71
-#endif
72
TCGReg base;
73
+#endif
74
75
data_regl = *args++;
76
addr_regl = *args++;
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
78
79
#if defined(CONFIG_SOFTMMU)
80
tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
81
- base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
82
- tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
83
+ tcg_out_qemu_st_indexed(s, data_regl, addr_regl, TCG_REG_TMP2, opc);
84
add_qemu_ldst_label(s, 0, oi,
85
0, /* type param is unused for stores */
86
data_regl, addr_regl,
87
--
53
--
88
2.25.1
54
2.43.0
89
90
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
/* Propagate constants and copies, fold constant expressions. */
20
--
21
2.43.0
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
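
The assertion is sound because finish_folding() unconditionally returns true
after resetting the output temps; condensed from the copy visible in a later
patch of this series:

    /* Tail of finish_folding(): reset each output temp, always succeed. */
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;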
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
1
From: Alex Bennée <alex.bennee@linaro.org>
1
All mask setting is now done with parameters via fold_masks_*.
2
2
3
valgrind pointed out that arg_info()->val can be undefined which will
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
be the case if the arguments are not constant. The ordering of the
5
checks will have ensured we never relied on an undefined value but for
6
the sake of completeness re-order the code to be clear.
7
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
10
Message-Id: <20220209112142.3367525-1-alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
5
---
13
tcg/optimize.c | 8 ++++----
6
tcg/optimize.c | 13 -------------
14
1 file changed, 4 insertions(+), 4 deletions(-)
7
1 file changed, 13 deletions(-)
15
8
16
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/optimize.c
11
--- a/tcg/optimize.c
19
+++ b/tcg/optimize.c
12
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static bool do_constant_folding_cond_eq(TCGCond c)
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
21
static int do_constant_folding_cond(TCGType type, TCGArg x,
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
22
TCGArg y, TCGCond c)
15
23
{
16
/* In flight values from optimization. */
24
- uint64_t xv = arg_info(x)->val;
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
25
- uint64_t yv = arg_info(y)->val;
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
26
-
43
-
27
if (arg_is_const(x) && arg_is_const(y)) {
44
/*
28
+ uint64_t xv = arg_info(x)->val;
45
* Process each opcode.
29
+ uint64_t yv = arg_info(y)->val;
46
* Sorted alphabetically by opcode as much as possible.
30
+
31
switch (type) {
32
case TCG_TYPE_I32:
33
return do_constant_folding_cond_32(xv, yv, c);
34
@@ -XXX,XX +XXX,XX @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
35
}
36
} else if (args_are_copies(x, y)) {
37
return do_constant_folding_cond_eq(c);
38
- } else if (arg_is_const(y) && yv == 0) {
39
+ } else if (arg_is_const(y) && arg_info(y)->val == 0) {
40
switch (c) {
41
case TCG_COND_LTU:
42
return 0;
43
--
47
--
44
2.25.1
48
2.43.0
45
46
1
AVX512DQ has VPMULLQ.
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
2
3
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/i386/tcg-target.c.inc | 12 ++++++------
7
tcg/optimize.c | 4 ++--
8
1 file changed, 6 insertions(+), 6 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
9
9
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
12
--- a/tcg/optimize.c
13
+++ b/tcg/i386/tcg-target.c.inc
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
15
#define OPC_PMOVZXDQ (0x35 | P_EXT38 | P_DATA16)
15
g_assert_not_reached();
16
#define OPC_PMULLW (0xd5 | P_EXT | P_DATA16)
16
}
17
#define OPC_PMULLD (0x40 | P_EXT38 | P_DATA16)
17
18
+#define OPC_VPMULLQ (0x40 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
19
#define OPC_POR (0xeb | P_EXT | P_DATA16)
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
20
#define OPC_PSHUFB (0x00 | P_EXT38 | P_DATA16)
20
return true;
21
#define OPC_PSHUFD (0x70 | P_EXT | P_DATA16)
21
}
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
22
23
OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
};
24
s_mask = s_mask_old >> pos;
25
static int const mul_insn[4] = {
25
s_mask |= -1ull << (len - 1);
26
- OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_UD2
26
27
+ OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_VPMULLQ
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
};
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
static int const shift_imm_insn[4] = {
29
return true;
30
OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
30
}
31
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
32
return 0;
33
34
case INDEX_op_mul_vec:
35
- if (vece == MO_8) {
36
- /* We can expand the operation for MO_8. */
37
+ switch (vece) {
38
+ case MO_8:
39
return -1;
40
- }
41
- if (vece == MO_64) {
42
- return 0;
43
+ case MO_64:
44
+ return have_avx512dq;
45
}
46
return 1;
47
31
48
--
32
--
49
2.25.1
33
2.43.0
50
51
1
When TCG_TARGET_SIGNED_ADDR32 is set, adjust the tlb addend to
1
The big comment just above says functions should be sorted.
2
allow the 32-bit guest address to be sign extended within the
2
Add forward declarations as needed.
3
64-bit host register instead of zero extended.
4
3
5
This will simplify tcg hosts like MIPS, RISC-V, and LoongArch,
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
which naturally sign-extend 32-bit values, in contrast to x86_64
7
and AArch64 which zero-extend them.
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
accel/tcg/cputlb.c | 12 +++++++++++-
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
14
1 file changed, 11 insertions(+), 1 deletion(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
15
9
16
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/cputlb.c
12
--- a/tcg/optimize.c
19
+++ b/accel/tcg/cputlb.c
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
#include "qemu/plugin-memory.h"
15
* 3) those that produce information about the result value.
22
#endif
16
*/
23
#include "tcg/tcg-ldst.h"
17
24
+#include "tcg-target-sa32.h"
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
25
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
26
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
27
/* #define DEBUG_TLB */
21
+
28
@@ -XXX,XX +XXX,XX @@ static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
22
static bool fold_add(OptContext *ctx, TCGOp *op)
29
30
static inline uintptr_t g2h_tlbe(const CPUTLBEntry *tlb, target_ulong gaddr)
31
{
23
{
32
+ if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
24
if (fold_const2_commutative(ctx, op) ||
33
+ return tlb->addend + (int32_t)gaddr;
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
34
+ }
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
35
return tlb->addend + (uintptr_t)gaddr;
36
}
27
}
37
28
38
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
39
desc->iotlb[index].attrs = attrs;
30
+{
40
31
+ /* If true and false values are the same, eliminate the cmp. */
41
/* Now calculate the new entry */
32
+ if (args_are_copies(op->args[2], op->args[3])) {
42
- tn.addend = addend - vaddr_page;
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
43
+
44
+ if (TCG_TARGET_SIGNED_ADDR32 && TARGET_LONG_BITS == 32) {
45
+ tn.addend = addend - (int32_t)vaddr_page;
46
+ } else {
47
+ tn.addend = addend - vaddr_page;
48
+ }
34
+ }
49
+
35
+
50
if (prot & PAGE_READ) {
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
51
tn.addr_read = address;
37
+ uint64_t tv = arg_info(op->args[2])->val;
52
if (wp_flags & BP_MEM_READ) {
38
+ uint64_t fv = arg_info(op->args[3])->val;
39
+
40
+ if (tv == -1 && fv == 0) {
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
42
+ }
43
+ if (tv == 0 && fv == -1) {
44
+ if (TCG_TARGET_HAS_not_vec) {
45
+ op->opc = INDEX_op_not_vec;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
}
90
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
92
-{
93
- /* If true and false values are the same, eliminate the cmp. */
94
- if (args_are_copies(op->args[2], op->args[3])) {
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
96
- }
97
-
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
99
- uint64_t tv = arg_info(op->args[2])->val;
100
- uint64_t fv = arg_info(op->args[3])->val;
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
148
{
53
--
149
--
54
2.25.1
150
2.43.0
55
56
1
The evex encoding is added here, for use in a subsequent patch.
1
The big comment just above says functions should be sorted.
2
2
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/i386/tcg-target.c.inc | 51 ++++++++++++++++++++++++++++++++++++++-
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
8
1 file changed, 50 insertions(+), 1 deletion(-)
7
1 file changed, 30 insertions(+), 30 deletions(-)
9
8
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
11
--- a/tcg/optimize.c
13
+++ b/tcg/i386/tcg-target.c.inc
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
15
#define P_SIMDF3 0x20000 /* 0xf3 opcode prefix */
14
return true;
16
#define P_SIMDF2 0x40000 /* 0xf2 opcode prefix */
17
#define P_VEXL 0x80000 /* Set VEX.L = 1 */
18
+#define P_EVEX 0x100000 /* Requires EVEX encoding */
19
20
#define OPC_ARITH_EvIz    (0x81)
21
#define OPC_ARITH_EvIb    (0x83)
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
23
tcg_out8(s, opc);
24
}
15
}
25
16
26
+static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
27
+ int rm, int index)
28
+{
18
+{
29
+ /* The entire 4-byte evex prefix; with R' and V' set. */
19
+ /* Canonicalize the comparison to put immediate second. */
30
+ uint32_t p = 0x08041062;
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
31
+ int mm, pp;
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
24
+}
32
+
25
+
33
+ tcg_debug_assert(have_avx512vl);
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
34
+
27
+{
35
+ /* EVEX.mm */
28
+ /* If true and false values are the same, eliminate the cmp. */
36
+ if (opc & P_EXT3A) {
29
+ if (args_are_copies(op->args[3], op->args[4])) {
37
+ mm = 3;
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
38
+ } else if (opc & P_EXT38) {
39
+ mm = 2;
40
+ } else if (opc & P_EXT) {
41
+ mm = 1;
42
+ } else {
43
+ g_assert_not_reached();
44
+ }
31
+ }
45
+
32
+
46
+ /* EVEX.pp */
33
+ /* Canonicalize the comparison to put immediate second. */
47
+ if (opc & P_DATA16) {
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
48
+ pp = 1; /* 0x66 */
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
49
+ } else if (opc & P_SIMDF3) {
50
+ pp = 2; /* 0xf3 */
51
+ } else if (opc & P_SIMDF2) {
52
+ pp = 3; /* 0xf2 */
53
+ } else {
54
+ pp = 0;
55
+ }
36
+ }
56
+
37
+ /*
57
+ p = deposit32(p, 8, 2, mm);
38
+ * Canonicalize the "false" input reg to match the destination,
58
+ p = deposit32(p, 13, 1, (rm & 8) == 0); /* EVEX.RXB.B */
39
+ * so that the tcg backend can implement "move if true".
59
+ p = deposit32(p, 14, 1, (index & 8) == 0); /* EVEX.RXB.X */
40
+ */
60
+ p = deposit32(p, 15, 1, (r & 8) == 0); /* EVEX.RXB.R */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
61
+ p = deposit32(p, 16, 2, pp);
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
62
+ p = deposit32(p, 19, 4, ~v);
43
+ }
63
+ p = deposit32(p, 23, 1, (opc & P_VEXW) != 0);
44
+ return finish_folding(ctx, op);
64
+ p = deposit32(p, 29, 2, (opc & P_VEXL) != 0);
65
+
66
+ tcg_out32(s, p);
67
+ tcg_out8(s, opc);
68
+}
45
+}
69
+
46
+
70
static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
71
{
48
{
72
- tcg_out_vex_opc(s, opc, r, v, rm, 0);
49
uint64_t z_mask, s_mask;
73
+ if (opc & P_EVEX) {
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
74
+ tcg_out_evex_opc(s, opc, r, v, rm, 0);
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
75
+ } else {
76
+ tcg_out_vex_opc(s, opc, r, v, rm, 0);
77
+ }
78
tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
79
}
52
}
80
53
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
55
-{
56
- /* Canonicalize the comparison to put immediate second. */
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
58
- op->args[3] = tcg_swap_cond(op->args[3]);
59
- }
60
- return finish_folding(ctx, op);
61
-}
62
-
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
64
-{
65
- /* If true and false values are the same, eliminate the cmp. */
66
- if (args_are_copies(op->args[3], op->args[4])) {
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
- }
69
-
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
82
-}
83
-
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
85
{
86
uint64_t z_mask, s_mask, s_mask_old;
81
--
87
--
82
2.25.1
88
2.43.0
83
84
New patch
1
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
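
A usage sketch of the intended equivalence, matching the target conversions
later in this series:

    /* Halving via the old flag ... */
    r = float64_muladd(a, b, c, float_muladd_halve_result, s);
    /* ... becomes an exponent adjustment of -1, applied before the
       single final rounding: */
    r = float64_muladd_scalbn(a, b, c, -1, 0, s);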
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/fpu/softfloat.h | 6 ++++
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
9
fpu/softfloat-parts.c.inc | 7 +++--
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/fpu/softfloat.h
15
+++ b/include/fpu/softfloat.h
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
17
float16 float16_sub(float16, float16, float_status *status);
18
float16 float16_mul(float16, float16, float_status *status);
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
100
+
101
+float32 QEMU_SOFTFLOAT_ATTR
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
103
+ int scale, int flags, float_status *status)
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/fpu/softfloat-parts.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
217
--
218
2.43.0
219
220
1
All RV64 32-bit operations sign-extend the output, so we are easily
1
Use the scalbn interface instead of float_muladd_halve_result.
2
able to keep TCG_TYPE_I32 values sign-extended in host registers.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
tcg/riscv/tcg-target-sa32.h | 6 +++++-
6
target/arm/tcg/helper-a64.c | 6 +++---
9
tcg/riscv/tcg-target.c.inc | 8 ++------
7
1 file changed, 3 insertions(+), 3 deletions(-)
10
2 files changed, 7 insertions(+), 7 deletions(-)
11
8
12
diff --git a/tcg/riscv/tcg-target-sa32.h b/tcg/riscv/tcg-target-sa32.h
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/riscv/tcg-target-sa32.h
11
--- a/target/arm/tcg/helper-a64.c
15
+++ b/tcg/riscv/tcg-target-sa32.h
12
+++ b/target/arm/tcg/helper-a64.c
16
@@ -1 +1,5 @@
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
17
-#define TCG_TARGET_SIGNED_ADDR32 0
14
(float16_is_infinity(b) && float16_is_zero(a))) {
18
+/*
15
return float16_one_point_five;
19
+ * Do not set TCG_TARGET_SIGNED_ADDR32 for RV32;
16
}
20
+ * TCG expects this to only be set for 64-bit hosts.
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
21
+ */
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
22
+#define TCG_TARGET_SIGNED_ADDR32 (__riscv_xlen == 64)
23
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/riscv/tcg-target.c.inc
26
+++ b/tcg/riscv/tcg-target.c.inc
27
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
28
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
29
30
/* TLB Hit - translate address using addend. */
31
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
32
- tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
33
- addrl = TCG_REG_TMP0;
34
- }
35
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
36
}
19
}
37
20
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
39
data_regl, data_regh, addr_regl, addr_regh,
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
40
s->code_ptr, label_ptr);
23
(float32_is_infinity(b) && float32_is_zero(a))) {
41
#else
24
return float32_one_point_five;
42
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
43
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
44
tcg_out_ext32u(s, base, addr_regl);
45
addr_regl = base;
46
}
25
}
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
48
data_regl, data_regh, addr_regl, addr_regh,
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
49
s->code_ptr, label_ptr);
28
}
50
#else
29
51
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
52
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
53
tcg_out_ext32u(s, base, addr_regl);
32
(float64_is_infinity(b) && float64_is_zero(a))) {
54
addr_regl = base;
33
return float64_one_point_five;
55
}
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
37
}
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
56
--
40
--
57
2.25.1
41
2.43.0
58
42
59
43
New patch
1
1
Use the scalbn interface instead of float_muladd_halve_result.
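
For instance, the fhadds/fhaddd helpers below average two inputs with a
single rounding; at the softfloat level that is (sketch):

    /* (1 * s1 + s2) * 2**-1, rounded once */
    float32 r = float32_muladd_scalbn(float32_one, s1, s2, -1, 0, fpst);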
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/sparc/helper.h | 4 +-
7
target/sparc/fop_helper.c | 8 ++--
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
206
2.43.0
207
208
1
Define as 0 for all tcg hosts. Put this in a separate header,
1
All uses have been converted to float*_muladd_scalbn.
2
because we'll want this in places that do not ordinarily have
3
access to all of tcg/tcg.h.
4
2
5
Reviewed-by: WANG Xuerui <git@xen0n.name>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
5
---
11
tcg/aarch64/tcg-target-sa32.h | 1 +
6
include/fpu/softfloat.h | 3 ---
12
tcg/arm/tcg-target-sa32.h | 1 +
7
fpu/softfloat.c | 6 ------
13
tcg/i386/tcg-target-sa32.h | 1 +
8
fpu/softfloat-parts.c.inc | 4 ----
14
tcg/loongarch64/tcg-target-sa32.h | 1 +
9
3 files changed, 13 deletions(-)
15
tcg/mips/tcg-target-sa32.h | 1 +
16
tcg/ppc/tcg-target-sa32.h | 1 +
17
tcg/riscv/tcg-target-sa32.h | 1 +
18
tcg/s390x/tcg-target-sa32.h | 1 +
19
tcg/sparc/tcg-target-sa32.h | 1 +
20
tcg/tci/tcg-target-sa32.h | 1 +
21
tcg/tcg.c | 4 ++++
22
11 files changed, 14 insertions(+)
23
create mode 100644 tcg/aarch64/tcg-target-sa32.h
24
create mode 100644 tcg/arm/tcg-target-sa32.h
25
create mode 100644 tcg/i386/tcg-target-sa32.h
26
create mode 100644 tcg/loongarch64/tcg-target-sa32.h
27
create mode 100644 tcg/mips/tcg-target-sa32.h
28
create mode 100644 tcg/ppc/tcg-target-sa32.h
29
create mode 100644 tcg/riscv/tcg-target-sa32.h
30
create mode 100644 tcg/s390x/tcg-target-sa32.h
31
create mode 100644 tcg/sparc/tcg-target-sa32.h
32
create mode 100644 tcg/tci/tcg-target-sa32.h
33
10
34
diff --git a/tcg/aarch64/tcg-target-sa32.h b/tcg/aarch64/tcg-target-sa32.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
35
new file mode 100644
36
index XXXXXXX..XXXXXXX
37
--- /dev/null
38
+++ b/tcg/aarch64/tcg-target-sa32.h
39
@@ -0,0 +1 @@
40
+#define TCG_TARGET_SIGNED_ADDR32 0
41
diff --git a/tcg/arm/tcg-target-sa32.h b/tcg/arm/tcg-target-sa32.h
42
new file mode 100644
43
index XXXXXXX..XXXXXXX
44
--- /dev/null
45
+++ b/tcg/arm/tcg-target-sa32.h
46
@@ -0,0 +1 @@
47
+#define TCG_TARGET_SIGNED_ADDR32 0
48
diff --git a/tcg/i386/tcg-target-sa32.h b/tcg/i386/tcg-target-sa32.h
49
new file mode 100644
50
index XXXXXXX..XXXXXXX
51
--- /dev/null
52
+++ b/tcg/i386/tcg-target-sa32.h
53
@@ -0,0 +1 @@
54
+#define TCG_TARGET_SIGNED_ADDR32 0
55
diff --git a/tcg/loongarch64/tcg-target-sa32.h b/tcg/loongarch64/tcg-target-sa32.h
56
new file mode 100644
57
index XXXXXXX..XXXXXXX
58
--- /dev/null
59
+++ b/tcg/loongarch64/tcg-target-sa32.h
60
@@ -0,0 +1 @@
61
+#define TCG_TARGET_SIGNED_ADDR32 0
62
diff --git a/tcg/mips/tcg-target-sa32.h b/tcg/mips/tcg-target-sa32.h
63
new file mode 100644
64
index XXXXXXX..XXXXXXX
65
--- /dev/null
66
+++ b/tcg/mips/tcg-target-sa32.h
67
@@ -0,0 +1 @@
68
+#define TCG_TARGET_SIGNED_ADDR32 0
69
diff --git a/tcg/ppc/tcg-target-sa32.h b/tcg/ppc/tcg-target-sa32.h
70
new file mode 100644
71
index XXXXXXX..XXXXXXX
72
--- /dev/null
73
+++ b/tcg/ppc/tcg-target-sa32.h
74
@@ -0,0 +1 @@
75
+#define TCG_TARGET_SIGNED_ADDR32 0
76
diff --git a/tcg/riscv/tcg-target-sa32.h b/tcg/riscv/tcg-target-sa32.h
77
new file mode 100644
78
index XXXXXXX..XXXXXXX
79
--- /dev/null
80
+++ b/tcg/riscv/tcg-target-sa32.h
81
@@ -0,0 +1 @@
82
+#define TCG_TARGET_SIGNED_ADDR32 0
83
diff --git a/tcg/s390x/tcg-target-sa32.h b/tcg/s390x/tcg-target-sa32.h
84
new file mode 100644
85
index XXXXXXX..XXXXXXX
86
--- /dev/null
87
+++ b/tcg/s390x/tcg-target-sa32.h
88
@@ -0,0 +1 @@
89
+#define TCG_TARGET_SIGNED_ADDR32 0
90
diff --git a/tcg/sparc/tcg-target-sa32.h b/tcg/sparc/tcg-target-sa32.h
91
new file mode 100644
92
index XXXXXXX..XXXXXXX
93
--- /dev/null
94
+++ b/tcg/sparc/tcg-target-sa32.h
95
@@ -0,0 +1 @@
96
+#define TCG_TARGET_SIGNED_ADDR32 0
97
diff --git a/tcg/tci/tcg-target-sa32.h b/tcg/tci/tcg-target-sa32.h
98
new file mode 100644
99
index XXXXXXX..XXXXXXX
100
--- /dev/null
101
+++ b/tcg/tci/tcg-target-sa32.h
102
@@ -0,0 +1 @@
103
+#define TCG_TARGET_SIGNED_ADDR32 0
104
diff --git a/tcg/tcg.c b/tcg/tcg.c
105
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
106
--- a/tcg/tcg.c
13
--- a/include/fpu/softfloat.h
107
+++ b/tcg/tcg.c
14
+++ b/include/fpu/softfloat.h
108
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
109
#include "exec/log.h"
16
| Using these differs from negating an input or output before calling
110
#include "tcg/tcg-ldst.h"
17
| the muladd function in that this means that a NaN doesn't have its
111
#include "tcg-internal.h"
18
| sign bit inverted before it is propagated.
112
+#include "tcg-target-sa32.h"
19
-| We also support halving the result before rounding, as a special
113
+
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
114
+/* Sanity check for TCG_TARGET_SIGNED_ADDR32. */
21
*----------------------------------------------------------------------------*/
115
+QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS == 32 && TCG_TARGET_SIGNED_ADDR32);
22
enum {
116
23
float_muladd_negate_c = 1,
117
#ifdef CONFIG_TCG_INTERPRETER
24
float_muladd_negate_product = 2,
118
#include <ffi.h>
25
float_muladd_negate_result = 4,
26
- float_muladd_halve_result = 8,
27
};
28
29
/*----------------------------------------------------------------------------
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/fpu/softfloat.c
33
+++ b/fpu/softfloat.c
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
35
if (unlikely(!can_use_fpu(s))) {
36
goto soft;
37
}
38
- if (unlikely(flags & float_muladd_halve_result)) {
39
- goto soft;
40
- }
41
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
59
a->exp = p_widen.exp;
60
61
return_normal:
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
63
- if (flags & float_muladd_halve_result) {
64
- a->exp -= 1;
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
119
--
69
--
120
2.25.1
70
2.43.0
121
71
122
72
1
The general ternary logic operation can implement BITSEL.
1
This rounding mode is used by Hexagon.
2
Funnel the 4-operand operation into three variants of the
3
3-operand instruction, depending on input operand overlap.
4
2
5
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
4
---
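Note: the new mode behaves as round-to-nearest-even except that a result
which would overflow saturates to the largest finite value instead of
becoming infinity. A minimal sketch, not part of this patch (fs is a
scratch float_status):

    float_status fs = { 0 };
    set_float_rounding_mode(float_round_nearest_even_max, &fs);
    /* 2 * FLT_MAX overflows; here the sum saturates to float32_maxnorm
     * (0x7f7fffff) rather than returning +Inf. */
    float32 r = float32_add(float32_maxnorm, float32_maxnorm, &fs);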
9
tcg/i386/tcg-target.h | 2 +-
5
include/fpu/softfloat-types.h | 2 ++
10
tcg/i386/tcg-target.c.inc | 20 +++++++++++++++++++-
6
fpu/softfloat-parts.c.inc | 3 +++
11
2 files changed, 20 insertions(+), 2 deletions(-)
7
2 files changed, 5 insertions(+)
12
8
13
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/i386/tcg-target.h
11
--- a/include/fpu/softfloat-types.h
16
+++ b/tcg/i386/tcg-target.h
12
+++ b/include/fpu/softfloat-types.h
17
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
18
#define TCG_TARGET_HAS_mul_vec 1
14
float_round_to_odd = 5,
19
#define TCG_TARGET_HAS_sat_vec 1
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
20
#define TCG_TARGET_HAS_minmax_vec 1
16
float_round_to_odd_inf = 6,
21
-#define TCG_TARGET_HAS_bitsel_vec 0
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
22
+#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
18
+ float_round_nearest_even_max = 7,
23
#define TCG_TARGET_HAS_cmpsel_vec -1
19
} FloatRoundMode;
24
20
25
#define TCG_TARGET_deposit_i32_valid(ofs, len) \
21
/*
26
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
27
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/i386/tcg-target.c.inc
24
--- a/fpu/softfloat-parts.c.inc
29
+++ b/tcg/i386/tcg-target.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
30
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
31
27
int exp, flags = 0;
32
TCGType type = vecl + TCG_TYPE_V64;
28
33
int insn, sub;
29
switch (s->float_rounding_mode) {
34
- TCGArg a0, a1, a2;
30
+ case float_round_nearest_even_max:
35
+ TCGArg a0, a1, a2, a3;
31
+ overflow_norm = true;
36
32
+ /* fall through */
37
a0 = args[0];
33
case float_round_nearest_even:
38
a1 = args[1];
34
if (N > 64 && frac_lsb == 0) {
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
40
sub = 0xdd; /* orB!C */
41
goto gen_simd_imm8;
42
43
+ case INDEX_op_bitsel_vec:
44
+ insn = OPC_VPTERNLOGQ;
45
+ a3 = args[3];
46
+ if (a0 == a1) {
47
+ a1 = a2;
48
+ a2 = a3;
49
+ sub = 0xca; /* A?B:C */
50
+ } else if (a0 == a2) {
51
+ a2 = a3;
52
+ sub = 0xe2; /* B?A:C */
53
+ } else {
54
+ tcg_out_mov(s, type, a0, a3);
55
+ sub = 0xb8; /* B?C:A */
56
+ }
57
+ goto gen_simd_imm8;
58
+
59
gen_simd_imm8:
60
tcg_debug_assert(insn != OPC_UD2);
61
if (type == TCG_TYPE_V256) {
62
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
63
case INDEX_op_x86_vpshrdv_vec:
64
return C_O1_I3(x, 0, x, x);
65
66
+ case INDEX_op_bitsel_vec:
67
case INDEX_op_x86_vpblendvb_vec:
68
return C_O1_I3(x, x, x, x);
69
70
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
71
case INDEX_op_nor_vec:
72
case INDEX_op_eqv_vec:
73
case INDEX_op_not_vec:
74
+ case INDEX_op_bitsel_vec:
75
return 1;
76
case INDEX_op_cmp_vec:
77
case INDEX_op_cmpsel_vec:
78
--
36
--
79
2.25.1
37
2.43.0
80
81
1
AVX512VL has VPROLD and VPROLQ, layered onto the same
1
Certain Hexagon instructions suppress changes to the result
2
opcode as PSHIFTD, but requires EVEX encoding and W1.
2
when the product of fma() is a true zero.
3
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
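Note: the flag only changes behaviour when the product is a true zero; in
that case C is returned untouched, so a -0 addend keeps its sign instead
of being folded to +0 by the usual zero-addition rule. A minimal sketch,
not part of this patch (fs is a scratch float_status):

    float_status fs = { 0 };
    float32 nzero = float32_set_sign(float32_zero, 1);   /* -0.0f */

    /* default rules: (+0 * 1.0) + (-0) rounds to +0 */
    float32 r0 = float32_muladd(float32_zero, float32_one, nzero, 0, &fs);

    /* with the flag, the addend is returned as-is, so r1 is -0 */
    float32 r1 = float32_muladd(float32_zero, float32_one, nzero,
                                float_muladd_suppress_add_product_zero, &fs);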
6
tcg/i386/tcg-target.h | 2 +-
6
include/fpu/softfloat.h | 5 +++++
7
tcg/i386/tcg-target.c.inc | 15 +++++++++++++--
7
fpu/softfloat.c | 3 +++
8
2 files changed, 14 insertions(+), 3 deletions(-)
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
9
10
10
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.h
13
--- a/include/fpu/softfloat.h
13
+++ b/tcg/i386/tcg-target.h
14
+++ b/include/fpu/softfloat.h
14
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
15
#define TCG_TARGET_HAS_not_vec 0
16
| Using these differs from negating an input or output before calling
16
#define TCG_TARGET_HAS_neg_vec 0
17
| the muladd function in that this means that a NaN doesn't have its
17
#define TCG_TARGET_HAS_abs_vec 1
18
| sign bit inverted before it is propagated.
18
-#define TCG_TARGET_HAS_roti_vec 0
19
+|
19
+#define TCG_TARGET_HAS_roti_vec have_avx512vl
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
20
#define TCG_TARGET_HAS_rots_vec 0
21
+| such that the product is a true zero, then return C without addition.
21
#define TCG_TARGET_HAS_rotv_vec 0
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
22
#define TCG_TARGET_HAS_shi_vec 1
23
*----------------------------------------------------------------------------*/
23
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
24
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/i386/tcg-target.c.inc
34
--- a/fpu/softfloat.c
26
+++ b/tcg/i386/tcg-target.c.inc
35
+++ b/fpu/softfloat.c
27
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
28
#define OPC_PSHUFLW (0x70 | P_EXT | P_SIMDF2)
37
if (unlikely(!can_use_fpu(s))) {
29
#define OPC_PSHUFHW (0x70 | P_EXT | P_SIMDF3)
38
goto soft;
30
#define OPC_PSHIFTW_Ib (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
39
}
31
-#define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /2 /6 /4 */
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
32
+#define OPC_PSHIFTD_Ib (0x72 | P_EXT | P_DATA16) /* /1 /2 /6 /4 */
41
+ goto soft;
33
#define OPC_PSHIFTQ_Ib (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
42
+ }
34
#define OPC_PSLLW (0xf1 | P_EXT | P_DATA16)
43
35
#define OPC_PSLLD (0xf2 | P_EXT | P_DATA16)
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
37
insn = shift_imm_insn[vece];
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
38
}
52
}
39
sub = 4;
53
if (c->cls == float_class_zero) {
40
+ goto gen_shift;
54
- if (a->sign != c->sign) {
41
+ case INDEX_op_rotli_vec:
55
+ if (flags & float_muladd_suppress_add_product_zero) {
42
+ insn = OPC_PSHIFTD_Ib | P_EVEX; /* VPROL[DQ] */
56
+ a->sign = c->sign;
43
+ if (vece == MO_64) {
57
+ } else if (a->sign != c->sign) {
44
+ insn |= P_VEXW;
58
goto return_sub_zero;
45
+ }
59
}
46
+ sub = 1;
60
goto return_zero;
47
+ goto gen_shift;
48
gen_shift:
49
tcg_debug_assert(vece != MO_8);
50
if (type == TCG_TYPE_V256) {
51
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
52
case INDEX_op_shli_vec:
53
case INDEX_op_shri_vec:
54
case INDEX_op_sari_vec:
55
+ case INDEX_op_rotli_vec:
56
case INDEX_op_x86_psrldq_vec:
57
return C_O1_I1(x, x);
58
59
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
60
case INDEX_op_xor_vec:
61
case INDEX_op_andc_vec:
62
return 1;
63
- case INDEX_op_rotli_vec:
64
case INDEX_op_cmp_vec:
65
case INDEX_op_cmpsel_vec:
66
return -1;
67
68
+ case INDEX_op_rotli_vec:
69
+ return have_avx512vl && vece >= MO_32 ? 1 : -1;
70
+
71
case INDEX_op_shli_vec:
72
case INDEX_op_shri_vec:
73
/* We must expand the operation for MO_8. */
74
--
61
--
75
2.25.1
62
2.43.0
1
AVX512VL has a general ternary logic operation, VPTERNLOGQ,
1
There are no special cases for this instruction.
2
which can implement NOT, ORC, NAND, NOR, EQV.
2
Remove internal_mpyf as unused.
3
3
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/i386/tcg-target.h | 10 +++++-----
7
target/hexagon/fma_emu.h | 1 -
9
tcg/i386/tcg-target.c.inc | 34 ++++++++++++++++++++++++++++++++++
8
target/hexagon/fma_emu.c | 8 --------
10
2 files changed, 39 insertions(+), 5 deletions(-)
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
11
12
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/i386/tcg-target.h
14
--- a/target/hexagon/fma_emu.h
15
+++ b/tcg/i386/tcg-target.h
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
#define TCG_TARGET_HAS_v256 have_avx2
17
float32 infinite_float32(uint8_t sign);
18
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
#define TCG_TARGET_HAS_andc_vec 1
19
int scale, float_status *fp_status);
20
-#define TCG_TARGET_HAS_orc_vec 0
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
-#define TCG_TARGET_HAS_nand_vec 0
21
float64 internal_mpyhh(float64 a, float64 b,
22
-#define TCG_TARGET_HAS_nor_vec 0
22
unsigned long long int accumulated,
23
-#define TCG_TARGET_HAS_eqv_vec 0
23
float_status *fp_status);
24
-#define TCG_TARGET_HAS_not_vec 0
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
+#define TCG_TARGET_HAS_orc_vec have_avx512vl
26
+#define TCG_TARGET_HAS_nand_vec have_avx512vl
27
+#define TCG_TARGET_HAS_nor_vec have_avx512vl
28
+#define TCG_TARGET_HAS_eqv_vec have_avx512vl
29
+#define TCG_TARGET_HAS_not_vec have_avx512vl
30
#define TCG_TARGET_HAS_neg_vec 0
31
#define TCG_TARGET_HAS_abs_vec 1
32
#define TCG_TARGET_HAS_roti_vec have_avx512vl
33
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
34
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/i386/tcg-target.c.inc
26
--- a/target/hexagon/fma_emu.c
36
+++ b/tcg/i386/tcg-target.c.inc
27
+++ b/target/hexagon/fma_emu.c
37
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
38
#define OPC_VPSRLVW (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
29
return accum_round_float32(result, fp_status);
39
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
30
}
40
#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
31
41
+#define OPC_VPTERNLOGQ (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
42
#define OPC_VZEROUPPER (0x77 | P_EXT)
33
-{
43
#define OPC_XCHG_ax_r32    (0x90)
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
44
35
- return float32_mul(a, b, fp_status);
45
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
36
- }
46
insn = vpshldi_insn[vece];
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
47
sub = args[3];
38
-}
48
goto gen_simd_imm8;
39
-
49
+
40
float64 internal_mpyhh(float64 a, float64 b,
50
+ case INDEX_op_not_vec:
41
unsigned long long int accumulated,
51
+ insn = OPC_VPTERNLOGQ;
42
float_status *fp_status)
52
+ a2 = a1;
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
53
+ sub = 0x33; /* !B */
44
index XXXXXXX..XXXXXXX 100644
54
+ goto gen_simd_imm8;
45
--- a/target/hexagon/op_helper.c
55
+ case INDEX_op_nor_vec:
46
+++ b/target/hexagon/op_helper.c
56
+ insn = OPC_VPTERNLOGQ;
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
57
+ sub = 0x11; /* norCB */
48
{
58
+ goto gen_simd_imm8;
49
float32 RdV;
59
+ case INDEX_op_nand_vec:
50
arch_fpop_start(env);
60
+ insn = OPC_VPTERNLOGQ;
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
61
+ sub = 0x77; /* nandCB */
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
62
+ goto gen_simd_imm8;
53
arch_fpop_end(env);
63
+ case INDEX_op_eqv_vec:
54
return RdV;
64
+ insn = OPC_VPTERNLOGQ;
55
}
65
+ sub = 0x99; /* xnorCB */
66
+ goto gen_simd_imm8;
67
+ case INDEX_op_orc_vec:
68
+ insn = OPC_VPTERNLOGQ;
69
+ sub = 0xdd; /* orB!C */
70
+ goto gen_simd_imm8;
71
+
72
gen_simd_imm8:
73
tcg_debug_assert(insn != OPC_UD2);
74
if (type == TCG_TYPE_V256) {
75
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
76
case INDEX_op_or_vec:
77
case INDEX_op_xor_vec:
78
case INDEX_op_andc_vec:
79
+ case INDEX_op_orc_vec:
80
+ case INDEX_op_nand_vec:
81
+ case INDEX_op_nor_vec:
82
+ case INDEX_op_eqv_vec:
83
case INDEX_op_ssadd_vec:
84
case INDEX_op_usadd_vec:
85
case INDEX_op_sssub_vec:
86
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
87
88
case INDEX_op_abs_vec:
89
case INDEX_op_dup_vec:
90
+ case INDEX_op_not_vec:
91
case INDEX_op_shli_vec:
92
case INDEX_op_shri_vec:
93
case INDEX_op_sari_vec:
94
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
95
case INDEX_op_or_vec:
96
case INDEX_op_xor_vec:
97
case INDEX_op_andc_vec:
98
+ case INDEX_op_orc_vec:
99
+ case INDEX_op_nand_vec:
100
+ case INDEX_op_nor_vec:
101
+ case INDEX_op_eqv_vec:
102
+ case INDEX_op_not_vec:
103
return 1;
104
case INDEX_op_cmp_vec:
105
case INDEX_op_cmpsel_vec:
106
--
56
--
107
2.25.1
57
2.43.0
108
109
diff view generated by jsdifflib
1
AVX512VL has VPABSQ, VPMAXSQ, VPMAXUQ, VPMINSQ, VPMINUQ.
1
There are no special cases for this instruction.
2
2
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/i386/tcg-target.c.inc | 18 +++++++++++-------
6
target/hexagon/op_helper.c | 2 +-
8
1 file changed, 11 insertions(+), 7 deletions(-)
7
1 file changed, 1 insertion(+), 1 deletion(-)
9
8
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
11
--- a/target/hexagon/op_helper.c
13
+++ b/tcg/i386/tcg-target.c.inc
12
+++ b/target/hexagon/op_helper.c
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
15
#define OPC_PABSB (0x1c | P_EXT38 | P_DATA16)
14
float32 RsV, float32 RtV)
16
#define OPC_PABSW (0x1d | P_EXT38 | P_DATA16)
15
{
17
#define OPC_PABSD (0x1e | P_EXT38 | P_DATA16)
16
arch_fpop_start(env);
18
+#define OPC_VPABSQ (0x1f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
19
#define OPC_PACKSSDW (0x6b | P_EXT | P_DATA16)
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
20
#define OPC_PACKSSWB (0x63 | P_EXT | P_DATA16)
19
arch_fpop_end(env);
21
#define OPC_PACKUSDW (0x2b | P_EXT38 | P_DATA16)
20
return RxV;
22
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
21
}
23
#define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16)
24
#define OPC_PMAXSW (0xee | P_EXT | P_DATA16)
25
#define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16)
26
+#define OPC_VPMAXSQ (0x3d | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
27
#define OPC_PMAXUB (0xde | P_EXT | P_DATA16)
28
#define OPC_PMAXUW (0x3e | P_EXT38 | P_DATA16)
29
#define OPC_PMAXUD (0x3f | P_EXT38 | P_DATA16)
30
+#define OPC_VPMAXUQ (0x3f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
31
#define OPC_PMINSB (0x38 | P_EXT38 | P_DATA16)
32
#define OPC_PMINSW (0xea | P_EXT | P_DATA16)
33
#define OPC_PMINSD (0x39 | P_EXT38 | P_DATA16)
34
+#define OPC_VPMINSQ (0x39 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
35
#define OPC_PMINUB (0xda | P_EXT | P_DATA16)
36
#define OPC_PMINUW (0x3a | P_EXT38 | P_DATA16)
37
#define OPC_PMINUD (0x3b | P_EXT38 | P_DATA16)
38
+#define OPC_VPMINUQ (0x3b | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
39
#define OPC_PMOVSXBW (0x20 | P_EXT38 | P_DATA16)
40
#define OPC_PMOVSXWD (0x23 | P_EXT38 | P_DATA16)
41
#define OPC_PMOVSXDQ (0x25 | P_EXT38 | P_DATA16)
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
43
OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
44
};
45
static int const smin_insn[4] = {
46
- OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_UD2
47
+ OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_VPMINSQ
48
};
49
static int const smax_insn[4] = {
50
- OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_UD2
51
+ OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ
52
};
53
static int const umin_insn[4] = {
54
- OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_UD2
55
+ OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
56
};
57
static int const umax_insn[4] = {
58
- OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
59
+ OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ
60
};
61
static int const rotlv_insn[4] = {
62
OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
63
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
64
OPC_UD2, OPC_VPSHRDVW, OPC_VPSHRDVD, OPC_VPSHRDVQ
65
};
66
static int const abs_insn[4] = {
67
- /* TODO: AVX512 adds support for MO_64. */
68
- OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_UD2
69
+ OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_VPABSQ
70
};
71
72
TCGType type = vecl + TCG_TYPE_V64;
73
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
74
case INDEX_op_umin_vec:
75
case INDEX_op_umax_vec:
76
case INDEX_op_abs_vec:
77
- return vece <= MO_32;
78
+ return vece <= MO_32 || have_avx512vl;
79
80
default:
81
return 0;
82
--
22
--
83
2.25.1
23
2.43.0
84
85
1
There is no such instruction on x86, so we should
1
There are no special cases for this instruction. Since hexagon
2
not be pretending it has arguments.
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
3
4
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/i386/tcg-target.c.inc | 1 -
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 1 deletion(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
10
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
13
--- a/target/hexagon/op_helper.c
14
+++ b/tcg/i386/tcg-target.c.inc
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
case INDEX_op_shls_vec:
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
case INDEX_op_shrs_vec:
17
float32 RsV, float32 RtV)
18
case INDEX_op_sars_vec:
18
{
19
- case INDEX_op_rotls_vec:
19
- float32 neg_RsV;
20
case INDEX_op_cmp_vec:
20
arch_fpop_start(env);
21
case INDEX_op_x86_shufps_vec:
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
case INDEX_op_x86_blend_vec:
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
23
--
28
--
24
2.25.1
29
2.43.0
25
26
1
AVX512VL has VPROLVD and VPRORVQ.
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
2
5
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
8
---
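Note: negating one factor by hand and negating the product give the same
numerical result; the only case where they differ is the sign carried by
a propagated NaN, and default-nan mode discards that anyway. A sketch of
the equivalent form:

    /* computes -(RsV * RtV) + RxV with a single rounding */
    RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
                         &env->fp_status);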
7
tcg/i386/tcg-target.h | 2 +-
9
target/hexagon/op_helper.c | 11 +++--------
8
tcg/i386/tcg-target.c.inc | 25 ++++++++++++++++++++++++-
10
1 file changed, 3 insertions(+), 8 deletions(-)
9
2 files changed, 25 insertions(+), 2 deletions(-)
10
11
11
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.h
14
--- a/target/hexagon/op_helper.c
14
+++ b/tcg/i386/tcg-target.h
15
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
16
#define TCG_TARGET_HAS_abs_vec 1
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
17
#define TCG_TARGET_HAS_roti_vec have_avx512vl
18
float32 RsV, float32 RtV, float32 PuV)
18
#define TCG_TARGET_HAS_rots_vec 0
19
{
19
-#define TCG_TARGET_HAS_rotv_vec 0
20
- size4s_t tmp;
20
+#define TCG_TARGET_HAS_rotv_vec have_avx512vl
21
arch_fpop_start(env);
21
#define TCG_TARGET_HAS_shi_vec 1
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
22
#define TCG_TARGET_HAS_shs_vec 1
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
23
#define TCG_TARGET_HAS_shv_vec have_avx2
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
24
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
25
index XXXXXXX..XXXXXXX 100644
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
26
--- a/tcg/i386/tcg-target.c.inc
27
- RxV = tmp;
27
+++ b/tcg/i386/tcg-target.c.inc
28
- }
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
29
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
30
+ float_muladd_suppress_add_product_zero,
30
#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
31
+ &env->fp_status);
31
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
32
arch_fpop_end(env);
32
+#define OPC_VPROLVD (0x15 | P_EXT38 | P_DATA16 | P_EVEX)
33
return RxV;
33
+#define OPC_VPROLVQ (0x15 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
34
}
34
+#define OPC_VPRORVD (0x14 | P_EXT38 | P_DATA16 | P_EVEX)
35
+#define OPC_VPRORVQ (0x14 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
36
#define OPC_VPSLLVW (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
37
#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
38
#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
40
static int const umax_insn[4] = {
41
OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
42
};
43
+ static int const rotlv_insn[4] = {
44
+ OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
45
+ };
46
+ static int const rotrv_insn[4] = {
47
+ OPC_UD2, OPC_UD2, OPC_VPRORVD, OPC_VPRORVQ
48
+ };
49
static int const shlv_insn[4] = {
50
OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ
51
};
52
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
53
case INDEX_op_sarv_vec:
54
insn = sarv_insn[vece];
55
goto gen_simd;
56
+ case INDEX_op_rotlv_vec:
57
+ insn = rotlv_insn[vece];
58
+ goto gen_simd;
59
+ case INDEX_op_rotrv_vec:
60
+ insn = rotrv_insn[vece];
61
+ goto gen_simd;
62
case INDEX_op_shls_vec:
63
insn = shls_insn[vece];
64
goto gen_simd;
65
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
66
case INDEX_op_shlv_vec:
67
case INDEX_op_shrv_vec:
68
case INDEX_op_sarv_vec:
69
+ case INDEX_op_rotlv_vec:
70
+ case INDEX_op_rotrv_vec:
71
case INDEX_op_shls_vec:
72
case INDEX_op_shrs_vec:
73
case INDEX_op_sars_vec:
74
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
75
return 0;
76
case INDEX_op_rotlv_vec:
77
case INDEX_op_rotrv_vec:
78
- return have_avx2 && vece >= MO_32 ? -1 : 0;
79
+ switch (vece) {
80
+ case MO_32:
81
+ case MO_64:
82
+ return have_avx512vl ? 1 : have_avx2 ? -1 : 0;
83
+ }
84
+ return 0;
85
86
case INDEX_op_mul_vec:
87
if (vece == MO_8) {
88
--
35
--
89
2.25.1
36
2.43.0
90
91
1
Expand 32-bit and 64-bit scalar rotate with VPRO[LR]V;
1
There are multiple special cases for this instruction.
2
expand 16-bit scalar rotate with VPSHLDV.
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
3
8
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
11
---
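Note on (3): float_flag_invalid_isi is raised exactly when an infinite
addend has the opposite sign to an infinite product, so the helper can run
the muladd first and patch up the Hexagon-specific Inf - Inf == 0 result
afterwards. A sketch of that shape (the real helper below also clears the
accumulated exception flags, since the instruction suppresses them):

    RxV = float32_muladd(RsV, RtV, RxV, flags, &env->fp_status);
    if (get_float_exception_flags(&env->fp_status) & float_flag_invalid_isi) {
        RxV = 0;    /* Inf - Inf */
    }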
8
tcg/i386/tcg-target.c.inc | 49 +++++++++++++++++++++++----------------
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
9
1 file changed, 29 insertions(+), 20 deletions(-)
13
1 file changed, 26 insertions(+), 79 deletions(-)
10
14
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
17
--- a/target/hexagon/op_helper.c
14
+++ b/tcg/i386/tcg-target.c.inc
18
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotli(TCGType type, unsigned vece,
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
16
tcg_temp_free_vec(t);
20
return RxV;
17
}
21
}
18
22
19
-static void expand_vec_rotls(TCGType type, unsigned vece,
23
-static bool is_zero_prod(float32 a, float32 b)
20
- TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
21
-{
24
-{
22
- TCGv_i32 rsh;
25
- return ((float32_is_zero(a) && is_finite(b)) ||
23
- TCGv_vec t;
26
- (float32_is_zero(b) && is_finite(a)));
24
-
25
- tcg_debug_assert(vece != MO_8);
26
-
27
- t = tcg_temp_new_vec(type);
28
- rsh = tcg_temp_new_i32();
29
-
30
- tcg_gen_neg_i32(rsh, lsh);
31
- tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
32
- tcg_gen_shls_vec(vece, t, v1, lsh);
33
- tcg_gen_shrs_vec(vece, v0, v1, rsh);
34
- tcg_gen_or_vec(vece, v0, v0, t);
35
- tcg_temp_free_vec(t);
36
- tcg_temp_free_i32(rsh);
37
-}
27
-}
38
-
28
-
39
static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
40
TCGv_vec v1, TCGv_vec sh, bool right)
30
-{
31
- float32 ret = dst;
32
- if (float32_is_any_nan(x)) {
33
- if (extract32(x, 22, 1) == 0) {
34
- float_raise(float_flag_invalid, fp_status);
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
41
{
43
{
42
@@ -XXX,XX +XXX,XX @@ static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
43
tcg_temp_free_vec(t);
45
return RxV;
44
}
46
}
45
47
46
+static void expand_vec_rotls(TCGType type, unsigned vece,
48
-static bool is_inf_prod(int32_t a, int32_t b)
47
+ TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
48
+{
50
+ float32 RsV, float32 RtV, int negate)
49
+ TCGv_vec t = tcg_temp_new_vec(type);
51
{
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
50
+
56
+
51
+ tcg_debug_assert(vece != MO_8);
57
+ arch_fpop_start(env);
52
+
58
+
53
+ if (vece >= MO_32 ? have_avx512vl : have_avx512vbmi2) {
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
54
+ tcg_gen_dup_i32_vec(vece, t, lsh);
60
+ RxV = float32_muladd(RsV, RtV, RxV,
55
+ if (vece >= MO_32) {
61
+ negate | float_muladd_suppress_add_product_zero,
56
+ tcg_gen_rotlv_vec(vece, v0, v1, t);
62
+ &env->fp_status);
57
+ } else {
63
+
58
+ expand_vec_rotv(type, vece, v0, v1, t, false);
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
59
+ }
72
+ }
60
+ } else {
61
+ TCGv_i32 rsh = tcg_temp_new_i32();
62
+
63
+ tcg_gen_neg_i32(rsh, lsh);
64
+ tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
65
+ tcg_gen_shls_vec(vece, t, v1, lsh);
66
+ tcg_gen_shrs_vec(vece, v0, v1, rsh);
67
+ tcg_gen_or_vec(vece, v0, v0, t);
68
+
69
+ tcg_temp_free_i32(rsh);
70
+ }
73
+ }
71
+
74
+
72
+ tcg_temp_free_vec(t);
75
+ arch_fpop_end(env);
73
+}
76
+ return RxV;
74
+
77
}
75
static void expand_vec_mul(TCGType type, unsigned vece,
78
76
TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
80
float32 RsV, float32 RtV)
77
{
81
{
82
- bool infinp;
83
- bool infminusinf;
84
- float32 tmp;
85
-
86
- arch_fpop_start(env);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
111
}
112
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
114
float32 RsV, float32 RtV)
115
{
116
- bool infinp;
117
- bool infminusinf;
118
- float32 tmp;
119
-
120
- arch_fpop_start(env);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
146
}
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
78
--
149
--
79
2.25.1
150
2.43.0
80
81
1
AVX512 has VPSRAQ with immediate operand, in the same form as
1
The function is now unused.
2
with AVX, but requires EVEX encoding and W1.
3
2
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
tcg/i386/tcg-target.c.inc | 30 +++++++++++++++++++++---------
6
target/hexagon/fma_emu.h | 2 -
9
1 file changed, 21 insertions(+), 9 deletions(-)
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
10
9
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.h
14
+++ b/tcg/i386/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.h
15
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
16
break;
15
}
17
16
int32_t float32_getexp(float32 f32);
18
case INDEX_op_shli_vec:
17
float32 infinite_float32(uint8_t sign);
19
+ insn = shift_imm_insn[vece];
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
20
sub = 6;
19
- int scale, float_status *fp_status);
21
goto gen_shift;
20
float64 internal_mpyhh(float64 a, float64 b,
22
case INDEX_op_shri_vec:
21
unsigned long long int accumulated,
23
+ insn = shift_imm_insn[vece];
22
float_status *fp_status);
24
sub = 2;
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
goto gen_shift;
24
index XXXXXXX..XXXXXXX 100644
26
case INDEX_op_sari_vec:
25
--- a/target/hexagon/fma_emu.c
27
- tcg_debug_assert(vece != MO_64);
26
+++ b/target/hexagon/fma_emu.c
28
+ if (vece == MO_64) {
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
29
+ insn = OPC_PSHIFTD_Ib | P_VEXW | P_EVEX;
28
return -1;
30
+ } else {
29
}
31
+ insn = shift_imm_insn[vece];
30
32
+ }
31
-static uint64_t float32_getmant(float32 f32)
33
sub = 4;
32
-{
34
gen_shift:
33
- Float a = { .i = f32 };
35
tcg_debug_assert(vece != MO_8);
34
- if (float32_is_normal(f32)) {
36
- insn = shift_imm_insn[vece];
35
- return a.mant | 1ULL << 23;
37
if (type == TCG_TYPE_V256) {
36
- }
38
insn |= P_VEXL;
37
- if (float32_is_zero(f32)) {
39
}
38
- return 0;
40
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
39
- }
41
return vece == MO_8 ? -1 : 1;
40
- if (float32_is_denormal(f32)) {
42
41
- return a.mant;
43
case INDEX_op_sari_vec:
42
- }
44
- /* We must expand the operation for MO_8. */
43
- return ~0ULL;
45
- if (vece == MO_8) {
44
-}
46
+ switch (vece) {
45
-
47
+ case MO_8:
46
int32_t float32_getexp(float32 f32)
48
return -1;
47
{
49
- }
48
Float a = { .i = f32 };
50
- /* We can emulate this for MO_64, but it does not pay off
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
51
- unless we're producing at least 4 values. */
50
}
52
- if (vece == MO_64) {
51
53
+ case MO_16:
52
/* Return a maximum finite value with the requested sign */
54
+ case MO_32:
53
-static float32 maxfinite_float32(uint8_t sign)
55
+ return 1;
54
-{
56
+ case MO_64:
55
- if (sign) {
57
+ if (have_avx512vl) {
56
- return make_float32(SF_MINUS_MAXF);
58
+ return 1;
57
- } else {
59
+ }
58
- return make_float32(SF_MAXF);
60
+ /*
59
- }
61
+ * We can emulate this for MO_64, but it does not pay off
60
-}
62
+ * unless we're producing at least 4 values.
61
-
63
+ */
62
-/* Return a zero value with requested sign */
64
return type >= TCG_TYPE_V256 ? -1 : 0;
63
-static float32 zero_float32(uint8_t sign)
65
}
64
-{
66
- return 1;
65
- if (sign) {
67
+ return 0;
66
- return make_float32(0x80000000);
68
67
- } else {
69
case INDEX_op_shls_vec:
68
- return float32_zero;
70
case INDEX_op_shrs_vec:
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
71
--
219
--
72
2.25.1
220
2.43.0
73
74
1
AVX512VL has VPSRAQ.
1
This massive macro is now only used once.
2
Expand it for use only by float64.
2
3
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/i386/tcg-target.c.inc | 12 ++++++++++--
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
8
1 file changed, 10 insertions(+), 2 deletions(-)
8
1 file changed, 127 insertions(+), 128 deletions(-)
9
9
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
13
+++ b/tcg/i386/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
15
#define OPC_PSLLQ (0xf3 | P_EXT | P_DATA16)
15
}
16
#define OPC_PSRAW (0xe1 | P_EXT | P_DATA16)
16
17
#define OPC_PSRAD (0xe2 | P_EXT | P_DATA16)
17
/* Return a maximum finite value with the requested sign */
18
+#define OPC_VPSRAQ (0x72 | P_EXT | P_DATA16 | P_VEXW | P_EVEX)
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
19
#define OPC_PSRLW (0xd1 | P_EXT | P_DATA16)
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
20
#define OPC_PSRLD (0xd2 | P_EXT | P_DATA16)
20
-{ \
21
#define OPC_PSRLQ (0xd3 | P_EXT | P_DATA16)
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
23
OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ
23
- /* result zero */ \
24
};
24
- switch (fp_status->float_rounding_mode) { \
25
static int const sars_insn[4] = {
25
- case float_round_down: \
26
- OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_UD2
26
- return zero_##SUFFIX(1); \
27
+ OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_VPSRAQ
27
- default: \
28
};
28
- return zero_##SUFFIX(0); \
29
static int const abs_insn[4] = {
29
- } \
30
/* TODO: AVX512 adds support for MO_64. */
30
- } \
31
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
31
- /* Normalize right */ \
32
case INDEX_op_shrs_vec:
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
return vece >= MO_16;
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
case INDEX_op_sars_vec:
34
- /* So we need to normalize right while the high word is non-zero and \
35
- return vece >= MO_16 && vece <= MO_32;
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
+ switch (vece) {
36
- while ((int128_gethi(a.mant) != 0) || \
37
+ case MO_16:
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
+ case MO_32:
38
- a = accum_norm_right(a, 1); \
39
+ return 1;
39
- } \
40
+ case MO_64:
40
- /* \
41
+ return have_avx512vl;
41
- * OK, now normalize left \
42
+ }
42
- * We want to normalize left until we have a leading one in bit 24 \
43
+ return 0;
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
case INDEX_op_rotls_vec:
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
return vece >= MO_16 ? -1 : 0;
45
- * should be 0 \
46
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
271
}
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
47
--
278
--
48
2.25.1
279
2.43.0
49
50
AVX512VL has VPSRAVQ, and
AVX512BW has VPSLLVW, VPSRAVW, VPSRLVW.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++++++++++--------
1 file changed, 24 insertions(+), 8 deletions(-)

This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 16 +++-------------
1 file changed, 3 insertions(+), 13 deletions(-)
10
9
11
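A minimal standalone sketch of the extract-based approach (the helper here only mirrors the semantics of QEMU's extract32() from qemu/bitops.h, and the field positions are the IEEE single-precision layout; it is not code from the patch):

    /*
     * Bitfield struct layouts depend on host endianness and the compiler;
     * an explicit extract of the float32 fields does not.
     */
    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t extract32(uint32_t value, int start, int length)
    {
        assert(start >= 0 && length > 0 && length <= 32 - start);
        return (value >> start) & (~0U >> (32 - length));
    }

    /* IEEE single precision: [31] sign, [30:23] exponent, [22:0] mantissa. */
    static inline int float32_getexp_raw(uint32_t f32_bits)
    {
        return extract32(f32_bits, 23, 8);
    }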
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
14
+++ b/tcg/i386/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
14
@@ -XXX,XX +XXX,XX @@ typedef union {
16
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
17
#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
18
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
19
+#define OPC_VPSLLVW (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
20
#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
21
#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
22
+#define OPC_VPSRAVW (0x11 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
23
#define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16)
24
+#define OPC_VPSRAVQ (0x46 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
25
+#define OPC_VPSRLVW (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
26
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
27
#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
28
#define OPC_VZEROUPPER (0x77 | P_EXT)
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
30
OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_UD2
31
};
15
};
32
static int const shlv_insn[4] = {
16
} Double;
33
- /* TODO: AVX512 adds support for MO_16. */
17
34
- OPC_UD2, OPC_UD2, OPC_VPSLLVD, OPC_VPSLLVQ
18
-typedef union {
35
+ OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ
19
- float f;
36
};
20
- uint32_t i;
37
static int const shrv_insn[4] = {
21
- struct {
38
- /* TODO: AVX512 adds support for MO_16. */
22
- uint32_t mant:23;
39
- OPC_UD2, OPC_UD2, OPC_VPSRLVD, OPC_VPSRLVQ
23
- uint32_t exp:8;
40
+ OPC_UD2, OPC_VPSRLVW, OPC_VPSRLVD, OPC_VPSRLVQ
24
- uint32_t sign:1;
41
};
25
- };
42
static int const sarv_insn[4] = {
26
-} Float;
43
- /* TODO: AVX512 adds support for MO_16, MO_64. */
27
-
44
- OPC_UD2, OPC_UD2, OPC_VPSRAVD, OPC_UD2
28
static uint64_t float64_getmant(float64 f64)
45
+ OPC_UD2, OPC_VPSRAVW, OPC_VPSRAVD, OPC_VPSRAVQ
29
{
46
};
30
Double a = { .i = f64 };
47
static int const shls_insn[4] = {
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
48
OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ
32
49
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
33
int32_t float32_getexp(float32 f32)
50
34
{
51
case INDEX_op_shlv_vec:
35
- Float a = { .i = f32 };
52
case INDEX_op_shrv_vec:
36
+ int exp = float32_getexp_raw(f32);
53
- return have_avx2 && vece >= MO_32;
37
if (float32_is_normal(f32)) {
54
+ switch (vece) {
38
- return a.exp;
55
+ case MO_16:
39
+ return exp;
56
+ return have_avx512bw;
40
}
57
+ case MO_32:
41
if (float32_is_denormal(f32)) {
58
+ case MO_64:
42
- return a.exp + 1;
59
+ return have_avx2;
43
+ return exp + 1;
60
+ }
44
}
61
+ return 0;
45
return -1;
62
case INDEX_op_sarv_vec:
46
}
63
- return have_avx2 && vece == MO_32;
64
+ switch (vece) {
65
+ case MO_16:
66
+ return have_avx512bw;
67
+ case MO_32:
68
+ return have_avx2;
69
+ case MO_64:
70
+ return have_avx512vl;
71
+ }
72
+ return 0;
73
case INDEX_op_rotlv_vec:
74
case INDEX_op_rotrv_vec:
75
return have_avx2 && vece >= MO_32 ? -1 : 0;
76
--
47
--
77
2.25.1
48
2.43.0
78
79
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/s390x/tcg-target.h | 6 +++---
tcg/s390x/tcg-target.c.inc | 17 +++++++++++++++++
2 files changed, 20 insertions(+), 3 deletions(-)

This structure, with bitfields, is incorrect for big-endian.
Use extract64 and deposit64 instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------
1 file changed, 16 insertions(+), 30 deletions(-)
9
9
10
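A standalone sketch of deposit64-style packing, as used in the tail of accum_round_float64() below (the helper mirrors the semantics of QEMU's deposit64() from qemu/bitops.h; pack_float64() is an illustrative name, not part of the patch):

    /*
     * Packing sign/exponent/mantissa with deposit64 yields the same bit
     * pattern on little- and big-endian hosts, unlike a bitfield struct.
     */
    #include <stdint.h>

    static inline uint64_t deposit64(uint64_t value, int start, int length,
                                     uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    static inline uint64_t pack_float64(int sign, int exp, uint64_t mant)
    {
        uint64_t ret = mant & ((1ULL << 52) - 1);  /* [51:0] mantissa  */
        ret = deposit64(ret, 52, 11, exp);         /* [62:52] exponent */
        ret = deposit64(ret, 63, 1, sign);         /* [63] sign        */
        return ret;
    }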
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/s390x/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
13
+++ b/tcg/s390x/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
14
@@ -XXX,XX +XXX,XX @@
15
15
16
#define TCG_TARGET_HAS_andc_vec 1
16
#define WAY_BIG_EXP 4096
17
#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
17
18
-#define TCG_TARGET_HAS_nand_vec 0
18
-typedef union {
19
-#define TCG_TARGET_HAS_nor_vec 0
19
- double f;
20
-#define TCG_TARGET_HAS_eqv_vec 0
20
- uint64_t i;
21
+#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1)
21
- struct {
22
+#define TCG_TARGET_HAS_nor_vec 1
22
- uint64_t mant:52;
23
+#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1)
23
- uint64_t exp:11;
24
#define TCG_TARGET_HAS_not_vec 1
24
- uint64_t sign:1;
25
#define TCG_TARGET_HAS_neg_vec 1
25
- };
26
#define TCG_TARGET_HAS_abs_vec 1
26
-} Double;
27
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
27
-
28
index XXXXXXX..XXXXXXX 100644
28
static uint64_t float64_getmant(float64 f64)
29
--- a/tcg/s390x/tcg-target.c.inc
29
{
30
+++ b/tcg/s390x/tcg-target.c.inc
30
- Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
31
+ uint64_t mant = extract64(f64, 0, 52);
32
VRRc_VMXL = 0xe7fd,
32
if (float64_is_normal(f64)) {
33
VRRc_VN = 0xe768,
33
- return a.mant | 1ULL << 52;
34
VRRc_VNC = 0xe769,
34
+ return mant | 1ULL << 52;
35
+ VRRc_VNN = 0xe76e,
35
}
36
VRRc_VNO = 0xe76b,
36
if (float64_is_zero(f64)) {
37
+ VRRc_VNX = 0xe76c,
37
return 0;
38
VRRc_VO = 0xe76a,
38
}
39
VRRc_VOC = 0xe76f,
39
if (float64_is_denormal(f64)) {
40
VRRc_VPKS = 0xe797, /* we leave the m5 cs field 0 */
40
- return a.mant;
41
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
41
+ return mant;
42
case INDEX_op_xor_vec:
42
}
43
tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
43
return ~0ULL;
44
break;
44
}
45
+ case INDEX_op_nand_vec:
45
46
+ tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
46
int32_t float64_getexp(float64 f64)
47
+ break;
47
{
48
+ case INDEX_op_nor_vec:
48
- Double a = { .i = f64 };
49
+ tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
49
+ int exp = extract64(f64, 52, 11);
50
+ break;
50
if (float64_is_normal(f64)) {
51
+ case INDEX_op_eqv_vec:
51
- return a.exp;
52
+ tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
52
+ return exp;
53
+ break;
53
}
54
54
if (float64_is_denormal(f64)) {
55
case INDEX_op_shli_vec:
55
- return a.exp + 1;
56
tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
56
+ return exp + 1;
57
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
57
}
58
case INDEX_op_and_vec:
58
return -1;
59
case INDEX_op_andc_vec:
59
}
60
case INDEX_op_bitsel_vec:
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
+ case INDEX_op_eqv_vec:
61
/* Return a maximum finite value with the requested sign */
62
+ case INDEX_op_nand_vec:
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
case INDEX_op_neg_vec:
63
{
64
+ case INDEX_op_nor_vec:
64
+ uint64_t ret;
65
case INDEX_op_not_vec:
65
+
66
case INDEX_op_or_vec:
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
67
case INDEX_op_orc_vec:
67
&& ((a.guard | a.round | a.sticky) == 0)) {
68
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
68
/* result zero */
69
case INDEX_op_or_vec:
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
70
case INDEX_op_orc_vec:
70
}
71
case INDEX_op_xor_vec:
71
}
72
+ case INDEX_op_nand_vec:
72
/* Underflow? */
73
+ case INDEX_op_nor_vec:
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
74
+ case INDEX_op_eqv_vec:
74
+ ret = int128_getlo(a.mant);
75
case INDEX_op_cmp_vec:
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
case INDEX_op_mul_vec:
76
/* Leading one means: No, we're normal. So, we should be done... */
77
case INDEX_op_rotlv_vec:
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
97
}
98
99
float64 internal_mpyhh(float64 a, float64 b,
78
--
100
--
79
2.25.1
101
2.43.0
80
81
All 32-bit mips operations sign-extend the output, so we are easily
able to keep TCG_TYPE_I32 values sign-extended in host registers.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/mips/tcg-target-sa32.h | 8 ++++++++
tcg/mips/tcg-target.c.inc | 10 ++--------
2 files changed, 10 insertions(+), 8 deletions(-)

No need to open-code 64x64->128-bit multiplication.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 32 +++-----------------------------
1 file changed, 3 insertions(+), 29 deletions(-)
10
8
11
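The replacement relies on a full 64x64->128 multiply. A rough standalone equivalent of QEMU's mulu64() from qemu/host-utils.h, assuming a compiler that provides unsigned __int128 (a sketch, not the QEMU implementation):

    #include <stdint.h>

    /* Same contract as QEMU's mulu64(): 128-bit product of a * b. */
    static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                              uint64_t a, uint64_t b)
    {
        unsigned __int128 r = (unsigned __int128)a * b;
        *plow  = (uint64_t)r;
        *phigh = (uint64_t)(r >> 64);
    }

    /* Usage mirroring the hunk below: mulu64(&l, &h, ai, bi); */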
diff --git a/tcg/mips/tcg-target-sa32.h b/tcg/mips/tcg-target-sa32.h
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/mips/tcg-target-sa32.h
11
--- a/target/hexagon/fma_emu.c
14
+++ b/tcg/mips/tcg-target-sa32.h
12
+++ b/target/hexagon/fma_emu.c
15
@@ -1 +1,9 @@
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
16
+/*
14
return -1;
17
+ * Do not set TCG_TARGET_SIGNED_ADDR32 for mips32;
15
}
18
+ * TCG expects this to only be set for 64-bit hosts.
16
19
+ */
17
-static uint32_t int128_getw0(Int128 x)
20
+#ifdef __mips64
18
-{
21
+#define TCG_TARGET_SIGNED_ADDR32 1
19
- return int128_getlo(x);
22
+#else
20
-}
23
#define TCG_TARGET_SIGNED_ADDR32 0
21
-
24
+#endif
22
-static uint32_t int128_getw1(Int128 x)
25
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
23
-{
26
index XXXXXXX..XXXXXXX 100644
24
- return int128_getlo(x) >> 32;
27
--- a/tcg/mips/tcg-target.c.inc
25
-}
28
+++ b/tcg/mips/tcg-target.c.inc
26
-
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
30
TCG_TMP0, TCG_TMP3, cmp_off);
28
{
31
}
29
- Int128 a, b;
32
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
33
- /* Zero extend a 32-bit guest address for a 64-bit host. */
31
+ uint64_t l, h;
34
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
32
35
- tcg_out_ext32u(s, base, addrl);
33
- a = int128_make64(ai);
36
- addrl = base;
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
37
- }
47
- }
38
-
48
-
39
/*
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
40
* Mask the page bits, keeping the alignment bits to compare against.
50
+ mulu64(&l, &h, ai, bi);
41
* For unaligned accesses, compare against the end of the access to
51
+ return int128_make128(l, h);
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
52
}
43
data_regl, data_regh, addr_regl, addr_regh,
53
44
s->code_ptr, label_ptr);
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
45
#else
46
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
47
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
48
tcg_out_ext32u(s, base, addr_regl);
49
addr_regl = base;
50
}
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
52
data_regl, data_regh, addr_regl, addr_regh,
53
s->code_ptr, label_ptr);
54
#else
55
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
56
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS && !guest_base_signed_addr32) {
57
tcg_out_ext32u(s, base, addr_regl);
58
addr_regl = base;
59
}
60
--
55
--
61
2.25.1
56
2.43.0
62
63
From: Ziqiao Kong <ziqiaokong@gmail.com>

The last entry of DEF_HELPERS_FLAGS_n is DEF_HELPER_FLAGS_7 and
thus the MAX_OPC_PARAM_IARGS should be 7.

Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Signed-off-by: Ziqiao Kong <ziqiaokong@gmail.com>
Message-Id: <20220227113127.414533-2-ziqiaokong@gmail.com>
Fixes: e6cadf49c3d ("tcg: Add support for a helper with 7 arguments")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg.h | 2 +-
tcg/tci/tcg-target.c.inc | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

Initialize x with accumulated via direct assignment,
rather than multiplying by 1.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
15
9
16
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/include/tcg/tcg.h
12
--- a/target/hexagon/fma_emu.c
19
+++ b/include/tcg/tcg.h
13
+++ b/target/hexagon/fma_emu.c
20
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
21
#else
15
float64_is_infinity(b)) {
22
#define MAX_OPC_PARAM_PER_ARG 1
16
return float64_mul(a, b, fp_status);
23
#endif
17
}
24
-#define MAX_OPC_PARAM_IARGS 6
18
- x.mant = int128_mul_6464(accumulated, 1);
25
+#define MAX_OPC_PARAM_IARGS 7
19
+ x.mant = int128_make64(accumulated);
26
#define MAX_OPC_PARAM_OARGS 1
20
x.sticky = sticky;
27
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
28
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
29
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/tci/tcg-target.c.inc
32
+++ b/tcg/tci/tcg-target.c.inc
33
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
34
TCG_REG_R0,
35
};
36
37
-#if MAX_OPC_PARAM_IARGS != 6
38
+#if MAX_OPC_PARAM_IARGS != 7
39
# error Fix needed, number of supported input arguments changed!
40
#endif
41
42
--
23
--
43
2.25.1
24
2.43.0
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/ppc/tcg-target.h | 6 +++---
tcg/ppc/tcg-target.c.inc | 15 +++++++++++++++
2 files changed, 18 insertions(+), 3 deletions(-)

Convert all targets simultaneously, as the gen_intermediate_code
function disappears from the target. While there are possible
workarounds, they're larger than simply performing the conversion.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/translator.h | 14 --------------
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
10
target/alpha/cpu.h | 2 ++
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
9
71
10
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
11
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.h
74
--- a/include/exec/translator.h
13
+++ b/tcg/ppc/tcg-target.h
75
+++ b/include/exec/translator.h
14
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
76
@@ -XXX,XX +XXX,XX @@
15
77
#include "qemu/bswap.h"
16
#define TCG_TARGET_HAS_andc_vec 1
78
#include "exec/vaddr.h"
17
#define TCG_TARGET_HAS_orc_vec have_isa_2_07
79
18
-#define TCG_TARGET_HAS_nand_vec 0
80
-/**
19
-#define TCG_TARGET_HAS_nor_vec 0
81
- * gen_intermediate_code
20
-#define TCG_TARGET_HAS_eqv_vec 0
82
- * @cpu: cpu context
21
+#define TCG_TARGET_HAS_nand_vec have_isa_2_07
83
- * @tb: translation block
22
+#define TCG_TARGET_HAS_nor_vec 1
84
- * @max_insns: max number of instructions to translate
23
+#define TCG_TARGET_HAS_eqv_vec have_isa_2_07
85
- * @pc: guest virtual program counter address
24
#define TCG_TARGET_HAS_not_vec 1
86
- * @host_pc: host physical program counter address
25
#define TCG_TARGET_HAS_neg_vec have_isa_3_00
87
- *
26
#define TCG_TARGET_HAS_abs_vec 0
88
- * This function must be provided by the target, which should create
27
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
89
- * the target-specific DisasContext, and then invoke translator_loop.
28
index XXXXXXX..XXXXXXX 100644
90
- */
29
--- a/tcg/ppc/tcg-target.c.inc
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
30
+++ b/tcg/ppc/tcg-target.c.inc
92
- vaddr pc, void *host_pc);
31
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
93
-
32
case INDEX_op_xor_vec:
94
/**
33
case INDEX_op_andc_vec:
95
* DisasJumpType:
34
case INDEX_op_not_vec:
96
* @DISAS_NEXT: Next instruction in program order.
35
+ case INDEX_op_nor_vec:
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
36
+ case INDEX_op_eqv_vec:
98
index XXXXXXX..XXXXXXX 100644
37
+ case INDEX_op_nand_vec:
99
--- a/include/hw/core/tcg-cpu-ops.h
38
return 1;
100
+++ b/include/hw/core/tcg-cpu-ops.h
39
case INDEX_op_orc_vec:
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
40
return have_isa_2_07;
102
* Called when the first CPU is realized.
41
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
103
*/
42
case INDEX_op_orc_vec:
104
void (*initialize)(void);
43
insn = VORC;
105
+ /**
44
break;
106
+ * @translate_code: Translate guest instructions to TCGOps
45
+ case INDEX_op_nand_vec:
107
+ * @cpu: cpu context
46
+ insn = VNAND;
108
+ * @tb: translation block
47
+ break;
109
+ * @max_insns: max number of instructions to translate
48
+ case INDEX_op_nor_vec:
110
+ * @pc: guest virtual program counter address
49
+ insn = VNOR;
111
+ * @host_pc: host physical program counter address
50
+ break;
112
+ *
51
+ case INDEX_op_eqv_vec:
113
+ * This function must be provided by the target, which should create
52
+ insn = VEQV;
114
+ * the target-specific DisasContext, and then invoke translator_loop.
53
+ break;
115
+ */
54
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
55
case INDEX_op_cmp_vec:
117
+ int *max_insns, vaddr pc, void *host_pc);
56
switch (args[3]) {
118
/**
57
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
58
case INDEX_op_xor_vec:
120
*
59
case INDEX_op_andc_vec:
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
60
case INDEX_op_orc_vec:
122
index XXXXXXX..XXXXXXX 100644
61
+ case INDEX_op_nor_vec:
123
--- a/target/alpha/cpu.h
62
+ case INDEX_op_eqv_vec:
124
+++ b/target/alpha/cpu.h
63
+ case INDEX_op_nand_vec:
125
@@ -XXX,XX +XXX,XX @@ enum {
64
case INDEX_op_cmp_vec:
126
};
65
case INDEX_op_ssadd_vec:
127
66
case INDEX_op_sssub_vec:
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
391
index XXXXXXX..XXXXXXX 100644
392
--- a/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
408
/*
409
* Overflow of code_gen_buffer, or the current slice of it.
410
*
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
413
* should we re-do the tcg optimization currently hidden
414
* inside tcg_gen_code. All that should be required is to
415
* flush the TBs, allocate a new TB, re-initialize it per
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
417
index XXXXXXX..XXXXXXX 100644
418
--- a/target/alpha/cpu.c
419
+++ b/target/alpha/cpu.c
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
421
422
static const TCGCPUOps alpha_tcg_ops = {
423
.initialize = alpha_translate_init,
424
+ .translate_code = alpha_translate_code,
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
426
.restore_state_to_opc = alpha_restore_state_to_opc,
427
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
433
.tb_stop = alpha_tr_tb_stop,
434
};
435
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
437
- vaddr pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
440
{
441
DisasContext dc;
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/arm/cpu.c
446
+++ b/target/arm/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
448
#ifdef CONFIG_TCG
449
static const TCGCPUOps arm_tcg_ops = {
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
472
.tb_stop = arm_tr_tb_stop,
473
};
474
475
-/* generate intermediate code for basic block 'tb'. */
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
477
- vaddr pc, void *host_pc)
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
480
{
481
DisasContext dc = { };
482
const TranslatorOps *ops = &arm_translator_ops;
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/avr/cpu.c
486
+++ b/target/avr/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
488
489
static const TCGCPUOps avr_tcg_ops = {
490
.initialize = avr_cpu_tcg_init,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
507
*/
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
509
.tb_stop = avr_tr_tb_stop,
510
};
511
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
513
- vaddr pc, void *host_pc)
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
515
+ int *max_insns, vaddr pc, void *host_pc)
516
{
517
DisasContext dc = { };
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/hexagon/cpu.c
522
+++ b/target/hexagon/cpu.c
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
524
525
static const TCGCPUOps hexagon_tcg_ops = {
526
.initialize = hexagon_translate_init,
527
+ .translate_code = hexagon_translate_code,
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
530
};
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
532
index XXXXXXX..XXXXXXX 100644
533
--- a/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
536
.tb_stop = hexagon_tr_tb_stop,
537
};
538
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
559
index XXXXXXX..XXXXXXX 100644
560
--- a/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
563
#endif
564
};
565
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
567
- vaddr pc, void *host_pc)
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
569
+ int *max_insns, vaddr pc, void *host_pc)
570
{
571
DisasContext ctx = { };
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
586
index XXXXXXX..XXXXXXX 100644
587
--- a/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
590
.tb_stop = i386_tr_tb_stop,
591
};
592
593
-/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
618
.tb_stop = loongarch_tr_tb_stop,
619
};
620
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
625
{
626
DisasContext ctx;
627
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
629
index XXXXXXX..XXXXXXX 100644
630
--- a/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
645
.tb_stop = m68k_tr_tb_stop,
646
};
647
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
649
- vaddr pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
652
{
653
DisasContext dc;
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {

static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
.restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0